Diffstat (limited to 'drivers/net/e1000')
-rw-r--r--   drivers/net/e1000/e1000_ethtool.c | 313
-rw-r--r--   drivers/net/e1000/e1000_hw.c      | 762
-rw-r--r--   drivers/net/e1000/e1000_main.c    | 910
-rw-r--r--   drivers/net/e1000/e1000_osdep.h   |  14
4 files changed, 1029 insertions, 970 deletions
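The change is almost entirely mechanical: each E1000_READ_REG(hw, REG) and E1000_WRITE_REG(hw, REG, value) call becomes er32(REG) or ew32(REG, value), and functions that previously reached the registers through adapter->hw gain a local "struct e1000_hw *hw = &adapter->hw;" so the short macros have an hw pointer in scope. The 14-line e1000_osdep.h change counted in the diffstat (its hunk is not shown on this page) is where those helpers are defined. As a rough sketch only, assuming the same readl()/writel() plumbing and 82542 register-offset fallback as the old macros, they look like this:

	/* Sketch of the er32()/ew32() helpers this patch switches to; the
	 * authoritative definitions are in drivers/net/e1000/e1000_osdep.h
	 * and may differ in detail.  Both macros expect a local variable
	 * 'struct e1000_hw *hw' to be in scope, which is why many of the
	 * functions below add one.
	 */
	#define er32(reg)						\
		(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543)	\
				      ? E1000_##reg : E1000_82542_##reg)))

	#define ew32(reg, value)					\
		(writel((value), (hw->hw_addr +				\
				  ((hw->mac_type >= e1000_82543)	\
				   ? E1000_##reg : E1000_82542_##reg))))

	/* E1000_WRITE_FLUSH() likewise drops its hw argument; reading any
	 * register (STATUS is the usual choice) flushes posted MMIO writes. */
	#define E1000_WRITE_FLUSH() er32(STATUS)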
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 4bcfa374f4d6..966d52a529ec 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -162,7 +162,7 @@ static int e1000_get_settings(struct net_device *netdev,
162 | ecmd->transceiver = XCVR_EXTERNAL; | 162 | ecmd->transceiver = XCVR_EXTERNAL; |
163 | } | 163 | } |
164 | 164 | ||
165 | if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) { | 165 | if (er32(STATUS) & E1000_STATUS_LU) { |
166 | 166 | ||
167 | e1000_get_speed_and_duplex(hw, &adapter->link_speed, | 167 | e1000_get_speed_and_duplex(hw, &adapter->link_speed, |
168 | &adapter->link_duplex); | 168 | &adapter->link_duplex); |
@@ -313,8 +313,9 @@ static u32 e1000_get_tx_csum(struct net_device *netdev)
313 | static int e1000_set_tx_csum(struct net_device *netdev, u32 data) | 313 | static int e1000_set_tx_csum(struct net_device *netdev, u32 data) |
314 | { | 314 | { |
315 | struct e1000_adapter *adapter = netdev_priv(netdev); | 315 | struct e1000_adapter *adapter = netdev_priv(netdev); |
316 | struct e1000_hw *hw = &adapter->hw; | ||
316 | 317 | ||
317 | if (adapter->hw.mac_type < e1000_82543) { | 318 | if (hw->mac_type < e1000_82543) { |
318 | if (!data) | 319 | if (!data) |
319 | return -EINVAL; | 320 | return -EINVAL; |
320 | return 0; | 321 | return 0; |
@@ -331,8 +332,10 @@ static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
331 | static int e1000_set_tso(struct net_device *netdev, u32 data) | 332 | static int e1000_set_tso(struct net_device *netdev, u32 data) |
332 | { | 333 | { |
333 | struct e1000_adapter *adapter = netdev_priv(netdev); | 334 | struct e1000_adapter *adapter = netdev_priv(netdev); |
334 | if ((adapter->hw.mac_type < e1000_82544) || | 335 | struct e1000_hw *hw = &adapter->hw; |
335 | (adapter->hw.mac_type == e1000_82547)) | 336 | |
337 | if ((hw->mac_type < e1000_82544) || | ||
338 | (hw->mac_type == e1000_82547)) | ||
336 | return data ? -EINVAL : 0; | 339 | return data ? -EINVAL : 0; |
337 | 340 | ||
338 | if (data) | 341 | if (data) |
@@ -380,22 +383,22 @@ static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
380 | 383 | ||
381 | regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; | 384 | regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; |
382 | 385 | ||
383 | regs_buff[0] = E1000_READ_REG(hw, CTRL); | 386 | regs_buff[0] = er32(CTRL); |
384 | regs_buff[1] = E1000_READ_REG(hw, STATUS); | 387 | regs_buff[1] = er32(STATUS); |
385 | 388 | ||
386 | regs_buff[2] = E1000_READ_REG(hw, RCTL); | 389 | regs_buff[2] = er32(RCTL); |
387 | regs_buff[3] = E1000_READ_REG(hw, RDLEN); | 390 | regs_buff[3] = er32(RDLEN); |
388 | regs_buff[4] = E1000_READ_REG(hw, RDH); | 391 | regs_buff[4] = er32(RDH); |
389 | regs_buff[5] = E1000_READ_REG(hw, RDT); | 392 | regs_buff[5] = er32(RDT); |
390 | regs_buff[6] = E1000_READ_REG(hw, RDTR); | 393 | regs_buff[6] = er32(RDTR); |
391 | 394 | ||
392 | regs_buff[7] = E1000_READ_REG(hw, TCTL); | 395 | regs_buff[7] = er32(TCTL); |
393 | regs_buff[8] = E1000_READ_REG(hw, TDLEN); | 396 | regs_buff[8] = er32(TDLEN); |
394 | regs_buff[9] = E1000_READ_REG(hw, TDH); | 397 | regs_buff[9] = er32(TDH); |
395 | regs_buff[10] = E1000_READ_REG(hw, TDT); | 398 | regs_buff[10] = er32(TDT); |
396 | regs_buff[11] = E1000_READ_REG(hw, TIDV); | 399 | regs_buff[11] = er32(TIDV); |
397 | 400 | ||
398 | regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */ | 401 | regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */ |
399 | if (hw->phy_type == e1000_phy_igp) { | 402 | if (hw->phy_type == e1000_phy_igp) { |
400 | e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, | 403 | e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, |
401 | IGP01E1000_PHY_AGC_A); | 404 | IGP01E1000_PHY_AGC_A); |
@@ -453,14 +456,16 @@ static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
453 | if (hw->mac_type >= e1000_82540 && | 456 | if (hw->mac_type >= e1000_82540 && |
454 | hw->mac_type < e1000_82571 && | 457 | hw->mac_type < e1000_82571 && |
455 | hw->media_type == e1000_media_type_copper) { | 458 | hw->media_type == e1000_media_type_copper) { |
456 | regs_buff[26] = E1000_READ_REG(hw, MANC); | 459 | regs_buff[26] = er32(MANC); |
457 | } | 460 | } |
458 | } | 461 | } |
459 | 462 | ||
460 | static int e1000_get_eeprom_len(struct net_device *netdev) | 463 | static int e1000_get_eeprom_len(struct net_device *netdev) |
461 | { | 464 | { |
462 | struct e1000_adapter *adapter = netdev_priv(netdev); | 465 | struct e1000_adapter *adapter = netdev_priv(netdev); |
463 | return adapter->hw.eeprom.word_size * 2; | 466 | struct e1000_hw *hw = &adapter->hw; |
467 | |||
468 | return hw->eeprom.word_size * 2; | ||
464 | } | 469 | } |
465 | 470 | ||
466 | static int e1000_get_eeprom(struct net_device *netdev, | 471 | static int e1000_get_eeprom(struct net_device *netdev, |
@@ -574,6 +579,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
574 | struct ethtool_drvinfo *drvinfo) | 579 | struct ethtool_drvinfo *drvinfo) |
575 | { | 580 | { |
576 | struct e1000_adapter *adapter = netdev_priv(netdev); | 581 | struct e1000_adapter *adapter = netdev_priv(netdev); |
582 | struct e1000_hw *hw = &adapter->hw; | ||
577 | char firmware_version[32]; | 583 | char firmware_version[32]; |
578 | u16 eeprom_data; | 584 | u16 eeprom_data; |
579 | 585 | ||
@@ -582,8 +588,8 @@ static void e1000_get_drvinfo(struct net_device *netdev,
582 | 588 | ||
583 | /* EEPROM image version # is reported as firmware version # for | 589 | /* EEPROM image version # is reported as firmware version # for |
584 | * 8257{1|2|3} controllers */ | 590 | * 8257{1|2|3} controllers */ |
585 | e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data); | 591 | e1000_read_eeprom(hw, 5, 1, &eeprom_data); |
586 | switch (adapter->hw.mac_type) { | 592 | switch (hw->mac_type) { |
587 | case e1000_82571: | 593 | case e1000_82571: |
588 | case e1000_82572: | 594 | case e1000_82572: |
589 | case e1000_82573: | 595 | case e1000_82573: |
@@ -608,7 +614,8 @@ static void e1000_get_ringparam(struct net_device *netdev,
608 | struct ethtool_ringparam *ring) | 614 | struct ethtool_ringparam *ring) |
609 | { | 615 | { |
610 | struct e1000_adapter *adapter = netdev_priv(netdev); | 616 | struct e1000_adapter *adapter = netdev_priv(netdev); |
611 | e1000_mac_type mac_type = adapter->hw.mac_type; | 617 | struct e1000_hw *hw = &adapter->hw; |
618 | e1000_mac_type mac_type = hw->mac_type; | ||
612 | struct e1000_tx_ring *txdr = adapter->tx_ring; | 619 | struct e1000_tx_ring *txdr = adapter->tx_ring; |
613 | struct e1000_rx_ring *rxdr = adapter->rx_ring; | 620 | struct e1000_rx_ring *rxdr = adapter->rx_ring; |
614 | 621 | ||
@@ -628,7 +635,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
628 | struct ethtool_ringparam *ring) | 635 | struct ethtool_ringparam *ring) |
629 | { | 636 | { |
630 | struct e1000_adapter *adapter = netdev_priv(netdev); | 637 | struct e1000_adapter *adapter = netdev_priv(netdev); |
631 | e1000_mac_type mac_type = adapter->hw.mac_type; | 638 | struct e1000_hw *hw = &adapter->hw; |
639 | e1000_mac_type mac_type = hw->mac_type; | ||
632 | struct e1000_tx_ring *txdr, *tx_old; | 640 | struct e1000_tx_ring *txdr, *tx_old; |
633 | struct e1000_rx_ring *rxdr, *rx_old; | 641 | struct e1000_rx_ring *rxdr, *rx_old; |
634 | int i, err; | 642 | int i, err; |
@@ -714,9 +722,10 @@ err_setup:
714 | static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, | 722 | static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, |
715 | u32 mask, u32 write) | 723 | u32 mask, u32 write) |
716 | { | 724 | { |
725 | struct e1000_hw *hw = &adapter->hw; | ||
717 | static const u32 test[] = | 726 | static const u32 test[] = |
718 | {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; | 727 | {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; |
719 | u8 __iomem *address = adapter->hw.hw_addr + reg; | 728 | u8 __iomem *address = hw->hw_addr + reg; |
720 | u32 read; | 729 | u32 read; |
721 | int i; | 730 | int i; |
722 | 731 | ||
@@ -737,7 +746,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
737 | static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, | 746 | static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, |
738 | u32 mask, u32 write) | 747 | u32 mask, u32 write) |
739 | { | 748 | { |
740 | u8 __iomem *address = adapter->hw.hw_addr + reg; | 749 | struct e1000_hw *hw = &adapter->hw; |
750 | u8 __iomem *address = hw->hw_addr + reg; | ||
741 | u32 read; | 751 | u32 read; |
742 | 752 | ||
743 | writel(write & mask, address); | 753 | writel(write & mask, address); |
@@ -755,7 +765,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
755 | #define REG_PATTERN_TEST(reg, mask, write) \ | 765 | #define REG_PATTERN_TEST(reg, mask, write) \ |
756 | do { \ | 766 | do { \ |
757 | if (reg_pattern_test(adapter, data, \ | 767 | if (reg_pattern_test(adapter, data, \ |
758 | (adapter->hw.mac_type >= e1000_82543) \ | 768 | (hw->mac_type >= e1000_82543) \ |
759 | ? E1000_##reg : E1000_82542_##reg, \ | 769 | ? E1000_##reg : E1000_82542_##reg, \ |
760 | mask, write)) \ | 770 | mask, write)) \ |
761 | return 1; \ | 771 | return 1; \ |
@@ -764,7 +774,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
764 | #define REG_SET_AND_CHECK(reg, mask, write) \ | 774 | #define REG_SET_AND_CHECK(reg, mask, write) \ |
765 | do { \ | 775 | do { \ |
766 | if (reg_set_and_check(adapter, data, \ | 776 | if (reg_set_and_check(adapter, data, \ |
767 | (adapter->hw.mac_type >= e1000_82543) \ | 777 | (hw->mac_type >= e1000_82543) \ |
768 | ? E1000_##reg : E1000_82542_##reg, \ | 778 | ? E1000_##reg : E1000_82542_##reg, \ |
769 | mask, write)) \ | 779 | mask, write)) \ |
770 | return 1; \ | 780 | return 1; \ |
@@ -774,11 +784,12 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
774 | { | 784 | { |
775 | u32 value, before, after; | 785 | u32 value, before, after; |
776 | u32 i, toggle; | 786 | u32 i, toggle; |
787 | struct e1000_hw *hw = &adapter->hw; | ||
777 | 788 | ||
778 | /* The status register is Read Only, so a write should fail. | 789 | /* The status register is Read Only, so a write should fail. |
779 | * Some bits that get toggled are ignored. | 790 | * Some bits that get toggled are ignored. |
780 | */ | 791 | */ |
781 | switch (adapter->hw.mac_type) { | 792 | switch (hw->mac_type) { |
782 | /* there are several bits on newer hardware that are r/w */ | 793 | /* there are several bits on newer hardware that are r/w */ |
783 | case e1000_82571: | 794 | case e1000_82571: |
784 | case e1000_82572: | 795 | case e1000_82572: |
@@ -794,10 +805,10 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
794 | break; | 805 | break; |
795 | } | 806 | } |
796 | 807 | ||
797 | before = E1000_READ_REG(&adapter->hw, STATUS); | 808 | before = er32(STATUS); |
798 | value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle); | 809 | value = (er32(STATUS) & toggle); |
799 | E1000_WRITE_REG(&adapter->hw, STATUS, toggle); | 810 | ew32(STATUS, toggle); |
800 | after = E1000_READ_REG(&adapter->hw, STATUS) & toggle; | 811 | after = er32(STATUS) & toggle; |
801 | if (value != after) { | 812 | if (value != after) { |
802 | DPRINTK(DRV, ERR, "failed STATUS register test got: " | 813 | DPRINTK(DRV, ERR, "failed STATUS register test got: " |
803 | "0x%08X expected: 0x%08X\n", after, value); | 814 | "0x%08X expected: 0x%08X\n", after, value); |
@@ -805,9 +816,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
805 | return 1; | 816 | return 1; |
806 | } | 817 | } |
807 | /* restore previous status */ | 818 | /* restore previous status */ |
808 | E1000_WRITE_REG(&adapter->hw, STATUS, before); | 819 | ew32(STATUS, before); |
809 | 820 | ||
810 | if (adapter->hw.mac_type != e1000_ich8lan) { | 821 | if (hw->mac_type != e1000_ich8lan) { |
811 | REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); | 822 | REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); |
812 | REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); | 823 | REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); |
813 | REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); | 824 | REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); |
@@ -827,20 +838,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
827 | 838 | ||
828 | REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); | 839 | REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); |
829 | 840 | ||
830 | before = (adapter->hw.mac_type == e1000_ich8lan ? | 841 | before = (hw->mac_type == e1000_ich8lan ? |
831 | 0x06C3B33E : 0x06DFB3FE); | 842 | 0x06C3B33E : 0x06DFB3FE); |
832 | REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); | 843 | REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); |
833 | REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); | 844 | REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); |
834 | 845 | ||
835 | if (adapter->hw.mac_type >= e1000_82543) { | 846 | if (hw->mac_type >= e1000_82543) { |
836 | 847 | ||
837 | REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); | 848 | REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); |
838 | REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); | 849 | REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); |
839 | if (adapter->hw.mac_type != e1000_ich8lan) | 850 | if (hw->mac_type != e1000_ich8lan) |
840 | REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); | 851 | REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); |
841 | REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); | 852 | REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); |
842 | REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); | 853 | REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); |
843 | value = (adapter->hw.mac_type == e1000_ich8lan ? | 854 | value = (hw->mac_type == e1000_ich8lan ? |
844 | E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES); | 855 | E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES); |
845 | for (i = 0; i < value; i++) { | 856 | for (i = 0; i < value; i++) { |
846 | REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, | 857 | REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, |
@@ -856,7 +867,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
856 | 867 | ||
857 | } | 868 | } |
858 | 869 | ||
859 | value = (adapter->hw.mac_type == e1000_ich8lan ? | 870 | value = (hw->mac_type == e1000_ich8lan ? |
860 | E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE); | 871 | E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE); |
861 | for (i = 0; i < value; i++) | 872 | for (i = 0; i < value; i++) |
862 | REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); | 873 | REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); |
@@ -867,6 +878,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
867 | 878 | ||
868 | static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) | 879 | static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) |
869 | { | 880 | { |
881 | struct e1000_hw *hw = &adapter->hw; | ||
870 | u16 temp; | 882 | u16 temp; |
871 | u16 checksum = 0; | 883 | u16 checksum = 0; |
872 | u16 i; | 884 | u16 i; |
@@ -874,7 +886,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
874 | *data = 0; | 886 | *data = 0; |
875 | /* Read and add up the contents of the EEPROM */ | 887 | /* Read and add up the contents of the EEPROM */ |
876 | for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { | 888 | for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { |
877 | if ((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) { | 889 | if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) { |
878 | *data = 1; | 890 | *data = 1; |
879 | break; | 891 | break; |
880 | } | 892 | } |
@@ -892,8 +904,9 @@ static irqreturn_t e1000_test_intr(int irq, void *data)
892 | { | 904 | { |
893 | struct net_device *netdev = (struct net_device *) data; | 905 | struct net_device *netdev = (struct net_device *) data; |
894 | struct e1000_adapter *adapter = netdev_priv(netdev); | 906 | struct e1000_adapter *adapter = netdev_priv(netdev); |
907 | struct e1000_hw *hw = &adapter->hw; | ||
895 | 908 | ||
896 | adapter->test_icr |= E1000_READ_REG(&adapter->hw, ICR); | 909 | adapter->test_icr |= er32(ICR); |
897 | 910 | ||
898 | return IRQ_HANDLED; | 911 | return IRQ_HANDLED; |
899 | } | 912 | } |
@@ -904,6 +917,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
904 | u32 mask, i = 0; | 917 | u32 mask, i = 0; |
905 | bool shared_int = true; | 918 | bool shared_int = true; |
906 | u32 irq = adapter->pdev->irq; | 919 | u32 irq = adapter->pdev->irq; |
920 | struct e1000_hw *hw = &adapter->hw; | ||
907 | 921 | ||
908 | *data = 0; | 922 | *data = 0; |
909 | 923 | ||
@@ -921,13 +935,13 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
921 | (shared_int ? "shared" : "unshared")); | 935 | (shared_int ? "shared" : "unshared")); |
922 | 936 | ||
923 | /* Disable all the interrupts */ | 937 | /* Disable all the interrupts */ |
924 | E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF); | 938 | ew32(IMC, 0xFFFFFFFF); |
925 | msleep(10); | 939 | msleep(10); |
926 | 940 | ||
927 | /* Test each interrupt */ | 941 | /* Test each interrupt */ |
928 | for (; i < 10; i++) { | 942 | for (; i < 10; i++) { |
929 | 943 | ||
930 | if (adapter->hw.mac_type == e1000_ich8lan && i == 8) | 944 | if (hw->mac_type == e1000_ich8lan && i == 8) |
931 | continue; | 945 | continue; |
932 | 946 | ||
933 | /* Interrupt to test */ | 947 | /* Interrupt to test */ |
@@ -941,8 +955,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
941 | * test failed. | 955 | * test failed. |
942 | */ | 956 | */ |
943 | adapter->test_icr = 0; | 957 | adapter->test_icr = 0; |
944 | E1000_WRITE_REG(&adapter->hw, IMC, mask); | 958 | ew32(IMC, mask); |
945 | E1000_WRITE_REG(&adapter->hw, ICS, mask); | 959 | ew32(ICS, mask); |
946 | msleep(10); | 960 | msleep(10); |
947 | 961 | ||
948 | if (adapter->test_icr & mask) { | 962 | if (adapter->test_icr & mask) { |
@@ -958,8 +972,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
958 | * test failed. | 972 | * test failed. |
959 | */ | 973 | */ |
960 | adapter->test_icr = 0; | 974 | adapter->test_icr = 0; |
961 | E1000_WRITE_REG(&adapter->hw, IMS, mask); | 975 | ew32(IMS, mask); |
962 | E1000_WRITE_REG(&adapter->hw, ICS, mask); | 976 | ew32(ICS, mask); |
963 | msleep(10); | 977 | msleep(10); |
964 | 978 | ||
965 | if (!(adapter->test_icr & mask)) { | 979 | if (!(adapter->test_icr & mask)) { |
@@ -975,8 +989,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
975 | * test failed. | 989 | * test failed. |
976 | */ | 990 | */ |
977 | adapter->test_icr = 0; | 991 | adapter->test_icr = 0; |
978 | E1000_WRITE_REG(&adapter->hw, IMC, ~mask & 0x00007FFF); | 992 | ew32(IMC, ~mask & 0x00007FFF); |
979 | E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF); | 993 | ew32(ICS, ~mask & 0x00007FFF); |
980 | msleep(10); | 994 | msleep(10); |
981 | 995 | ||
982 | if (adapter->test_icr) { | 996 | if (adapter->test_icr) { |
@@ -987,7 +1001,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
987 | } | 1001 | } |
988 | 1002 | ||
989 | /* Disable all the interrupts */ | 1003 | /* Disable all the interrupts */ |
990 | E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF); | 1004 | ew32(IMC, 0xFFFFFFFF); |
991 | msleep(10); | 1005 | msleep(10); |
992 | 1006 | ||
993 | /* Unhook test interrupt handler */ | 1007 | /* Unhook test interrupt handler */ |
@@ -1044,6 +1058,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
1044 | 1058 | ||
1045 | static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | 1059 | static int e1000_setup_desc_rings(struct e1000_adapter *adapter) |
1046 | { | 1060 | { |
1061 | struct e1000_hw *hw = &adapter->hw; | ||
1047 | struct e1000_tx_ring *txdr = &adapter->test_tx_ring; | 1062 | struct e1000_tx_ring *txdr = &adapter->test_tx_ring; |
1048 | struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; | 1063 | struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; |
1049 | struct pci_dev *pdev = adapter->pdev; | 1064 | struct pci_dev *pdev = adapter->pdev; |
@@ -1072,17 +1087,14 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1072 | memset(txdr->desc, 0, txdr->size); | 1087 | memset(txdr->desc, 0, txdr->size); |
1073 | txdr->next_to_use = txdr->next_to_clean = 0; | 1088 | txdr->next_to_use = txdr->next_to_clean = 0; |
1074 | 1089 | ||
1075 | E1000_WRITE_REG(&adapter->hw, TDBAL, | 1090 | ew32(TDBAL, ((u64) txdr->dma & 0x00000000FFFFFFFF)); |
1076 | ((u64) txdr->dma & 0x00000000FFFFFFFF)); | 1091 | ew32(TDBAH, ((u64) txdr->dma >> 32)); |
1077 | E1000_WRITE_REG(&adapter->hw, TDBAH, ((u64) txdr->dma >> 32)); | 1092 | ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc)); |
1078 | E1000_WRITE_REG(&adapter->hw, TDLEN, | 1093 | ew32(TDH, 0); |
1079 | txdr->count * sizeof(struct e1000_tx_desc)); | 1094 | ew32(TDT, 0); |
1080 | E1000_WRITE_REG(&adapter->hw, TDH, 0); | 1095 | ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | |
1081 | E1000_WRITE_REG(&adapter->hw, TDT, 0); | 1096 | E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | |
1082 | E1000_WRITE_REG(&adapter->hw, TCTL, | 1097 | E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); |
1083 | E1000_TCTL_PSP | E1000_TCTL_EN | | ||
1084 | E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | | ||
1085 | E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); | ||
1086 | 1098 | ||
1087 | for (i = 0; i < txdr->count; i++) { | 1099 | for (i = 0; i < txdr->count; i++) { |
1088 | struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); | 1100 | struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); |
@@ -1127,18 +1139,17 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1127 | memset(rxdr->desc, 0, rxdr->size); | 1139 | memset(rxdr->desc, 0, rxdr->size); |
1128 | rxdr->next_to_use = rxdr->next_to_clean = 0; | 1140 | rxdr->next_to_use = rxdr->next_to_clean = 0; |
1129 | 1141 | ||
1130 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 1142 | rctl = er32(RCTL); |
1131 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN); | 1143 | ew32(RCTL, rctl & ~E1000_RCTL_EN); |
1132 | E1000_WRITE_REG(&adapter->hw, RDBAL, | 1144 | ew32(RDBAL, ((u64) rxdr->dma & 0xFFFFFFFF)); |
1133 | ((u64) rxdr->dma & 0xFFFFFFFF)); | 1145 | ew32(RDBAH, ((u64) rxdr->dma >> 32)); |
1134 | E1000_WRITE_REG(&adapter->hw, RDBAH, ((u64) rxdr->dma >> 32)); | 1146 | ew32(RDLEN, rxdr->size); |
1135 | E1000_WRITE_REG(&adapter->hw, RDLEN, rxdr->size); | 1147 | ew32(RDH, 0); |
1136 | E1000_WRITE_REG(&adapter->hw, RDH, 0); | 1148 | ew32(RDT, 0); |
1137 | E1000_WRITE_REG(&adapter->hw, RDT, 0); | ||
1138 | rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | | 1149 | rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | |
1139 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | 1150 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | |
1140 | (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); | 1151 | (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); |
1141 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 1152 | ew32(RCTL, rctl); |
1142 | 1153 | ||
1143 | for (i = 0; i < rxdr->count; i++) { | 1154 | for (i = 0; i < rxdr->count; i++) { |
1144 | struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); | 1155 | struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); |
@@ -1168,68 +1179,72 @@ err_nomem:
1168 | 1179 | ||
1169 | static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) | 1180 | static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) |
1170 | { | 1181 | { |
1182 | struct e1000_hw *hw = &adapter->hw; | ||
1183 | |||
1171 | /* Write out to PHY registers 29 and 30 to disable the Receiver. */ | 1184 | /* Write out to PHY registers 29 and 30 to disable the Receiver. */ |
1172 | e1000_write_phy_reg(&adapter->hw, 29, 0x001F); | 1185 | e1000_write_phy_reg(hw, 29, 0x001F); |
1173 | e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC); | 1186 | e1000_write_phy_reg(hw, 30, 0x8FFC); |
1174 | e1000_write_phy_reg(&adapter->hw, 29, 0x001A); | 1187 | e1000_write_phy_reg(hw, 29, 0x001A); |
1175 | e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0); | 1188 | e1000_write_phy_reg(hw, 30, 0x8FF0); |
1176 | } | 1189 | } |
1177 | 1190 | ||
1178 | static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) | 1191 | static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) |
1179 | { | 1192 | { |
1193 | struct e1000_hw *hw = &adapter->hw; | ||
1180 | u16 phy_reg; | 1194 | u16 phy_reg; |
1181 | 1195 | ||
1182 | /* Because we reset the PHY above, we need to re-force TX_CLK in the | 1196 | /* Because we reset the PHY above, we need to re-force TX_CLK in the |
1183 | * Extended PHY Specific Control Register to 25MHz clock. This | 1197 | * Extended PHY Specific Control Register to 25MHz clock. This |
1184 | * value defaults back to a 2.5MHz clock when the PHY is reset. | 1198 | * value defaults back to a 2.5MHz clock when the PHY is reset. |
1185 | */ | 1199 | */ |
1186 | e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); | 1200 | e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); |
1187 | phy_reg |= M88E1000_EPSCR_TX_CLK_25; | 1201 | phy_reg |= M88E1000_EPSCR_TX_CLK_25; |
1188 | e1000_write_phy_reg(&adapter->hw, | 1202 | e1000_write_phy_reg(hw, |
1189 | M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); | 1203 | M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); |
1190 | 1204 | ||
1191 | /* In addition, because of the s/w reset above, we need to enable | 1205 | /* In addition, because of the s/w reset above, we need to enable |
1192 | * CRS on TX. This must be set for both full and half duplex | 1206 | * CRS on TX. This must be set for both full and half duplex |
1193 | * operation. | 1207 | * operation. |
1194 | */ | 1208 | */ |
1195 | e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); | 1209 | e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); |
1196 | phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; | 1210 | phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; |
1197 | e1000_write_phy_reg(&adapter->hw, | 1211 | e1000_write_phy_reg(hw, |
1198 | M88E1000_PHY_SPEC_CTRL, phy_reg); | 1212 | M88E1000_PHY_SPEC_CTRL, phy_reg); |
1199 | } | 1213 | } |
1200 | 1214 | ||
1201 | static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) | 1215 | static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) |
1202 | { | 1216 | { |
1217 | struct e1000_hw *hw = &adapter->hw; | ||
1203 | u32 ctrl_reg; | 1218 | u32 ctrl_reg; |
1204 | u16 phy_reg; | 1219 | u16 phy_reg; |
1205 | 1220 | ||
1206 | /* Setup the Device Control Register for PHY loopback test. */ | 1221 | /* Setup the Device Control Register for PHY loopback test. */ |
1207 | 1222 | ||
1208 | ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL); | 1223 | ctrl_reg = er32(CTRL); |
1209 | ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ | 1224 | ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ |
1210 | E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ | 1225 | E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ |
1211 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ | 1226 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ |
1212 | E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ | 1227 | E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ |
1213 | E1000_CTRL_FD); /* Force Duplex to FULL */ | 1228 | E1000_CTRL_FD); /* Force Duplex to FULL */ |
1214 | 1229 | ||
1215 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg); | 1230 | ew32(CTRL, ctrl_reg); |
1216 | 1231 | ||
1217 | /* Read the PHY Specific Control Register (0x10) */ | 1232 | /* Read the PHY Specific Control Register (0x10) */ |
1218 | e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); | 1233 | e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); |
1219 | 1234 | ||
1220 | /* Clear Auto-Crossover bits in PHY Specific Control Register | 1235 | /* Clear Auto-Crossover bits in PHY Specific Control Register |
1221 | * (bits 6:5). | 1236 | * (bits 6:5). |
1222 | */ | 1237 | */ |
1223 | phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; | 1238 | phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; |
1224 | e1000_write_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, phy_reg); | 1239 | e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); |
1225 | 1240 | ||
1226 | /* Perform software reset on the PHY */ | 1241 | /* Perform software reset on the PHY */ |
1227 | e1000_phy_reset(&adapter->hw); | 1242 | e1000_phy_reset(hw); |
1228 | 1243 | ||
1229 | /* Have to setup TX_CLK and TX_CRS after software reset */ | 1244 | /* Have to setup TX_CLK and TX_CRS after software reset */ |
1230 | e1000_phy_reset_clk_and_crs(adapter); | 1245 | e1000_phy_reset_clk_and_crs(adapter); |
1231 | 1246 | ||
1232 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8100); | 1247 | e1000_write_phy_reg(hw, PHY_CTRL, 0x8100); |
1233 | 1248 | ||
1234 | /* Wait for reset to complete. */ | 1249 | /* Wait for reset to complete. */ |
1235 | udelay(500); | 1250 | udelay(500); |
@@ -1241,23 +1256,23 @@ static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
1241 | e1000_phy_disable_receiver(adapter); | 1256 | e1000_phy_disable_receiver(adapter); |
1242 | 1257 | ||
1243 | /* Set the loopback bit in the PHY control register. */ | 1258 | /* Set the loopback bit in the PHY control register. */ |
1244 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg); | 1259 | e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); |
1245 | phy_reg |= MII_CR_LOOPBACK; | 1260 | phy_reg |= MII_CR_LOOPBACK; |
1246 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg); | 1261 | e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); |
1247 | 1262 | ||
1248 | /* Setup TX_CLK and TX_CRS one more time. */ | 1263 | /* Setup TX_CLK and TX_CRS one more time. */ |
1249 | e1000_phy_reset_clk_and_crs(adapter); | 1264 | e1000_phy_reset_clk_and_crs(adapter); |
1250 | 1265 | ||
1251 | /* Check Phy Configuration */ | 1266 | /* Check Phy Configuration */ |
1252 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg); | 1267 | e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); |
1253 | if (phy_reg != 0x4100) | 1268 | if (phy_reg != 0x4100) |
1254 | return 9; | 1269 | return 9; |
1255 | 1270 | ||
1256 | e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); | 1271 | e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); |
1257 | if (phy_reg != 0x0070) | 1272 | if (phy_reg != 0x0070) |
1258 | return 10; | 1273 | return 10; |
1259 | 1274 | ||
1260 | e1000_read_phy_reg(&adapter->hw, 29, &phy_reg); | 1275 | e1000_read_phy_reg(hw, 29, &phy_reg); |
1261 | if (phy_reg != 0x001A) | 1276 | if (phy_reg != 0x001A) |
1262 | return 11; | 1277 | return 11; |
1263 | 1278 | ||
@@ -1266,29 +1281,30 @@ static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
1266 | 1281 | ||
1267 | static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | 1282 | static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) |
1268 | { | 1283 | { |
1284 | struct e1000_hw *hw = &adapter->hw; | ||
1269 | u32 ctrl_reg = 0; | 1285 | u32 ctrl_reg = 0; |
1270 | u32 stat_reg = 0; | 1286 | u32 stat_reg = 0; |
1271 | 1287 | ||
1272 | adapter->hw.autoneg = false; | 1288 | hw->autoneg = false; |
1273 | 1289 | ||
1274 | if (adapter->hw.phy_type == e1000_phy_m88) { | 1290 | if (hw->phy_type == e1000_phy_m88) { |
1275 | /* Auto-MDI/MDIX Off */ | 1291 | /* Auto-MDI/MDIX Off */ |
1276 | e1000_write_phy_reg(&adapter->hw, | 1292 | e1000_write_phy_reg(hw, |
1277 | M88E1000_PHY_SPEC_CTRL, 0x0808); | 1293 | M88E1000_PHY_SPEC_CTRL, 0x0808); |
1278 | /* reset to update Auto-MDI/MDIX */ | 1294 | /* reset to update Auto-MDI/MDIX */ |
1279 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140); | 1295 | e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); |
1280 | /* autoneg off */ | 1296 | /* autoneg off */ |
1281 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140); | 1297 | e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); |
1282 | } else if (adapter->hw.phy_type == e1000_phy_gg82563) | 1298 | } else if (hw->phy_type == e1000_phy_gg82563) |
1283 | e1000_write_phy_reg(&adapter->hw, | 1299 | e1000_write_phy_reg(hw, |
1284 | GG82563_PHY_KMRN_MODE_CTRL, | 1300 | GG82563_PHY_KMRN_MODE_CTRL, |
1285 | 0x1CC); | 1301 | 0x1CC); |
1286 | 1302 | ||
1287 | ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL); | 1303 | ctrl_reg = er32(CTRL); |
1288 | 1304 | ||
1289 | if (adapter->hw.phy_type == e1000_phy_ife) { | 1305 | if (hw->phy_type == e1000_phy_ife) { |
1290 | /* force 100, set loopback */ | 1306 | /* force 100, set loopback */ |
1291 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x6100); | 1307 | e1000_write_phy_reg(hw, PHY_CTRL, 0x6100); |
1292 | 1308 | ||
1293 | /* Now set up the MAC to the same speed/duplex as the PHY. */ | 1309 | /* Now set up the MAC to the same speed/duplex as the PHY. */ |
1294 | ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ | 1310 | ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ |
@@ -1298,10 +1314,10 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1298 | E1000_CTRL_FD); /* Force Duplex to FULL */ | 1314 | E1000_CTRL_FD); /* Force Duplex to FULL */ |
1299 | } else { | 1315 | } else { |
1300 | /* force 1000, set loopback */ | 1316 | /* force 1000, set loopback */ |
1301 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140); | 1317 | e1000_write_phy_reg(hw, PHY_CTRL, 0x4140); |
1302 | 1318 | ||
1303 | /* Now set up the MAC to the same speed/duplex as the PHY. */ | 1319 | /* Now set up the MAC to the same speed/duplex as the PHY. */ |
1304 | ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL); | 1320 | ctrl_reg = er32(CTRL); |
1305 | ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ | 1321 | ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ |
1306 | ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ | 1322 | ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ |
1307 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ | 1323 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ |
@@ -1309,23 +1325,23 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1309 | E1000_CTRL_FD); /* Force Duplex to FULL */ | 1325 | E1000_CTRL_FD); /* Force Duplex to FULL */ |
1310 | } | 1326 | } |
1311 | 1327 | ||
1312 | if (adapter->hw.media_type == e1000_media_type_copper && | 1328 | if (hw->media_type == e1000_media_type_copper && |
1313 | adapter->hw.phy_type == e1000_phy_m88) | 1329 | hw->phy_type == e1000_phy_m88) |
1314 | ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ | 1330 | ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ |
1315 | else { | 1331 | else { |
1316 | /* Set the ILOS bit on the fiber Nic is half | 1332 | /* Set the ILOS bit on the fiber Nic is half |
1317 | * duplex link is detected. */ | 1333 | * duplex link is detected. */ |
1318 | stat_reg = E1000_READ_REG(&adapter->hw, STATUS); | 1334 | stat_reg = er32(STATUS); |
1319 | if ((stat_reg & E1000_STATUS_FD) == 0) | 1335 | if ((stat_reg & E1000_STATUS_FD) == 0) |
1320 | ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); | 1336 | ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); |
1321 | } | 1337 | } |
1322 | 1338 | ||
1323 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg); | 1339 | ew32(CTRL, ctrl_reg); |
1324 | 1340 | ||
1325 | /* Disable the receiver on the PHY so when a cable is plugged in, the | 1341 | /* Disable the receiver on the PHY so when a cable is plugged in, the |
1326 | * PHY does not begin to autoneg when a cable is reconnected to the NIC. | 1342 | * PHY does not begin to autoneg when a cable is reconnected to the NIC. |
1327 | */ | 1343 | */ |
1328 | if (adapter->hw.phy_type == e1000_phy_m88) | 1344 | if (hw->phy_type == e1000_phy_m88) |
1329 | e1000_phy_disable_receiver(adapter); | 1345 | e1000_phy_disable_receiver(adapter); |
1330 | 1346 | ||
1331 | udelay(500); | 1347 | udelay(500); |
@@ -1335,12 +1351,13 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1335 | 1351 | ||
1336 | static int e1000_set_phy_loopback(struct e1000_adapter *adapter) | 1352 | static int e1000_set_phy_loopback(struct e1000_adapter *adapter) |
1337 | { | 1353 | { |
1354 | struct e1000_hw *hw = &adapter->hw; | ||
1338 | u16 phy_reg = 0; | 1355 | u16 phy_reg = 0; |
1339 | u16 count = 0; | 1356 | u16 count = 0; |
1340 | 1357 | ||
1341 | switch (adapter->hw.mac_type) { | 1358 | switch (hw->mac_type) { |
1342 | case e1000_82543: | 1359 | case e1000_82543: |
1343 | if (adapter->hw.media_type == e1000_media_type_copper) { | 1360 | if (hw->media_type == e1000_media_type_copper) { |
1344 | /* Attempt to setup Loopback mode on Non-integrated PHY. | 1361 | /* Attempt to setup Loopback mode on Non-integrated PHY. |
1345 | * Some PHY registers get corrupted at random, so | 1362 | * Some PHY registers get corrupted at random, so |
1346 | * attempt this 10 times. | 1363 | * attempt this 10 times. |
@@ -1374,9 +1391,9 @@ static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
1374 | /* Default PHY loopback work is to read the MII | 1391 | /* Default PHY loopback work is to read the MII |
1375 | * control register and assert bit 14 (loopback mode). | 1392 | * control register and assert bit 14 (loopback mode). |
1376 | */ | 1393 | */ |
1377 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg); | 1394 | e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); |
1378 | phy_reg |= MII_CR_LOOPBACK; | 1395 | phy_reg |= MII_CR_LOOPBACK; |
1379 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg); | 1396 | e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); |
1380 | return 0; | 1397 | return 0; |
1381 | break; | 1398 | break; |
1382 | } | 1399 | } |
@@ -1402,14 +1419,14 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
1402 | case e1000_82572: | 1419 | case e1000_82572: |
1403 | #define E1000_SERDES_LB_ON 0x410 | 1420 | #define E1000_SERDES_LB_ON 0x410 |
1404 | e1000_set_phy_loopback(adapter); | 1421 | e1000_set_phy_loopback(adapter); |
1405 | E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_ON); | 1422 | ew32(SCTL, E1000_SERDES_LB_ON); |
1406 | msleep(10); | 1423 | msleep(10); |
1407 | return 0; | 1424 | return 0; |
1408 | break; | 1425 | break; |
1409 | default: | 1426 | default: |
1410 | rctl = E1000_READ_REG(hw, RCTL); | 1427 | rctl = er32(RCTL); |
1411 | rctl |= E1000_RCTL_LBM_TCVR; | 1428 | rctl |= E1000_RCTL_LBM_TCVR; |
1412 | E1000_WRITE_REG(hw, RCTL, rctl); | 1429 | ew32(RCTL, rctl); |
1413 | return 0; | 1430 | return 0; |
1414 | } | 1431 | } |
1415 | } else if (hw->media_type == e1000_media_type_copper) | 1432 | } else if (hw->media_type == e1000_media_type_copper) |
@@ -1424,9 +1441,9 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
1424 | u32 rctl; | 1441 | u32 rctl; |
1425 | u16 phy_reg; | 1442 | u16 phy_reg; |
1426 | 1443 | ||
1427 | rctl = E1000_READ_REG(hw, RCTL); | 1444 | rctl = er32(RCTL); |
1428 | rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); | 1445 | rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); |
1429 | E1000_WRITE_REG(hw, RCTL, rctl); | 1446 | ew32(RCTL, rctl); |
1430 | 1447 | ||
1431 | switch (hw->mac_type) { | 1448 | switch (hw->mac_type) { |
1432 | case e1000_82571: | 1449 | case e1000_82571: |
@@ -1434,7 +1451,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
1434 | if (hw->media_type == e1000_media_type_fiber || | 1451 | if (hw->media_type == e1000_media_type_fiber || |
1435 | hw->media_type == e1000_media_type_internal_serdes) { | 1452 | hw->media_type == e1000_media_type_internal_serdes) { |
1436 | #define E1000_SERDES_LB_OFF 0x400 | 1453 | #define E1000_SERDES_LB_OFF 0x400 |
1437 | E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF); | 1454 | ew32(SCTL, E1000_SERDES_LB_OFF); |
1438 | msleep(10); | 1455 | msleep(10); |
1439 | break; | 1456 | break; |
1440 | } | 1457 | } |
@@ -1484,13 +1501,14 @@ static int e1000_check_lbtest_frame(struct sk_buff *skb,
1484 | 1501 | ||
1485 | static int e1000_run_loopback_test(struct e1000_adapter *adapter) | 1502 | static int e1000_run_loopback_test(struct e1000_adapter *adapter) |
1486 | { | 1503 | { |
1504 | struct e1000_hw *hw = &adapter->hw; | ||
1487 | struct e1000_tx_ring *txdr = &adapter->test_tx_ring; | 1505 | struct e1000_tx_ring *txdr = &adapter->test_tx_ring; |
1488 | struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; | 1506 | struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; |
1489 | struct pci_dev *pdev = adapter->pdev; | 1507 | struct pci_dev *pdev = adapter->pdev; |
1490 | int i, j, k, l, lc, good_cnt, ret_val=0; | 1508 | int i, j, k, l, lc, good_cnt, ret_val=0; |
1491 | unsigned long time; | 1509 | unsigned long time; |
1492 | 1510 | ||
1493 | E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1); | 1511 | ew32(RDT, rxdr->count - 1); |
1494 | 1512 | ||
1495 | /* Calculate the loop count based on the largest descriptor ring | 1513 | /* Calculate the loop count based on the largest descriptor ring |
1496 | * The idea is to wrap the largest ring a number of times using 64 | 1514 | * The idea is to wrap the largest ring a number of times using 64 |
@@ -1513,7 +1531,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1513 | PCI_DMA_TODEVICE); | 1531 | PCI_DMA_TODEVICE); |
1514 | if (unlikely(++k == txdr->count)) k = 0; | 1532 | if (unlikely(++k == txdr->count)) k = 0; |
1515 | } | 1533 | } |
1516 | E1000_WRITE_REG(&adapter->hw, TDT, k); | 1534 | ew32(TDT, k); |
1517 | msleep(200); | 1535 | msleep(200); |
1518 | time = jiffies; /* set the start time for the receive */ | 1536 | time = jiffies; /* set the start time for the receive */ |
1519 | good_cnt = 0; | 1537 | good_cnt = 0; |
@@ -1548,9 +1566,11 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1548 | 1566 | ||
1549 | static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) | 1567 | static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) |
1550 | { | 1568 | { |
1569 | struct e1000_hw *hw = &adapter->hw; | ||
1570 | |||
1551 | /* PHY loopback cannot be performed if SoL/IDER | 1571 | /* PHY loopback cannot be performed if SoL/IDER |
1552 | * sessions are active */ | 1572 | * sessions are active */ |
1553 | if (e1000_check_phy_reset_block(&adapter->hw)) { | 1573 | if (e1000_check_phy_reset_block(hw)) { |
1554 | DPRINTK(DRV, ERR, "Cannot do PHY loopback test " | 1574 | DPRINTK(DRV, ERR, "Cannot do PHY loopback test " |
1555 | "when SoL/IDER is active.\n"); | 1575 | "when SoL/IDER is active.\n"); |
1556 | *data = 0; | 1576 | *data = 0; |
@@ -1572,27 +1592,28 @@ out:
1572 | 1592 | ||
1573 | static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) | 1593 | static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) |
1574 | { | 1594 | { |
1595 | struct e1000_hw *hw = &adapter->hw; | ||
1575 | *data = 0; | 1596 | *data = 0; |
1576 | if (adapter->hw.media_type == e1000_media_type_internal_serdes) { | 1597 | if (hw->media_type == e1000_media_type_internal_serdes) { |
1577 | int i = 0; | 1598 | int i = 0; |
1578 | adapter->hw.serdes_link_down = true; | 1599 | hw->serdes_link_down = true; |
1579 | 1600 | ||
1580 | /* On some blade server designs, link establishment | 1601 | /* On some blade server designs, link establishment |
1581 | * could take as long as 2-3 minutes */ | 1602 | * could take as long as 2-3 minutes */ |
1582 | do { | 1603 | do { |
1583 | e1000_check_for_link(&adapter->hw); | 1604 | e1000_check_for_link(hw); |
1584 | if (!adapter->hw.serdes_link_down) | 1605 | if (!hw->serdes_link_down) |
1585 | return *data; | 1606 | return *data; |
1586 | msleep(20); | 1607 | msleep(20); |
1587 | } while (i++ < 3750); | 1608 | } while (i++ < 3750); |
1588 | 1609 | ||
1589 | *data = 1; | 1610 | *data = 1; |
1590 | } else { | 1611 | } else { |
1591 | e1000_check_for_link(&adapter->hw); | 1612 | e1000_check_for_link(hw); |
1592 | if (adapter->hw.autoneg) /* if auto_neg is set wait for it */ | 1613 | if (hw->autoneg) /* if auto_neg is set wait for it */ |
1593 | msleep(4000); | 1614 | msleep(4000); |
1594 | 1615 | ||
1595 | if (!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) { | 1616 | if (!(er32(STATUS) & E1000_STATUS_LU)) { |
1596 | *data = 1; | 1617 | *data = 1; |
1597 | } | 1618 | } |
1598 | } | 1619 | } |
@@ -1615,6 +1636,7 @@ static void e1000_diag_test(struct net_device *netdev,
1615 | struct ethtool_test *eth_test, u64 *data) | 1636 | struct ethtool_test *eth_test, u64 *data) |
1616 | { | 1637 | { |
1617 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1638 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1639 | struct e1000_hw *hw = &adapter->hw; | ||
1618 | bool if_running = netif_running(netdev); | 1640 | bool if_running = netif_running(netdev); |
1619 | 1641 | ||
1620 | set_bit(__E1000_TESTING, &adapter->flags); | 1642 | set_bit(__E1000_TESTING, &adapter->flags); |
@@ -1622,9 +1644,9 @@ static void e1000_diag_test(struct net_device *netdev,
1622 | /* Offline tests */ | 1644 | /* Offline tests */ |
1623 | 1645 | ||
1624 | /* save speed, duplex, autoneg settings */ | 1646 | /* save speed, duplex, autoneg settings */ |
1625 | u16 autoneg_advertised = adapter->hw.autoneg_advertised; | 1647 | u16 autoneg_advertised = hw->autoneg_advertised; |
1626 | u8 forced_speed_duplex = adapter->hw.forced_speed_duplex; | 1648 | u8 forced_speed_duplex = hw->forced_speed_duplex; |
1627 | u8 autoneg = adapter->hw.autoneg; | 1649 | u8 autoneg = hw->autoneg; |
1628 | 1650 | ||
1629 | DPRINTK(HW, INFO, "offline testing starting\n"); | 1651 | DPRINTK(HW, INFO, "offline testing starting\n"); |
1630 | 1652 | ||
@@ -1657,9 +1679,9 @@ static void e1000_diag_test(struct net_device *netdev,
1657 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1679 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1658 | 1680 | ||
1659 | /* restore speed, duplex, autoneg settings */ | 1681 | /* restore speed, duplex, autoneg settings */ |
1660 | adapter->hw.autoneg_advertised = autoneg_advertised; | 1682 | hw->autoneg_advertised = autoneg_advertised; |
1661 | adapter->hw.forced_speed_duplex = forced_speed_duplex; | 1683 | hw->forced_speed_duplex = forced_speed_duplex; |
1662 | adapter->hw.autoneg = autoneg; | 1684 | hw->autoneg = autoneg; |
1663 | 1685 | ||
1664 | e1000_reset(adapter); | 1686 | e1000_reset(adapter); |
1665 | clear_bit(__E1000_TESTING, &adapter->flags); | 1687 | clear_bit(__E1000_TESTING, &adapter->flags); |
@@ -1708,7 +1730,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
1708 | case E1000_DEV_ID_82571EB_SERDES: | 1730 | case E1000_DEV_ID_82571EB_SERDES: |
1709 | case E1000_DEV_ID_82571EB_COPPER: | 1731 | case E1000_DEV_ID_82571EB_COPPER: |
1710 | /* Wake events not supported on port B */ | 1732 | /* Wake events not supported on port B */ |
1711 | if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) { | 1733 | if (er32(STATUS) & E1000_STATUS_FUNC_1) { |
1712 | wol->supported = 0; | 1734 | wol->supported = 0; |
1713 | break; | 1735 | break; |
1714 | } | 1736 | } |
@@ -1732,7 +1754,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
1732 | /* dual port cards only support WoL on port A from now on | 1754 | /* dual port cards only support WoL on port A from now on |
1733 | * unless it was enabled in the eeprom for port B | 1755 | * unless it was enabled in the eeprom for port B |
1734 | * so exclude FUNC_1 ports from having WoL enabled */ | 1756 | * so exclude FUNC_1 ports from having WoL enabled */ |
1735 | if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1 && | 1757 | if (er32(STATUS) & E1000_STATUS_FUNC_1 && |
1736 | !adapter->eeprom_wol) { | 1758 | !adapter->eeprom_wol) { |
1737 | wol->supported = 0; | 1759 | wol->supported = 0; |
1738 | break; | 1760 | break; |
@@ -1748,6 +1770,7 @@ static void e1000_get_wol(struct net_device *netdev,
1748 | struct ethtool_wolinfo *wol) | 1770 | struct ethtool_wolinfo *wol) |
1749 | { | 1771 | { |
1750 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1772 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1773 | struct e1000_hw *hw = &adapter->hw; | ||
1751 | 1774 | ||
1752 | wol->supported = WAKE_UCAST | WAKE_MCAST | | 1775 | wol->supported = WAKE_UCAST | WAKE_MCAST | |
1753 | WAKE_BCAST | WAKE_MAGIC; | 1776 | WAKE_BCAST | WAKE_MAGIC; |
@@ -1759,7 +1782,7 @@ static void e1000_get_wol(struct net_device *netdev,
1759 | return; | 1782 | return; |
1760 | 1783 | ||
1761 | /* apply any specific unsupported masks here */ | 1784 | /* apply any specific unsupported masks here */ |
1762 | switch (adapter->hw.device_id) { | 1785 | switch (hw->device_id) { |
1763 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: | 1786 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: |
1764 | /* KSP3 does not suppport UCAST wake-ups */ | 1787 | /* KSP3 does not suppport UCAST wake-ups */ |
1765 | wol->supported &= ~WAKE_UCAST; | 1788 | wol->supported &= ~WAKE_UCAST; |
@@ -1831,11 +1854,12 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1831 | static void e1000_led_blink_callback(unsigned long data) | 1854 | static void e1000_led_blink_callback(unsigned long data) |
1832 | { | 1855 | { |
1833 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 1856 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
1857 | struct e1000_hw *hw = &adapter->hw; | ||
1834 | 1858 | ||
1835 | if (test_and_change_bit(E1000_LED_ON, &adapter->led_status)) | 1859 | if (test_and_change_bit(E1000_LED_ON, &adapter->led_status)) |
1836 | e1000_led_off(&adapter->hw); | 1860 | e1000_led_off(hw); |
1837 | else | 1861 | else |
1838 | e1000_led_on(&adapter->hw); | 1862 | e1000_led_on(hw); |
1839 | 1863 | ||
1840 | mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL); | 1864 | mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL); |
1841 | } | 1865 | } |
@@ -1843,21 +1867,22 @@ static void e1000_led_blink_callback(unsigned long data)
1843 | static int e1000_phys_id(struct net_device *netdev, u32 data) | 1867 | static int e1000_phys_id(struct net_device *netdev, u32 data) |
1844 | { | 1868 | { |
1845 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1869 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1870 | struct e1000_hw *hw = &adapter->hw; | ||
1846 | 1871 | ||
1847 | if (!data) | 1872 | if (!data) |
1848 | data = INT_MAX; | 1873 | data = INT_MAX; |
1849 | 1874 | ||
1850 | if (adapter->hw.mac_type < e1000_82571) { | 1875 | if (hw->mac_type < e1000_82571) { |
1851 | if (!adapter->blink_timer.function) { | 1876 | if (!adapter->blink_timer.function) { |
1852 | init_timer(&adapter->blink_timer); | 1877 | init_timer(&adapter->blink_timer); |
1853 | adapter->blink_timer.function = e1000_led_blink_callback; | 1878 | adapter->blink_timer.function = e1000_led_blink_callback; |
1854 | adapter->blink_timer.data = (unsigned long) adapter; | 1879 | adapter->blink_timer.data = (unsigned long) adapter; |
1855 | } | 1880 | } |
1856 | e1000_setup_led(&adapter->hw); | 1881 | e1000_setup_led(hw); |
1857 | mod_timer(&adapter->blink_timer, jiffies); | 1882 | mod_timer(&adapter->blink_timer, jiffies); |
1858 | msleep_interruptible(data * 1000); | 1883 | msleep_interruptible(data * 1000); |
1859 | del_timer_sync(&adapter->blink_timer); | 1884 | del_timer_sync(&adapter->blink_timer); |
1860 | } else if (adapter->hw.phy_type == e1000_phy_ife) { | 1885 | } else if (hw->phy_type == e1000_phy_ife) { |
1861 | if (!adapter->blink_timer.function) { | 1886 | if (!adapter->blink_timer.function) { |
1862 | init_timer(&adapter->blink_timer); | 1887 | init_timer(&adapter->blink_timer); |
1863 | adapter->blink_timer.function = e1000_led_blink_callback; | 1888 | adapter->blink_timer.function = e1000_led_blink_callback; |
@@ -1868,13 +1893,13 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
1868 | del_timer_sync(&adapter->blink_timer); | 1893 | del_timer_sync(&adapter->blink_timer); |
1869 | e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0); | 1894 | e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0); |
1870 | } else { | 1895 | } else { |
1871 | e1000_blink_led_start(&adapter->hw); | 1896 | e1000_blink_led_start(hw); |
1872 | msleep_interruptible(data * 1000); | 1897 | msleep_interruptible(data * 1000); |
1873 | } | 1898 | } |
1874 | 1899 | ||
1875 | e1000_led_off(&adapter->hw); | 1900 | e1000_led_off(hw); |
1876 | clear_bit(E1000_LED_ON, &adapter->led_status); | 1901 | clear_bit(E1000_LED_ON, &adapter->led_status); |
1877 | e1000_cleanup_led(&adapter->hw); | 1902 | e1000_cleanup_led(hw); |
1878 | 1903 | ||
1879 | return 0; | 1904 | return 0; |
1880 | } | 1905 | } |
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index d6c272ae437f..5d3c2bd7b612 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -520,7 +520,7 @@ void e1000_set_media_type(struct e1000_hw *hw)
520 | hw->media_type = e1000_media_type_copper; | 520 | hw->media_type = e1000_media_type_copper; |
521 | break; | 521 | break; |
522 | default: | 522 | default: |
523 | status = E1000_READ_REG(hw, STATUS); | 523 | status = er32(STATUS); |
524 | if (status & E1000_STATUS_TBIMODE) { | 524 | if (status & E1000_STATUS_TBIMODE) { |
525 | hw->media_type = e1000_media_type_fiber; | 525 | hw->media_type = e1000_media_type_fiber; |
526 | /* tbi_compatibility not valid on fiber */ | 526 | /* tbi_compatibility not valid on fiber */ |
@@ -568,15 +568,15 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
568 | 568 | ||
569 | /* Clear interrupt mask to stop board from generating interrupts */ | 569 | /* Clear interrupt mask to stop board from generating interrupts */ |
570 | DEBUGOUT("Masking off all interrupts\n"); | 570 | DEBUGOUT("Masking off all interrupts\n"); |
571 | E1000_WRITE_REG(hw, IMC, 0xffffffff); | 571 | ew32(IMC, 0xffffffff); |
572 | 572 | ||
573 | /* Disable the Transmit and Receive units. Then delay to allow | 573 | /* Disable the Transmit and Receive units. Then delay to allow |
574 | * any pending transactions to complete before we hit the MAC with | 574 | * any pending transactions to complete before we hit the MAC with |
575 | * the global reset. | 575 | * the global reset. |
576 | */ | 576 | */ |
577 | E1000_WRITE_REG(hw, RCTL, 0); | 577 | ew32(RCTL, 0); |
578 | E1000_WRITE_REG(hw, TCTL, E1000_TCTL_PSP); | 578 | ew32(TCTL, E1000_TCTL_PSP); |
579 | E1000_WRITE_FLUSH(hw); | 579 | E1000_WRITE_FLUSH(); |
580 | 580 | ||
581 | /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ | 581 | /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ |
582 | hw->tbi_compatibility_on = false; | 582 | hw->tbi_compatibility_on = false; |
@@ -586,11 +586,11 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
586 | */ | 586 | */ |
587 | msleep(10); | 587 | msleep(10); |
588 | 588 | ||
589 | ctrl = E1000_READ_REG(hw, CTRL); | 589 | ctrl = er32(CTRL); |
590 | 590 | ||
591 | /* Must reset the PHY before resetting the MAC */ | 591 | /* Must reset the PHY before resetting the MAC */ |
592 | if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { | 592 | if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { |
593 | E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST)); | 593 | ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); |
594 | msleep(5); | 594 | msleep(5); |
595 | } | 595 | } |
596 | 596 | ||
@@ -599,12 +599,12 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
599 | if (hw->mac_type == e1000_82573) { | 599 | if (hw->mac_type == e1000_82573) { |
600 | timeout = 10; | 600 | timeout = 10; |
601 | 601 | ||
602 | extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); | 602 | extcnf_ctrl = er32(EXTCNF_CTRL); |
603 | extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; | 603 | extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; |
604 | 604 | ||
605 | do { | 605 | do { |
606 | E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); | 606 | ew32(EXTCNF_CTRL, extcnf_ctrl); |
607 | extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); | 607 | extcnf_ctrl = er32(EXTCNF_CTRL); |
608 | 608 | ||
609 | if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) | 609 | if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) |
610 | break; | 610 | break; |
@@ -619,9 +619,9 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
619 | /* Workaround for ICH8 bit corruption issue in FIFO memory */ | 619 | /* Workaround for ICH8 bit corruption issue in FIFO memory */ |
620 | if (hw->mac_type == e1000_ich8lan) { | 620 | if (hw->mac_type == e1000_ich8lan) { |
621 | /* Set Tx and Rx buffer allocation to 8k apiece. */ | 621 | /* Set Tx and Rx buffer allocation to 8k apiece. */ |
622 | E1000_WRITE_REG(hw, PBA, E1000_PBA_8K); | 622 | ew32(PBA, E1000_PBA_8K); |
623 | /* Set Packet Buffer Size to 16k. */ | 623 | /* Set Packet Buffer Size to 16k. */ |
624 | E1000_WRITE_REG(hw, PBS, E1000_PBS_16K); | 624 | ew32(PBS, E1000_PBS_16K); |
625 | } | 625 | } |
626 | 626 | ||
627 | /* Issue a global reset to the MAC. This will reset the chip's | 627 | /* Issue a global reset to the MAC. This will reset the chip's |
@@ -645,7 +645,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw) | |||
645 | case e1000_82545_rev_3: | 645 | case e1000_82545_rev_3: |
646 | case e1000_82546_rev_3: | 646 | case e1000_82546_rev_3: |
647 | /* Reset is performed on a shadow of the control register */ | 647 | /* Reset is performed on a shadow of the control register */ |
648 | E1000_WRITE_REG(hw, CTRL_DUP, (ctrl | E1000_CTRL_RST)); | 648 | ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); |
649 | break; | 649 | break; |
650 | case e1000_ich8lan: | 650 | case e1000_ich8lan: |
651 | if (!hw->phy_reset_disable && | 651 | if (!hw->phy_reset_disable && |
@@ -658,11 +658,11 @@ s32 e1000_reset_hw(struct e1000_hw *hw) | |||
658 | } | 658 | } |
659 | 659 | ||
660 | e1000_get_software_flag(hw); | 660 | e1000_get_software_flag(hw); |
661 | E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST)); | 661 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); |
662 | msleep(5); | 662 | msleep(5); |
663 | break; | 663 | break; |
664 | default: | 664 | default: |
665 | E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST)); | 665 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); |
666 | break; | 666 | break; |
667 | } | 667 | } |
668 | 668 | ||
@@ -677,10 +677,10 @@ s32 e1000_reset_hw(struct e1000_hw *hw) | |||
677 | case e1000_82544: | 677 | case e1000_82544: |
678 | /* Wait for reset to complete */ | 678 | /* Wait for reset to complete */ |
679 | udelay(10); | 679 | udelay(10); |
680 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 680 | ctrl_ext = er32(CTRL_EXT); |
681 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; | 681 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; |
682 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 682 | ew32(CTRL_EXT, ctrl_ext); |
683 | E1000_WRITE_FLUSH(hw); | 683 | E1000_WRITE_FLUSH(); |
684 | /* Wait for EEPROM reload */ | 684 | /* Wait for EEPROM reload */ |
685 | msleep(2); | 685 | msleep(2); |
686 | break; | 686 | break; |
@@ -694,10 +694,10 @@ s32 e1000_reset_hw(struct e1000_hw *hw) | |||
694 | case e1000_82573: | 694 | case e1000_82573: |
695 | if (!e1000_is_onboard_nvm_eeprom(hw)) { | 695 | if (!e1000_is_onboard_nvm_eeprom(hw)) { |
696 | udelay(10); | 696 | udelay(10); |
697 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 697 | ctrl_ext = er32(CTRL_EXT); |
698 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; | 698 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; |
699 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 699 | ew32(CTRL_EXT, ctrl_ext); |
700 | E1000_WRITE_FLUSH(hw); | 700 | E1000_WRITE_FLUSH(); |
701 | } | 701 | } |
702 | /* fall through */ | 702 | /* fall through */ |
703 | default: | 703 | default: |
@@ -710,27 +710,27 @@ s32 e1000_reset_hw(struct e1000_hw *hw) | |||
710 | 710 | ||
711 | /* Disable HW ARPs on ASF enabled adapters */ | 711 | /* Disable HW ARPs on ASF enabled adapters */ |
712 | if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) { | 712 | if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) { |
713 | manc = E1000_READ_REG(hw, MANC); | 713 | manc = er32(MANC); |
714 | manc &= ~(E1000_MANC_ARP_EN); | 714 | manc &= ~(E1000_MANC_ARP_EN); |
715 | E1000_WRITE_REG(hw, MANC, manc); | 715 | ew32(MANC, manc); |
716 | } | 716 | } |
717 | 717 | ||
718 | if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { | 718 | if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { |
719 | e1000_phy_init_script(hw); | 719 | e1000_phy_init_script(hw); |
720 | 720 | ||
721 | /* Configure activity LED after PHY reset */ | 721 | /* Configure activity LED after PHY reset */ |
722 | led_ctrl = E1000_READ_REG(hw, LEDCTL); | 722 | led_ctrl = er32(LEDCTL); |
723 | led_ctrl &= IGP_ACTIVITY_LED_MASK; | 723 | led_ctrl &= IGP_ACTIVITY_LED_MASK; |
724 | led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); | 724 | led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); |
725 | E1000_WRITE_REG(hw, LEDCTL, led_ctrl); | 725 | ew32(LEDCTL, led_ctrl); |
726 | } | 726 | } |
727 | 727 | ||
728 | /* Clear interrupt mask to stop board from generating interrupts */ | 728 | /* Clear interrupt mask to stop board from generating interrupts */ |
729 | DEBUGOUT("Masking off all interrupts\n"); | 729 | DEBUGOUT("Masking off all interrupts\n"); |
730 | E1000_WRITE_REG(hw, IMC, 0xffffffff); | 730 | ew32(IMC, 0xffffffff); |
731 | 731 | ||
732 | /* Clear any pending interrupt events. */ | 732 | /* Clear any pending interrupt events. */ |
733 | icr = E1000_READ_REG(hw, ICR); | 733 | icr = er32(ICR); |
734 | 734 | ||
735 | /* If MWI was previously enabled, reenable it. */ | 735 | /* If MWI was previously enabled, reenable it. */ |
736 | if (hw->mac_type == e1000_82542_rev2_0) { | 736 | if (hw->mac_type == e1000_82542_rev2_0) { |
@@ -739,9 +739,9 @@ s32 e1000_reset_hw(struct e1000_hw *hw) | |||
739 | } | 739 | } |
740 | 740 | ||
741 | if (hw->mac_type == e1000_ich8lan) { | 741 | if (hw->mac_type == e1000_ich8lan) { |
742 | u32 kab = E1000_READ_REG(hw, KABGTXD); | 742 | u32 kab = er32(KABGTXD); |
743 | kab |= E1000_KABGTXD_BGSQLBIAS; | 743 | kab |= E1000_KABGTXD_BGSQLBIAS; |
744 | E1000_WRITE_REG(hw, KABGTXD, kab); | 744 | ew32(KABGTXD, kab); |
745 | } | 745 | } |
746 | 746 | ||
747 | return E1000_SUCCESS; | 747 | return E1000_SUCCESS; |
@@ -766,22 +766,22 @@ static void e1000_initialize_hardware_bits(struct e1000_hw *hw) | |||
766 | u32 reg_txdctl, reg_txdctl1; | 766 | u32 reg_txdctl, reg_txdctl1; |
767 | 767 | ||
768 | /* link autonegotiation/sync workarounds */ | 768 | /* link autonegotiation/sync workarounds */ |
769 | reg_tarc0 = E1000_READ_REG(hw, TARC0); | 769 | reg_tarc0 = er32(TARC0); |
770 | reg_tarc0 &= ~((1 << 30)|(1 << 29)|(1 << 28)|(1 << 27)); | 770 | reg_tarc0 &= ~((1 << 30)|(1 << 29)|(1 << 28)|(1 << 27)); |
771 | 771 | ||
772 | /* Enable not-done TX descriptor counting */ | 772 | /* Enable not-done TX descriptor counting */ |
773 | reg_txdctl = E1000_READ_REG(hw, TXDCTL); | 773 | reg_txdctl = er32(TXDCTL); |
774 | reg_txdctl |= E1000_TXDCTL_COUNT_DESC; | 774 | reg_txdctl |= E1000_TXDCTL_COUNT_DESC; |
775 | E1000_WRITE_REG(hw, TXDCTL, reg_txdctl); | 775 | ew32(TXDCTL, reg_txdctl); |
776 | reg_txdctl1 = E1000_READ_REG(hw, TXDCTL1); | 776 | reg_txdctl1 = er32(TXDCTL1); |
777 | reg_txdctl1 |= E1000_TXDCTL_COUNT_DESC; | 777 | reg_txdctl1 |= E1000_TXDCTL_COUNT_DESC; |
778 | E1000_WRITE_REG(hw, TXDCTL1, reg_txdctl1); | 778 | ew32(TXDCTL1, reg_txdctl1); |
779 | 779 | ||
780 | switch (hw->mac_type) { | 780 | switch (hw->mac_type) { |
781 | case e1000_82571: | 781 | case e1000_82571: |
782 | case e1000_82572: | 782 | case e1000_82572: |
783 | /* Clear PHY TX compatible mode bits */ | 783 | /* Clear PHY TX compatible mode bits */ |
784 | reg_tarc1 = E1000_READ_REG(hw, TARC1); | 784 | reg_tarc1 = er32(TARC1); |
785 | reg_tarc1 &= ~((1 << 30)|(1 << 29)); | 785 | reg_tarc1 &= ~((1 << 30)|(1 << 29)); |
786 | 786 | ||
787 | /* link autonegotiation/sync workarounds */ | 787 | /* link autonegotiation/sync workarounds */ |
@@ -791,25 +791,25 @@ static void e1000_initialize_hardware_bits(struct e1000_hw *hw) | |||
791 | reg_tarc1 |= ((1 << 26)|(1 << 25)|(1 << 24)); | 791 | reg_tarc1 |= ((1 << 26)|(1 << 25)|(1 << 24)); |
792 | 792 | ||
793 | /* Multiple read bit is reversed polarity */ | 793 | /* Multiple read bit is reversed polarity */ |
794 | reg_tctl = E1000_READ_REG(hw, TCTL); | 794 | reg_tctl = er32(TCTL); |
795 | if (reg_tctl & E1000_TCTL_MULR) | 795 | if (reg_tctl & E1000_TCTL_MULR) |
796 | reg_tarc1 &= ~(1 << 28); | 796 | reg_tarc1 &= ~(1 << 28); |
797 | else | 797 | else |
798 | reg_tarc1 |= (1 << 28); | 798 | reg_tarc1 |= (1 << 28); |
799 | 799 | ||
800 | E1000_WRITE_REG(hw, TARC1, reg_tarc1); | 800 | ew32(TARC1, reg_tarc1); |
801 | break; | 801 | break; |
802 | case e1000_82573: | 802 | case e1000_82573: |
803 | reg_ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 803 | reg_ctrl_ext = er32(CTRL_EXT); |
804 | reg_ctrl_ext &= ~(1 << 23); | 804 | reg_ctrl_ext &= ~(1 << 23); |
805 | reg_ctrl_ext |= (1 << 22); | 805 | reg_ctrl_ext |= (1 << 22); |
806 | 806 | ||
807 | /* TX byte count fix */ | 807 | /* TX byte count fix */ |
808 | reg_ctrl = E1000_READ_REG(hw, CTRL); | 808 | reg_ctrl = er32(CTRL); |
809 | reg_ctrl &= ~(1 << 29); | 809 | reg_ctrl &= ~(1 << 29); |
810 | 810 | ||
811 | E1000_WRITE_REG(hw, CTRL_EXT, reg_ctrl_ext); | 811 | ew32(CTRL_EXT, reg_ctrl_ext); |
812 | E1000_WRITE_REG(hw, CTRL, reg_ctrl); | 812 | ew32(CTRL, reg_ctrl); |
813 | break; | 813 | break; |
814 | case e1000_80003es2lan: | 814 | case e1000_80003es2lan: |
815 | /* improve small packet performace for fiber/serdes */ | 815 | /* improve small packet performace for fiber/serdes */ |
@@ -819,14 +819,14 @@ static void e1000_initialize_hardware_bits(struct e1000_hw *hw) | |||
819 | } | 819 | } |
820 | 820 | ||
821 | /* Multiple read bit is reversed polarity */ | 821 | /* Multiple read bit is reversed polarity */ |
822 | reg_tctl = E1000_READ_REG(hw, TCTL); | 822 | reg_tctl = er32(TCTL); |
823 | reg_tarc1 = E1000_READ_REG(hw, TARC1); | 823 | reg_tarc1 = er32(TARC1); |
824 | if (reg_tctl & E1000_TCTL_MULR) | 824 | if (reg_tctl & E1000_TCTL_MULR) |
825 | reg_tarc1 &= ~(1 << 28); | 825 | reg_tarc1 &= ~(1 << 28); |
826 | else | 826 | else |
827 | reg_tarc1 |= (1 << 28); | 827 | reg_tarc1 |= (1 << 28); |
828 | 828 | ||
829 | E1000_WRITE_REG(hw, TARC1, reg_tarc1); | 829 | ew32(TARC1, reg_tarc1); |
830 | break; | 830 | break; |
831 | case e1000_ich8lan: | 831 | case e1000_ich8lan: |
832 | /* Reduce concurrent DMA requests to 3 from 4 */ | 832 | /* Reduce concurrent DMA requests to 3 from 4 */ |
@@ -835,16 +835,16 @@ static void e1000_initialize_hardware_bits(struct e1000_hw *hw) | |||
835 | (hw->device_id != E1000_DEV_ID_ICH8_IGP_M))) | 835 | (hw->device_id != E1000_DEV_ID_ICH8_IGP_M))) |
836 | reg_tarc0 |= ((1 << 29)|(1 << 28)); | 836 | reg_tarc0 |= ((1 << 29)|(1 << 28)); |
837 | 837 | ||
838 | reg_ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 838 | reg_ctrl_ext = er32(CTRL_EXT); |
839 | reg_ctrl_ext |= (1 << 22); | 839 | reg_ctrl_ext |= (1 << 22); |
840 | E1000_WRITE_REG(hw, CTRL_EXT, reg_ctrl_ext); | 840 | ew32(CTRL_EXT, reg_ctrl_ext); |
841 | 841 | ||
842 | /* workaround TX hang with TSO=on */ | 842 | /* workaround TX hang with TSO=on */ |
843 | reg_tarc0 |= ((1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)); | 843 | reg_tarc0 |= ((1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)); |
844 | 844 | ||
845 | /* Multiple read bit is reversed polarity */ | 845 | /* Multiple read bit is reversed polarity */ |
846 | reg_tctl = E1000_READ_REG(hw, TCTL); | 846 | reg_tctl = er32(TCTL); |
847 | reg_tarc1 = E1000_READ_REG(hw, TARC1); | 847 | reg_tarc1 = er32(TARC1); |
848 | if (reg_tctl & E1000_TCTL_MULR) | 848 | if (reg_tctl & E1000_TCTL_MULR) |
849 | reg_tarc1 &= ~(1 << 28); | 849 | reg_tarc1 &= ~(1 << 28); |
850 | else | 850 | else |
@@ -853,13 +853,13 @@ static void e1000_initialize_hardware_bits(struct e1000_hw *hw) | |||
853 | /* workaround TX hang with TSO=on */ | 853 | /* workaround TX hang with TSO=on */ |
854 | reg_tarc1 |= ((1 << 30)|(1 << 26)|(1 << 24)); | 854 | reg_tarc1 |= ((1 << 30)|(1 << 26)|(1 << 24)); |
855 | 855 | ||
856 | E1000_WRITE_REG(hw, TARC1, reg_tarc1); | 856 | ew32(TARC1, reg_tarc1); |
857 | break; | 857 | break; |
858 | default: | 858 | default: |
859 | break; | 859 | break; |
860 | } | 860 | } |
861 | 861 | ||
862 | E1000_WRITE_REG(hw, TARC0, reg_tarc0); | 862 | ew32(TARC0, reg_tarc0); |
863 | } | 863 | } |
864 | } | 864 | } |
865 | 865 | ||
@@ -890,9 +890,9 @@ s32 e1000_init_hw(struct e1000_hw *hw) | |||
890 | ((hw->revision_id < 3) || | 890 | ((hw->revision_id < 3) || |
891 | ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) && | 891 | ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) && |
892 | (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))) { | 892 | (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))) { |
893 | reg_data = E1000_READ_REG(hw, STATUS); | 893 | reg_data = er32(STATUS); |
894 | reg_data &= ~0x80000000; | 894 | reg_data &= ~0x80000000; |
895 | E1000_WRITE_REG(hw, STATUS, reg_data); | 895 | ew32(STATUS, reg_data); |
896 | } | 896 | } |
897 | 897 | ||
898 | /* Initialize Identification LED */ | 898 | /* Initialize Identification LED */ |
@@ -913,7 +913,7 @@ s32 e1000_init_hw(struct e1000_hw *hw) | |||
913 | /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */ | 913 | /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */ |
914 | if (hw->mac_type != e1000_ich8lan) { | 914 | if (hw->mac_type != e1000_ich8lan) { |
915 | if (hw->mac_type < e1000_82545_rev_3) | 915 | if (hw->mac_type < e1000_82545_rev_3) |
916 | E1000_WRITE_REG(hw, VET, 0); | 916 | ew32(VET, 0); |
917 | e1000_clear_vfta(hw); | 917 | e1000_clear_vfta(hw); |
918 | } | 918 | } |
919 | 919 | ||
@@ -921,8 +921,8 @@ s32 e1000_init_hw(struct e1000_hw *hw) | |||
921 | if (hw->mac_type == e1000_82542_rev2_0) { | 921 | if (hw->mac_type == e1000_82542_rev2_0) { |
922 | DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); | 922 | DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); |
923 | e1000_pci_clear_mwi(hw); | 923 | e1000_pci_clear_mwi(hw); |
924 | E1000_WRITE_REG(hw, RCTL, E1000_RCTL_RST); | 924 | ew32(RCTL, E1000_RCTL_RST); |
925 | E1000_WRITE_FLUSH(hw); | 925 | E1000_WRITE_FLUSH(); |
926 | msleep(5); | 926 | msleep(5); |
927 | } | 927 | } |
928 | 928 | ||
@@ -933,8 +933,8 @@ s32 e1000_init_hw(struct e1000_hw *hw) | |||
933 | 933 | ||
934 | /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ | 934 | /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ |
935 | if (hw->mac_type == e1000_82542_rev2_0) { | 935 | if (hw->mac_type == e1000_82542_rev2_0) { |
936 | E1000_WRITE_REG(hw, RCTL, 0); | 936 | ew32(RCTL, 0); |
937 | E1000_WRITE_FLUSH(hw); | 937 | E1000_WRITE_FLUSH(); |
938 | msleep(1); | 938 | msleep(1); |
939 | if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) | 939 | if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) |
940 | e1000_pci_set_mwi(hw); | 940 | e1000_pci_set_mwi(hw); |
@@ -949,7 +949,7 @@ s32 e1000_init_hw(struct e1000_hw *hw) | |||
949 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); | 949 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); |
950 | /* use write flush to prevent Memory Write Block (MWB) from | 950 | /* use write flush to prevent Memory Write Block (MWB) from |
951 | * occuring when accessing our register space */ | 951 | * occuring when accessing our register space */ |
952 | E1000_WRITE_FLUSH(hw); | 952 | E1000_WRITE_FLUSH(); |
953 | } | 953 | } |
954 | 954 | ||
955 | /* Set the PCI priority bit correctly in the CTRL register. This | 955 | /* Set the PCI priority bit correctly in the CTRL register. This |
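Not everything is shortened: the array accessors, E1000_WRITE_REG_ARRAY(hw, MTA, i, 0) above and E1000_READ_REG_ARRAY(hw, FFLT, 0x0001) further down, appear unchanged on both sides of the hunks shown here and keep their explicit hw argument; in this pass only the scalar register reads and writes are converted.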
@@ -958,8 +958,8 @@ s32 e1000_init_hw(struct e1000_hw *hw) | |||
958 | * 82542 and 82543 silicon. | 958 | * 82542 and 82543 silicon. |
959 | */ | 959 | */ |
960 | if (hw->dma_fairness && hw->mac_type <= e1000_82543) { | 960 | if (hw->dma_fairness && hw->mac_type <= e1000_82543) { |
961 | ctrl = E1000_READ_REG(hw, CTRL); | 961 | ctrl = er32(CTRL); |
962 | E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR); | 962 | ew32(CTRL, ctrl | E1000_CTRL_PRIOR); |
963 | } | 963 | } |
964 | 964 | ||
965 | switch (hw->mac_type) { | 965 | switch (hw->mac_type) { |
@@ -982,9 +982,9 @@ s32 e1000_init_hw(struct e1000_hw *hw) | |||
982 | 982 | ||
983 | /* Set the transmit descriptor write-back policy */ | 983 | /* Set the transmit descriptor write-back policy */ |
984 | if (hw->mac_type > e1000_82544) { | 984 | if (hw->mac_type > e1000_82544) { |
985 | ctrl = E1000_READ_REG(hw, TXDCTL); | 985 | ctrl = er32(TXDCTL); |
986 | ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; | 986 | ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; |
987 | E1000_WRITE_REG(hw, TXDCTL, ctrl); | 987 | ew32(TXDCTL, ctrl); |
988 | } | 988 | } |
989 | 989 | ||
990 | if (hw->mac_type == e1000_82573) { | 990 | if (hw->mac_type == e1000_82573) { |
@@ -996,21 +996,21 @@ s32 e1000_init_hw(struct e1000_hw *hw) | |||
996 | break; | 996 | break; |
997 | case e1000_80003es2lan: | 997 | case e1000_80003es2lan: |
998 | /* Enable retransmit on late collisions */ | 998 | /* Enable retransmit on late collisions */ |
999 | reg_data = E1000_READ_REG(hw, TCTL); | 999 | reg_data = er32(TCTL); |
1000 | reg_data |= E1000_TCTL_RTLC; | 1000 | reg_data |= E1000_TCTL_RTLC; |
1001 | E1000_WRITE_REG(hw, TCTL, reg_data); | 1001 | ew32(TCTL, reg_data); |
1002 | 1002 | ||
1003 | /* Configure Gigabit Carry Extend Padding */ | 1003 | /* Configure Gigabit Carry Extend Padding */ |
1004 | reg_data = E1000_READ_REG(hw, TCTL_EXT); | 1004 | reg_data = er32(TCTL_EXT); |
1005 | reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; | 1005 | reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; |
1006 | reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; | 1006 | reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; |
1007 | E1000_WRITE_REG(hw, TCTL_EXT, reg_data); | 1007 | ew32(TCTL_EXT, reg_data); |
1008 | 1008 | ||
1009 | /* Configure Transmit Inter-Packet Gap */ | 1009 | /* Configure Transmit Inter-Packet Gap */ |
1010 | reg_data = E1000_READ_REG(hw, TIPG); | 1010 | reg_data = er32(TIPG); |
1011 | reg_data &= ~E1000_TIPG_IPGT_MASK; | 1011 | reg_data &= ~E1000_TIPG_IPGT_MASK; |
1012 | reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; | 1012 | reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; |
1013 | E1000_WRITE_REG(hw, TIPG, reg_data); | 1013 | ew32(TIPG, reg_data); |
1014 | 1014 | ||
1015 | reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001); | 1015 | reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001); |
1016 | reg_data &= ~0x00100000; | 1016 | reg_data &= ~0x00100000; |
@@ -1019,17 +1019,17 @@ s32 e1000_init_hw(struct e1000_hw *hw) | |||
1019 | case e1000_82571: | 1019 | case e1000_82571: |
1020 | case e1000_82572: | 1020 | case e1000_82572: |
1021 | case e1000_ich8lan: | 1021 | case e1000_ich8lan: |
1022 | ctrl = E1000_READ_REG(hw, TXDCTL1); | 1022 | ctrl = er32(TXDCTL1); |
1023 | ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; | 1023 | ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; |
1024 | E1000_WRITE_REG(hw, TXDCTL1, ctrl); | 1024 | ew32(TXDCTL1, ctrl); |
1025 | break; | 1025 | break; |
1026 | } | 1026 | } |
1027 | 1027 | ||
1028 | 1028 | ||
1029 | if (hw->mac_type == e1000_82573) { | 1029 | if (hw->mac_type == e1000_82573) { |
1030 | u32 gcr = E1000_READ_REG(hw, GCR); | 1030 | u32 gcr = er32(GCR); |
1031 | gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; | 1031 | gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; |
1032 | E1000_WRITE_REG(hw, GCR, gcr); | 1032 | ew32(GCR, gcr); |
1033 | } | 1033 | } |
1034 | 1034 | ||
1035 | /* Clear all of the statistics registers (clear on read). It is | 1035 | /* Clear all of the statistics registers (clear on read). It is |
@@ -1046,11 +1046,11 @@ s32 e1000_init_hw(struct e1000_hw *hw) | |||
1046 | 1046 | ||
1047 | if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || | 1047 | if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || |
1048 | hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { | 1048 | hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { |
1049 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 1049 | ctrl_ext = er32(CTRL_EXT); |
1050 | /* Relaxed ordering must be disabled to avoid a parity | 1050 | /* Relaxed ordering must be disabled to avoid a parity |
1051 | * error crash in a PCI slot. */ | 1051 | * error crash in a PCI slot. */ |
1052 | ctrl_ext |= E1000_CTRL_EXT_RO_DIS; | 1052 | ctrl_ext |= E1000_CTRL_EXT_RO_DIS; |
1053 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 1053 | ew32(CTRL_EXT, ctrl_ext); |
1054 | } | 1054 | } |
1055 | 1055 | ||
1056 | return ret_val; | 1056 | return ret_val; |
@@ -1181,7 +1181,7 @@ s32 e1000_setup_link(struct e1000_hw *hw) | |||
1181 | } | 1181 | } |
1182 | ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << | 1182 | ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << |
1183 | SWDPIO__EXT_SHIFT); | 1183 | SWDPIO__EXT_SHIFT); |
1184 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 1184 | ew32(CTRL_EXT, ctrl_ext); |
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | /* Call the necessary subroutine to configure the link. */ | 1187 | /* Call the necessary subroutine to configure the link. */ |
@@ -1198,12 +1198,12 @@ s32 e1000_setup_link(struct e1000_hw *hw) | |||
1198 | 1198 | ||
1199 | /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */ | 1199 | /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */ |
1200 | if (hw->mac_type != e1000_ich8lan) { | 1200 | if (hw->mac_type != e1000_ich8lan) { |
1201 | E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE); | 1201 | ew32(FCT, FLOW_CONTROL_TYPE); |
1202 | E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH); | 1202 | ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); |
1203 | E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW); | 1203 | ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); |
1204 | } | 1204 | } |
1205 | 1205 | ||
1206 | E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time); | 1206 | ew32(FCTTV, hw->fc_pause_time); |
1207 | 1207 | ||
1208 | /* Set the flow control receive threshold registers. Normally, | 1208 | /* Set the flow control receive threshold registers. Normally, |
1209 | * these registers will be set to a default threshold that may be | 1209 | * these registers will be set to a default threshold that may be |
@@ -1212,18 +1212,18 @@ s32 e1000_setup_link(struct e1000_hw *hw) | |||
1212 | * registers will be set to 0. | 1212 | * registers will be set to 0. |
1213 | */ | 1213 | */ |
1214 | if (!(hw->fc & E1000_FC_TX_PAUSE)) { | 1214 | if (!(hw->fc & E1000_FC_TX_PAUSE)) { |
1215 | E1000_WRITE_REG(hw, FCRTL, 0); | 1215 | ew32(FCRTL, 0); |
1216 | E1000_WRITE_REG(hw, FCRTH, 0); | 1216 | ew32(FCRTH, 0); |
1217 | } else { | 1217 | } else { |
1218 | /* We need to set up the Receive Threshold high and low water marks | 1218 | /* We need to set up the Receive Threshold high and low water marks |
1219 | * as well as (optionally) enabling the transmission of XON frames. | 1219 | * as well as (optionally) enabling the transmission of XON frames. |
1220 | */ | 1220 | */ |
1221 | if (hw->fc_send_xon) { | 1221 | if (hw->fc_send_xon) { |
1222 | E1000_WRITE_REG(hw, FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); | 1222 | ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); |
1223 | E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water); | 1223 | ew32(FCRTH, hw->fc_high_water); |
1224 | } else { | 1224 | } else { |
1225 | E1000_WRITE_REG(hw, FCRTL, hw->fc_low_water); | 1225 | ew32(FCRTL, hw->fc_low_water); |
1226 | E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water); | 1226 | ew32(FCRTH, hw->fc_high_water); |
1227 | } | 1227 | } |
1228 | } | 1228 | } |
1229 | return ret_val; | 1229 | return ret_val; |
@@ -1255,7 +1255,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
1255 | * loopback mode is disabled during initialization. | 1255 | * loopback mode is disabled during initialization. |
1256 | */ | 1256 | */ |
1257 | if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) | 1257 | if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) |
1258 | E1000_WRITE_REG(hw, SCTL, E1000_DISABLE_SERDES_LOOPBACK); | 1258 | ew32(SCTL, E1000_DISABLE_SERDES_LOOPBACK); |
1259 | 1259 | ||
1260 | /* On adapters with a MAC newer than 82544, SWDP 1 will be | 1260 | /* On adapters with a MAC newer than 82544, SWDP 1 will be |
1261 | * set when the optics detect a signal. On older adapters, it will be | 1261 | * set when the optics detect a signal. On older adapters, it will be |
@@ -1263,7 +1263,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
1263 | * If we're on serdes media, adjust the output amplitude to value | 1263 | * If we're on serdes media, adjust the output amplitude to value |
1264 | * set in the EEPROM. | 1264 | * set in the EEPROM. |
1265 | */ | 1265 | */ |
1266 | ctrl = E1000_READ_REG(hw, CTRL); | 1266 | ctrl = er32(CTRL); |
1267 | if (hw->media_type == e1000_media_type_fiber) | 1267 | if (hw->media_type == e1000_media_type_fiber) |
1268 | signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; | 1268 | signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; |
1269 | 1269 | ||
@@ -1334,9 +1334,9 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
1334 | */ | 1334 | */ |
1335 | DEBUGOUT("Auto-negotiation enabled\n"); | 1335 | DEBUGOUT("Auto-negotiation enabled\n"); |
1336 | 1336 | ||
1337 | E1000_WRITE_REG(hw, TXCW, txcw); | 1337 | ew32(TXCW, txcw); |
1338 | E1000_WRITE_REG(hw, CTRL, ctrl); | 1338 | ew32(CTRL, ctrl); |
1339 | E1000_WRITE_FLUSH(hw); | 1339 | E1000_WRITE_FLUSH(); |
1340 | 1340 | ||
1341 | hw->txcw = txcw; | 1341 | hw->txcw = txcw; |
1342 | msleep(1); | 1342 | msleep(1); |
@@ -1348,11 +1348,11 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
1348 | * For internal serdes, we just assume a signal is present, then poll. | 1348 | * For internal serdes, we just assume a signal is present, then poll. |
1349 | */ | 1349 | */ |
1350 | if (hw->media_type == e1000_media_type_internal_serdes || | 1350 | if (hw->media_type == e1000_media_type_internal_serdes || |
1351 | (E1000_READ_REG(hw, CTRL) & E1000_CTRL_SWDPIN1) == signal) { | 1351 | (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { |
1352 | DEBUGOUT("Looking for Link\n"); | 1352 | DEBUGOUT("Looking for Link\n"); |
1353 | for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { | 1353 | for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { |
1354 | msleep(10); | 1354 | msleep(10); |
1355 | status = E1000_READ_REG(hw, STATUS); | 1355 | status = er32(STATUS); |
1356 | if (status & E1000_STATUS_LU) break; | 1356 | if (status & E1000_STATUS_LU) break; |
1357 | } | 1357 | } |
1358 | if (i == (LINK_UP_TIMEOUT / 10)) { | 1358 | if (i == (LINK_UP_TIMEOUT / 10)) { |
@@ -1392,7 +1392,7 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) | |||
1392 | 1392 | ||
1393 | DEBUGFUNC("e1000_copper_link_preconfig"); | 1393 | DEBUGFUNC("e1000_copper_link_preconfig"); |
1394 | 1394 | ||
1395 | ctrl = E1000_READ_REG(hw, CTRL); | 1395 | ctrl = er32(CTRL); |
1396 | /* With 82543, we need to force speed and duplex on the MAC equal to what | 1396 | /* With 82543, we need to force speed and duplex on the MAC equal to what |
1397 | * the PHY speed and duplex configuration is. In addition, we need to | 1397 | * the PHY speed and duplex configuration is. In addition, we need to |
1398 | * perform a hardware reset on the PHY to take it out of reset. | 1398 | * perform a hardware reset on the PHY to take it out of reset. |
@@ -1400,10 +1400,10 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) | |||
1400 | if (hw->mac_type > e1000_82543) { | 1400 | if (hw->mac_type > e1000_82543) { |
1401 | ctrl |= E1000_CTRL_SLU; | 1401 | ctrl |= E1000_CTRL_SLU; |
1402 | ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | 1402 | ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); |
1403 | E1000_WRITE_REG(hw, CTRL, ctrl); | 1403 | ew32(CTRL, ctrl); |
1404 | } else { | 1404 | } else { |
1405 | ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); | 1405 | ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); |
1406 | E1000_WRITE_REG(hw, CTRL, ctrl); | 1406 | ew32(CTRL, ctrl); |
1407 | ret_val = e1000_phy_hw_reset(hw); | 1407 | ret_val = e1000_phy_hw_reset(hw); |
1408 | if (ret_val) | 1408 | if (ret_val) |
1409 | return ret_val; | 1409 | return ret_val; |
@@ -1464,10 +1464,10 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) | |||
1464 | msleep(15); | 1464 | msleep(15); |
1465 | if (hw->mac_type != e1000_ich8lan) { | 1465 | if (hw->mac_type != e1000_ich8lan) { |
1466 | /* Configure activity LED after PHY reset */ | 1466 | /* Configure activity LED after PHY reset */ |
1467 | led_ctrl = E1000_READ_REG(hw, LEDCTL); | 1467 | led_ctrl = er32(LEDCTL); |
1468 | led_ctrl &= IGP_ACTIVITY_LED_MASK; | 1468 | led_ctrl &= IGP_ACTIVITY_LED_MASK; |
1469 | led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); | 1469 | led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); |
1470 | E1000_WRITE_REG(hw, LEDCTL, led_ctrl); | 1470 | ew32(LEDCTL, led_ctrl); |
1471 | } | 1471 | } |
1472 | 1472 | ||
1473 | /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ | 1473 | /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ |
@@ -1680,9 +1680,9 @@ static s32 e1000_copper_link_ggp_setup(struct e1000_hw *hw) | |||
1680 | if (ret_val) | 1680 | if (ret_val) |
1681 | return ret_val; | 1681 | return ret_val; |
1682 | 1682 | ||
1683 | reg_data = E1000_READ_REG(hw, CTRL_EXT); | 1683 | reg_data = er32(CTRL_EXT); |
1684 | reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); | 1684 | reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); |
1685 | E1000_WRITE_REG(hw, CTRL_EXT, reg_data); | 1685 | ew32(CTRL_EXT, reg_data); |
1686 | 1686 | ||
1687 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, | 1687 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, |
1688 | &phy_data); | 1688 | &phy_data); |
@@ -2074,10 +2074,10 @@ static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex) | |||
2074 | return ret_val; | 2074 | return ret_val; |
2075 | 2075 | ||
2076 | /* Configure Transmit Inter-Packet Gap */ | 2076 | /* Configure Transmit Inter-Packet Gap */ |
2077 | tipg = E1000_READ_REG(hw, TIPG); | 2077 | tipg = er32(TIPG); |
2078 | tipg &= ~E1000_TIPG_IPGT_MASK; | 2078 | tipg &= ~E1000_TIPG_IPGT_MASK; |
2079 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100; | 2079 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100; |
2080 | E1000_WRITE_REG(hw, TIPG, tipg); | 2080 | ew32(TIPG, tipg); |
2081 | 2081 | ||
2082 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); | 2082 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); |
2083 | 2083 | ||
@@ -2109,10 +2109,10 @@ static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw) | |||
2109 | return ret_val; | 2109 | return ret_val; |
2110 | 2110 | ||
2111 | /* Configure Transmit Inter-Packet Gap */ | 2111 | /* Configure Transmit Inter-Packet Gap */ |
2112 | tipg = E1000_READ_REG(hw, TIPG); | 2112 | tipg = er32(TIPG); |
2113 | tipg &= ~E1000_TIPG_IPGT_MASK; | 2113 | tipg &= ~E1000_TIPG_IPGT_MASK; |
2114 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; | 2114 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; |
2115 | E1000_WRITE_REG(hw, TIPG, tipg); | 2115 | ew32(TIPG, tipg); |
2116 | 2116 | ||
2117 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); | 2117 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); |
2118 | 2118 | ||
@@ -2295,7 +2295,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
2295 | DEBUGOUT1("hw->fc = %d\n", hw->fc); | 2295 | DEBUGOUT1("hw->fc = %d\n", hw->fc); |
2296 | 2296 | ||
2297 | /* Read the Device Control Register. */ | 2297 | /* Read the Device Control Register. */ |
2298 | ctrl = E1000_READ_REG(hw, CTRL); | 2298 | ctrl = er32(CTRL); |
2299 | 2299 | ||
2300 | /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ | 2300 | /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ |
2301 | ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | 2301 | ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); |
@@ -2350,7 +2350,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
2350 | e1000_config_collision_dist(hw); | 2350 | e1000_config_collision_dist(hw); |
2351 | 2351 | ||
2352 | /* Write the configured values back to the Device Control Reg. */ | 2352 | /* Write the configured values back to the Device Control Reg. */ |
2353 | E1000_WRITE_REG(hw, CTRL, ctrl); | 2353 | ew32(CTRL, ctrl); |
2354 | 2354 | ||
2355 | if ((hw->phy_type == e1000_phy_m88) || | 2355 | if ((hw->phy_type == e1000_phy_m88) || |
2356 | (hw->phy_type == e1000_phy_gg82563)) { | 2356 | (hw->phy_type == e1000_phy_gg82563)) { |
@@ -2539,13 +2539,13 @@ void e1000_config_collision_dist(struct e1000_hw *hw) | |||
2539 | else | 2539 | else |
2540 | coll_dist = E1000_COLLISION_DISTANCE; | 2540 | coll_dist = E1000_COLLISION_DISTANCE; |
2541 | 2541 | ||
2542 | tctl = E1000_READ_REG(hw, TCTL); | 2542 | tctl = er32(TCTL); |
2543 | 2543 | ||
2544 | tctl &= ~E1000_TCTL_COLD; | 2544 | tctl &= ~E1000_TCTL_COLD; |
2545 | tctl |= coll_dist << E1000_COLD_SHIFT; | 2545 | tctl |= coll_dist << E1000_COLD_SHIFT; |
2546 | 2546 | ||
2547 | E1000_WRITE_REG(hw, TCTL, tctl); | 2547 | ew32(TCTL, tctl); |
2548 | E1000_WRITE_FLUSH(hw); | 2548 | E1000_WRITE_FLUSH(); |
2549 | } | 2549 | } |
2550 | 2550 | ||
2551 | /****************************************************************************** | 2551 | /****************************************************************************** |
@@ -2573,7 +2573,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) | |||
2573 | /* Read the Device Control Register and set the bits to Force Speed | 2573 | /* Read the Device Control Register and set the bits to Force Speed |
2574 | * and Duplex. | 2574 | * and Duplex. |
2575 | */ | 2575 | */ |
2576 | ctrl = E1000_READ_REG(hw, CTRL); | 2576 | ctrl = er32(CTRL); |
2577 | ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | 2577 | ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); |
2578 | ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); | 2578 | ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); |
2579 | 2579 | ||
@@ -2600,7 +2600,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) | |||
2600 | ctrl |= E1000_CTRL_SPD_100; | 2600 | ctrl |= E1000_CTRL_SPD_100; |
2601 | 2601 | ||
2602 | /* Write the configured values back to the Device Control Reg. */ | 2602 | /* Write the configured values back to the Device Control Reg. */ |
2603 | E1000_WRITE_REG(hw, CTRL, ctrl); | 2603 | ew32(CTRL, ctrl); |
2604 | return E1000_SUCCESS; | 2604 | return E1000_SUCCESS; |
2605 | } | 2605 | } |
2606 | 2606 | ||
@@ -2622,7 +2622,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw) | |||
2622 | DEBUGFUNC("e1000_force_mac_fc"); | 2622 | DEBUGFUNC("e1000_force_mac_fc"); |
2623 | 2623 | ||
2624 | /* Get the current configuration of the Device Control Register */ | 2624 | /* Get the current configuration of the Device Control Register */ |
2625 | ctrl = E1000_READ_REG(hw, CTRL); | 2625 | ctrl = er32(CTRL); |
2626 | 2626 | ||
2627 | /* Because we didn't get link via the internal auto-negotiation | 2627 | /* Because we didn't get link via the internal auto-negotiation |
2628 | * mechanism (we either forced link or we got link via PHY | 2628 | * mechanism (we either forced link or we got link via PHY |
@@ -2666,7 +2666,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw) | |||
2666 | if (hw->mac_type == e1000_82542_rev2_0) | 2666 | if (hw->mac_type == e1000_82542_rev2_0) |
2667 | ctrl &= (~E1000_CTRL_TFCE); | 2667 | ctrl &= (~E1000_CTRL_TFCE); |
2668 | 2668 | ||
2669 | E1000_WRITE_REG(hw, CTRL, ctrl); | 2669 | ew32(CTRL, ctrl); |
2670 | return E1000_SUCCESS; | 2670 | return E1000_SUCCESS; |
2671 | } | 2671 | } |
2672 | 2672 | ||
@@ -2898,8 +2898,8 @@ s32 e1000_check_for_link(struct e1000_hw *hw) | |||
2898 | 2898 | ||
2899 | DEBUGFUNC("e1000_check_for_link"); | 2899 | DEBUGFUNC("e1000_check_for_link"); |
2900 | 2900 | ||
2901 | ctrl = E1000_READ_REG(hw, CTRL); | 2901 | ctrl = er32(CTRL); |
2902 | status = E1000_READ_REG(hw, STATUS); | 2902 | status = er32(STATUS); |
2903 | 2903 | ||
2904 | /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be | 2904 | /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be |
2905 | * set when the optics detect a signal. On older adapters, it will be | 2905 | * set when the optics detect a signal. On older adapters, it will be |
@@ -2907,7 +2907,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw) | |||
2907 | */ | 2907 | */ |
2908 | if ((hw->media_type == e1000_media_type_fiber) || | 2908 | if ((hw->media_type == e1000_media_type_fiber) || |
2909 | (hw->media_type == e1000_media_type_internal_serdes)) { | 2909 | (hw->media_type == e1000_media_type_internal_serdes)) { |
2910 | rxcw = E1000_READ_REG(hw, RXCW); | 2910 | rxcw = er32(RXCW); |
2911 | 2911 | ||
2912 | if (hw->media_type == e1000_media_type_fiber) { | 2912 | if (hw->media_type == e1000_media_type_fiber) { |
2913 | signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; | 2913 | signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; |
@@ -2953,11 +2953,11 @@ s32 e1000_check_for_link(struct e1000_hw *hw) | |||
2953 | (!hw->autoneg) && | 2953 | (!hw->autoneg) && |
2954 | (hw->forced_speed_duplex == e1000_10_full || | 2954 | (hw->forced_speed_duplex == e1000_10_full || |
2955 | hw->forced_speed_duplex == e1000_10_half)) { | 2955 | hw->forced_speed_duplex == e1000_10_half)) { |
2956 | E1000_WRITE_REG(hw, IMC, 0xffffffff); | 2956 | ew32(IMC, 0xffffffff); |
2957 | ret_val = e1000_polarity_reversal_workaround(hw); | 2957 | ret_val = e1000_polarity_reversal_workaround(hw); |
2958 | icr = E1000_READ_REG(hw, ICR); | 2958 | icr = er32(ICR); |
2959 | E1000_WRITE_REG(hw, ICS, (icr & ~E1000_ICS_LSC)); | 2959 | ew32(ICS, (icr & ~E1000_ICS_LSC)); |
2960 | E1000_WRITE_REG(hw, IMS, IMS_ENABLE_MASK); | 2960 | ew32(IMS, IMS_ENABLE_MASK); |
2961 | } | 2961 | } |
2962 | 2962 | ||
2963 | } else { | 2963 | } else { |
@@ -3022,9 +3022,9 @@ s32 e1000_check_for_link(struct e1000_hw *hw) | |||
3022 | */ | 3022 | */ |
3023 | if (hw->tbi_compatibility_on) { | 3023 | if (hw->tbi_compatibility_on) { |
3024 | /* If we previously were in the mode, turn it off. */ | 3024 | /* If we previously were in the mode, turn it off. */ |
3025 | rctl = E1000_READ_REG(hw, RCTL); | 3025 | rctl = er32(RCTL); |
3026 | rctl &= ~E1000_RCTL_SBP; | 3026 | rctl &= ~E1000_RCTL_SBP; |
3027 | E1000_WRITE_REG(hw, RCTL, rctl); | 3027 | ew32(RCTL, rctl); |
3028 | hw->tbi_compatibility_on = false; | 3028 | hw->tbi_compatibility_on = false; |
3029 | } | 3029 | } |
3030 | } else { | 3030 | } else { |
@@ -3035,9 +3035,9 @@ s32 e1000_check_for_link(struct e1000_hw *hw) | |||
3035 | */ | 3035 | */ |
3036 | if (!hw->tbi_compatibility_on) { | 3036 | if (!hw->tbi_compatibility_on) { |
3037 | hw->tbi_compatibility_on = true; | 3037 | hw->tbi_compatibility_on = true; |
3038 | rctl = E1000_READ_REG(hw, RCTL); | 3038 | rctl = er32(RCTL); |
3039 | rctl |= E1000_RCTL_SBP; | 3039 | rctl |= E1000_RCTL_SBP; |
3040 | E1000_WRITE_REG(hw, RCTL, rctl); | 3040 | ew32(RCTL, rctl); |
3041 | } | 3041 | } |
3042 | } | 3042 | } |
3043 | } | 3043 | } |
@@ -3061,12 +3061,12 @@ s32 e1000_check_for_link(struct e1000_hw *hw) | |||
3061 | DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n"); | 3061 | DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n"); |
3062 | 3062 | ||
3063 | /* Disable auto-negotiation in the TXCW register */ | 3063 | /* Disable auto-negotiation in the TXCW register */ |
3064 | E1000_WRITE_REG(hw, TXCW, (hw->txcw & ~E1000_TXCW_ANE)); | 3064 | ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); |
3065 | 3065 | ||
3066 | /* Force link-up and also force full-duplex. */ | 3066 | /* Force link-up and also force full-duplex. */ |
3067 | ctrl = E1000_READ_REG(hw, CTRL); | 3067 | ctrl = er32(CTRL); |
3068 | ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); | 3068 | ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); |
3069 | E1000_WRITE_REG(hw, CTRL, ctrl); | 3069 | ew32(CTRL, ctrl); |
3070 | 3070 | ||
3071 | /* Configure Flow Control after forcing link up. */ | 3071 | /* Configure Flow Control after forcing link up. */ |
3072 | ret_val = e1000_config_fc_after_link_up(hw); | 3072 | ret_val = e1000_config_fc_after_link_up(hw); |
@@ -3084,8 +3084,8 @@ s32 e1000_check_for_link(struct e1000_hw *hw) | |||
3084 | (hw->media_type == e1000_media_type_internal_serdes)) && | 3084 | (hw->media_type == e1000_media_type_internal_serdes)) && |
3085 | (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | 3085 | (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
3086 | DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); | 3086 | DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); |
3087 | E1000_WRITE_REG(hw, TXCW, hw->txcw); | 3087 | ew32(TXCW, hw->txcw); |
3088 | E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU)); | 3088 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
3089 | 3089 | ||
3090 | hw->serdes_link_down = false; | 3090 | hw->serdes_link_down = false; |
3091 | } | 3091 | } |
@@ -3093,10 +3093,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw) | |||
3093 | * based on MAC synchronization for internal serdes media type. | 3093 | * based on MAC synchronization for internal serdes media type. |
3094 | */ | 3094 | */ |
3095 | else if ((hw->media_type == e1000_media_type_internal_serdes) && | 3095 | else if ((hw->media_type == e1000_media_type_internal_serdes) && |
3096 | !(E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) { | 3096 | !(E1000_TXCW_ANE & er32(TXCW))) { |
3097 | /* SYNCH bit and IV bit are sticky. */ | 3097 | /* SYNCH bit and IV bit are sticky. */ |
3098 | udelay(10); | 3098 | udelay(10); |
3099 | if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) { | 3099 | if (E1000_RXCW_SYNCH & er32(RXCW)) { |
3100 | if (!(rxcw & E1000_RXCW_IV)) { | 3100 | if (!(rxcw & E1000_RXCW_IV)) { |
3101 | hw->serdes_link_down = false; | 3101 | hw->serdes_link_down = false; |
3102 | DEBUGOUT("SERDES: Link is up.\n"); | 3102 | DEBUGOUT("SERDES: Link is up.\n"); |
@@ -3107,8 +3107,8 @@ s32 e1000_check_for_link(struct e1000_hw *hw) | |||
3107 | } | 3107 | } |
3108 | } | 3108 | } |
3109 | if ((hw->media_type == e1000_media_type_internal_serdes) && | 3109 | if ((hw->media_type == e1000_media_type_internal_serdes) && |
3110 | (E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) { | 3110 | (E1000_TXCW_ANE & er32(TXCW))) { |
3111 | hw->serdes_link_down = !(E1000_STATUS_LU & E1000_READ_REG(hw, STATUS)); | 3111 | hw->serdes_link_down = !(E1000_STATUS_LU & er32(STATUS)); |
3112 | } | 3112 | } |
3113 | return E1000_SUCCESS; | 3113 | return E1000_SUCCESS; |
3114 | } | 3114 | } |
@@ -3129,7 +3129,7 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) | |||
3129 | DEBUGFUNC("e1000_get_speed_and_duplex"); | 3129 | DEBUGFUNC("e1000_get_speed_and_duplex"); |
3130 | 3130 | ||
3131 | if (hw->mac_type >= e1000_82543) { | 3131 | if (hw->mac_type >= e1000_82543) { |
3132 | status = E1000_READ_REG(hw, STATUS); | 3132 | status = er32(STATUS); |
3133 | if (status & E1000_STATUS_SPEED_1000) { | 3133 | if (status & E1000_STATUS_SPEED_1000) { |
3134 | *speed = SPEED_1000; | 3134 | *speed = SPEED_1000; |
3135 | DEBUGOUT("1000 Mbs, "); | 3135 | DEBUGOUT("1000 Mbs, "); |
@@ -3238,8 +3238,8 @@ static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) | |||
3238 | /* Raise the clock input to the Management Data Clock (by setting the MDC | 3238 | /* Raise the clock input to the Management Data Clock (by setting the MDC |
3239 | * bit), and then delay 10 microseconds. | 3239 | * bit), and then delay 10 microseconds. |
3240 | */ | 3240 | */ |
3241 | E1000_WRITE_REG(hw, CTRL, (*ctrl | E1000_CTRL_MDC)); | 3241 | ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); |
3242 | E1000_WRITE_FLUSH(hw); | 3242 | E1000_WRITE_FLUSH(); |
3243 | udelay(10); | 3243 | udelay(10); |
3244 | } | 3244 | } |
3245 | 3245 | ||
@@ -3254,8 +3254,8 @@ static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) | |||
3254 | /* Lower the clock input to the Management Data Clock (by clearing the MDC | 3254 | /* Lower the clock input to the Management Data Clock (by clearing the MDC |
3255 | * bit), and then delay 10 microseconds. | 3255 | * bit), and then delay 10 microseconds. |
3256 | */ | 3256 | */ |
3257 | E1000_WRITE_REG(hw, CTRL, (*ctrl & ~E1000_CTRL_MDC)); | 3257 | ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); |
3258 | E1000_WRITE_FLUSH(hw); | 3258 | E1000_WRITE_FLUSH(); |
3259 | udelay(10); | 3259 | udelay(10); |
3260 | } | 3260 | } |
3261 | 3261 | ||
@@ -3280,7 +3280,7 @@ static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) | |||
3280 | mask = 0x01; | 3280 | mask = 0x01; |
3281 | mask <<= (count - 1); | 3281 | mask <<= (count - 1); |
3282 | 3282 | ||
3283 | ctrl = E1000_READ_REG(hw, CTRL); | 3283 | ctrl = er32(CTRL); |
3284 | 3284 | ||
3285 | /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ | 3285 | /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ |
3286 | ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); | 3286 | ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); |
@@ -3296,8 +3296,8 @@ static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) | |||
3296 | else | 3296 | else |
3297 | ctrl &= ~E1000_CTRL_MDIO; | 3297 | ctrl &= ~E1000_CTRL_MDIO; |
3298 | 3298 | ||
3299 | E1000_WRITE_REG(hw, CTRL, ctrl); | 3299 | ew32(CTRL, ctrl); |
3300 | E1000_WRITE_FLUSH(hw); | 3300 | E1000_WRITE_FLUSH(); |
3301 | 3301 | ||
3302 | udelay(10); | 3302 | udelay(10); |
3303 | 3303 | ||
@@ -3328,14 +3328,14 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) | |||
3328 | * by raising the input to the Management Data Clock (setting the MDC bit), | 3328 | * by raising the input to the Management Data Clock (setting the MDC bit), |
3329 | * and then reading the value of the MDIO bit. | 3329 | * and then reading the value of the MDIO bit. |
3330 | */ | 3330 | */ |
3331 | ctrl = E1000_READ_REG(hw, CTRL); | 3331 | ctrl = er32(CTRL); |
3332 | 3332 | ||
3333 | /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */ | 3333 | /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */ |
3334 | ctrl &= ~E1000_CTRL_MDIO_DIR; | 3334 | ctrl &= ~E1000_CTRL_MDIO_DIR; |
3335 | ctrl &= ~E1000_CTRL_MDIO; | 3335 | ctrl &= ~E1000_CTRL_MDIO; |
3336 | 3336 | ||
3337 | E1000_WRITE_REG(hw, CTRL, ctrl); | 3337 | ew32(CTRL, ctrl); |
3338 | E1000_WRITE_FLUSH(hw); | 3338 | E1000_WRITE_FLUSH(); |
3339 | 3339 | ||
3340 | /* Raise and Lower the clock before reading in the data. This accounts for | 3340 | /* Raise and Lower the clock before reading in the data. This accounts for |
3341 | * the turnaround bits. The first clock occurred when we clocked out the | 3341 | * the turnaround bits. The first clock occurred when we clocked out the |
@@ -3347,7 +3347,7 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) | |||
3347 | for (data = 0, i = 0; i < 16; i++) { | 3347 | for (data = 0, i = 0; i < 16; i++) { |
3348 | data = data << 1; | 3348 | data = data << 1; |
3349 | e1000_raise_mdi_clk(hw, &ctrl); | 3349 | e1000_raise_mdi_clk(hw, &ctrl); |
3350 | ctrl = E1000_READ_REG(hw, CTRL); | 3350 | ctrl = er32(CTRL); |
3351 | /* Check to see if we shifted in a "1". */ | 3351 | /* Check to see if we shifted in a "1". */ |
3352 | if (ctrl & E1000_CTRL_MDIO) | 3352 | if (ctrl & E1000_CTRL_MDIO) |
3353 | data |= 1; | 3353 | data |= 1; |
@@ -3379,7 +3379,7 @@ static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask) | |||
3379 | if (e1000_get_hw_eeprom_semaphore(hw)) | 3379 | if (e1000_get_hw_eeprom_semaphore(hw)) |
3380 | return -E1000_ERR_SWFW_SYNC; | 3380 | return -E1000_ERR_SWFW_SYNC; |
3381 | 3381 | ||
3382 | swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC); | 3382 | swfw_sync = er32(SW_FW_SYNC); |
3383 | if (!(swfw_sync & (fwmask | swmask))) { | 3383 | if (!(swfw_sync & (fwmask | swmask))) { |
3384 | break; | 3384 | break; |
3385 | } | 3385 | } |
@@ -3397,7 +3397,7 @@ static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask) | |||
3397 | } | 3397 | } |
3398 | 3398 | ||
3399 | swfw_sync |= swmask; | 3399 | swfw_sync |= swmask; |
3400 | E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync); | 3400 | ew32(SW_FW_SYNC, swfw_sync); |
3401 | 3401 | ||
3402 | e1000_put_hw_eeprom_semaphore(hw); | 3402 | e1000_put_hw_eeprom_semaphore(hw); |
3403 | return E1000_SUCCESS; | 3403 | return E1000_SUCCESS; |
@@ -3425,9 +3425,9 @@ static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask) | |||
3425 | while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS); | 3425 | while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS); |
3426 | /* empty */ | 3426 | /* empty */ |
3427 | 3427 | ||
3428 | swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC); | 3428 | swfw_sync = er32(SW_FW_SYNC); |
3429 | swfw_sync &= ~swmask; | 3429 | swfw_sync &= ~swmask; |
3430 | E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync); | 3430 | ew32(SW_FW_SYNC, swfw_sync); |
3431 | 3431 | ||
3432 | e1000_put_hw_eeprom_semaphore(hw); | 3432 | e1000_put_hw_eeprom_semaphore(hw); |
3433 | } | 3433 | } |
@@ -3446,7 +3446,7 @@ s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) | |||
3446 | DEBUGFUNC("e1000_read_phy_reg"); | 3446 | DEBUGFUNC("e1000_read_phy_reg"); |
3447 | 3447 | ||
3448 | if ((hw->mac_type == e1000_80003es2lan) && | 3448 | if ((hw->mac_type == e1000_80003es2lan) && |
3449 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | 3449 | (er32(STATUS) & E1000_STATUS_FUNC_1)) { |
3450 | swfw = E1000_SWFW_PHY1_SM; | 3450 | swfw = E1000_SWFW_PHY1_SM; |
3451 | } else { | 3451 | } else { |
3452 | swfw = E1000_SWFW_PHY0_SM; | 3452 | swfw = E1000_SWFW_PHY0_SM; |
@@ -3517,12 +3517,12 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | |||
3517 | (phy_addr << E1000_MDIC_PHY_SHIFT) | | 3517 | (phy_addr << E1000_MDIC_PHY_SHIFT) | |
3518 | (E1000_MDIC_OP_READ)); | 3518 | (E1000_MDIC_OP_READ)); |
3519 | 3519 | ||
3520 | E1000_WRITE_REG(hw, MDIC, mdic); | 3520 | ew32(MDIC, mdic); |
3521 | 3521 | ||
3522 | /* Poll the ready bit to see if the MDI read completed */ | 3522 | /* Poll the ready bit to see if the MDI read completed */ |
3523 | for (i = 0; i < 64; i++) { | 3523 | for (i = 0; i < 64; i++) { |
3524 | udelay(50); | 3524 | udelay(50); |
3525 | mdic = E1000_READ_REG(hw, MDIC); | 3525 | mdic = er32(MDIC); |
3526 | if (mdic & E1000_MDIC_READY) break; | 3526 | if (mdic & E1000_MDIC_READY) break; |
3527 | } | 3527 | } |
3528 | if (!(mdic & E1000_MDIC_READY)) { | 3528 | if (!(mdic & E1000_MDIC_READY)) { |
@@ -3581,7 +3581,7 @@ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) | |||
3581 | DEBUGFUNC("e1000_write_phy_reg"); | 3581 | DEBUGFUNC("e1000_write_phy_reg"); |
3582 | 3582 | ||
3583 | if ((hw->mac_type == e1000_80003es2lan) && | 3583 | if ((hw->mac_type == e1000_80003es2lan) && |
3584 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | 3584 | (er32(STATUS) & E1000_STATUS_FUNC_1)) { |
3585 | swfw = E1000_SWFW_PHY1_SM; | 3585 | swfw = E1000_SWFW_PHY1_SM; |
3586 | } else { | 3586 | } else { |
3587 | swfw = E1000_SWFW_PHY0_SM; | 3587 | swfw = E1000_SWFW_PHY0_SM; |
@@ -3653,12 +3653,12 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | |||
3653 | (phy_addr << E1000_MDIC_PHY_SHIFT) | | 3653 | (phy_addr << E1000_MDIC_PHY_SHIFT) | |
3654 | (E1000_MDIC_OP_WRITE)); | 3654 | (E1000_MDIC_OP_WRITE)); |
3655 | 3655 | ||
3656 | E1000_WRITE_REG(hw, MDIC, mdic); | 3656 | ew32(MDIC, mdic); |
3657 | 3657 | ||
3658 | /* Poll the ready bit to see if the MDI read completed */ | 3658 | /* Poll the ready bit to see if the MDI read completed */ |
3659 | for (i = 0; i < 641; i++) { | 3659 | for (i = 0; i < 641; i++) { |
3660 | udelay(5); | 3660 | udelay(5); |
3661 | mdic = E1000_READ_REG(hw, MDIC); | 3661 | mdic = er32(MDIC); |
3662 | if (mdic & E1000_MDIC_READY) break; | 3662 | if (mdic & E1000_MDIC_READY) break; |
3663 | } | 3663 | } |
3664 | if (!(mdic & E1000_MDIC_READY)) { | 3664 | if (!(mdic & E1000_MDIC_READY)) { |
@@ -3697,7 +3697,7 @@ static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data) | |||
3697 | DEBUGFUNC("e1000_read_kmrn_reg"); | 3697 | DEBUGFUNC("e1000_read_kmrn_reg"); |
3698 | 3698 | ||
3699 | if ((hw->mac_type == e1000_80003es2lan) && | 3699 | if ((hw->mac_type == e1000_80003es2lan) && |
3700 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | 3700 | (er32(STATUS) & E1000_STATUS_FUNC_1)) { |
3701 | swfw = E1000_SWFW_PHY1_SM; | 3701 | swfw = E1000_SWFW_PHY1_SM; |
3702 | } else { | 3702 | } else { |
3703 | swfw = E1000_SWFW_PHY0_SM; | 3703 | swfw = E1000_SWFW_PHY0_SM; |
@@ -3709,11 +3709,11 @@ static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data) | |||
3709 | reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & | 3709 | reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & |
3710 | E1000_KUMCTRLSTA_OFFSET) | | 3710 | E1000_KUMCTRLSTA_OFFSET) | |
3711 | E1000_KUMCTRLSTA_REN; | 3711 | E1000_KUMCTRLSTA_REN; |
3712 | E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val); | 3712 | ew32(KUMCTRLSTA, reg_val); |
3713 | udelay(2); | 3713 | udelay(2); |
3714 | 3714 | ||
3715 | /* Read the data returned */ | 3715 | /* Read the data returned */ |
3716 | reg_val = E1000_READ_REG(hw, KUMCTRLSTA); | 3716 | reg_val = er32(KUMCTRLSTA); |
3717 | *data = (u16)reg_val; | 3717 | *data = (u16)reg_val; |
3718 | 3718 | ||
3719 | e1000_swfw_sync_release(hw, swfw); | 3719 | e1000_swfw_sync_release(hw, swfw); |
@@ -3727,7 +3727,7 @@ static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data) | |||
3727 | DEBUGFUNC("e1000_write_kmrn_reg"); | 3727 | DEBUGFUNC("e1000_write_kmrn_reg"); |
3728 | 3728 | ||
3729 | if ((hw->mac_type == e1000_80003es2lan) && | 3729 | if ((hw->mac_type == e1000_80003es2lan) && |
3730 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | 3730 | (er32(STATUS) & E1000_STATUS_FUNC_1)) { |
3731 | swfw = E1000_SWFW_PHY1_SM; | 3731 | swfw = E1000_SWFW_PHY1_SM; |
3732 | } else { | 3732 | } else { |
3733 | swfw = E1000_SWFW_PHY0_SM; | 3733 | swfw = E1000_SWFW_PHY0_SM; |
@@ -3737,7 +3737,7 @@ static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data) | |||
3737 | 3737 | ||
3738 | reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & | 3738 | reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & |
3739 | E1000_KUMCTRLSTA_OFFSET) | data; | 3739 | E1000_KUMCTRLSTA_OFFSET) | data; |
3740 | E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val); | 3740 | ew32(KUMCTRLSTA, reg_val); |
3741 | udelay(2); | 3741 | udelay(2); |
3742 | 3742 | ||
3743 | e1000_swfw_sync_release(hw, swfw); | 3743 | e1000_swfw_sync_release(hw, swfw); |
@@ -3768,7 +3768,7 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3768 | 3768 | ||
3769 | if (hw->mac_type > e1000_82543) { | 3769 | if (hw->mac_type > e1000_82543) { |
3770 | if ((hw->mac_type == e1000_80003es2lan) && | 3770 | if ((hw->mac_type == e1000_80003es2lan) && |
3771 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | 3771 | (er32(STATUS) & E1000_STATUS_FUNC_1)) { |
3772 | swfw = E1000_SWFW_PHY1_SM; | 3772 | swfw = E1000_SWFW_PHY1_SM; |
3773 | } else { | 3773 | } else { |
3774 | swfw = E1000_SWFW_PHY0_SM; | 3774 | swfw = E1000_SWFW_PHY0_SM; |
@@ -3783,17 +3783,17 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3783 | * and deassert. For e1000_82571 hardware and later, we instead delay | 3783 | * and deassert. For e1000_82571 hardware and later, we instead delay |
3784 | * for 50us between and 10ms after the deassertion. | 3784 | * for 50us between and 10ms after the deassertion. |
3785 | */ | 3785 | */ |
3786 | ctrl = E1000_READ_REG(hw, CTRL); | 3786 | ctrl = er32(CTRL); |
3787 | E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST); | 3787 | ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); |
3788 | E1000_WRITE_FLUSH(hw); | 3788 | E1000_WRITE_FLUSH(); |
3789 | 3789 | ||
3790 | if (hw->mac_type < e1000_82571) | 3790 | if (hw->mac_type < e1000_82571) |
3791 | msleep(10); | 3791 | msleep(10); |
3792 | else | 3792 | else |
3793 | udelay(100); | 3793 | udelay(100); |
3794 | 3794 | ||
3795 | E1000_WRITE_REG(hw, CTRL, ctrl); | 3795 | ew32(CTRL, ctrl); |
3796 | E1000_WRITE_FLUSH(hw); | 3796 | E1000_WRITE_FLUSH(); |
3797 | 3797 | ||
3798 | if (hw->mac_type >= e1000_82571) | 3798 | if (hw->mac_type >= e1000_82571) |
3799 | mdelay(10); | 3799 | mdelay(10); |
@@ -3803,24 +3803,24 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3803 | /* Read the Extended Device Control Register, assert the PHY_RESET_DIR | 3803 | /* Read the Extended Device Control Register, assert the PHY_RESET_DIR |
3804 | * bit to put the PHY into reset. Then, take it out of reset. | 3804 | * bit to put the PHY into reset. Then, take it out of reset. |
3805 | */ | 3805 | */ |
3806 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 3806 | ctrl_ext = er32(CTRL_EXT); |
3807 | ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; | 3807 | ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; |
3808 | ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; | 3808 | ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; |
3809 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 3809 | ew32(CTRL_EXT, ctrl_ext); |
3810 | E1000_WRITE_FLUSH(hw); | 3810 | E1000_WRITE_FLUSH(); |
3811 | msleep(10); | 3811 | msleep(10); |
3812 | ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; | 3812 | ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; |
3813 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 3813 | ew32(CTRL_EXT, ctrl_ext); |
3814 | E1000_WRITE_FLUSH(hw); | 3814 | E1000_WRITE_FLUSH(); |
3815 | } | 3815 | } |
3816 | udelay(150); | 3816 | udelay(150); |
3817 | 3817 | ||
3818 | if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { | 3818 | if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { |
3819 | /* Configure activity LED after PHY reset */ | 3819 | /* Configure activity LED after PHY reset */ |
3820 | led_ctrl = E1000_READ_REG(hw, LEDCTL); | 3820 | led_ctrl = er32(LEDCTL); |
3821 | led_ctrl &= IGP_ACTIVITY_LED_MASK; | 3821 | led_ctrl &= IGP_ACTIVITY_LED_MASK; |
3822 | led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); | 3822 | led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); |
3823 | E1000_WRITE_REG(hw, LEDCTL, led_ctrl); | 3823 | ew32(LEDCTL, led_ctrl); |
3824 | } | 3824 | } |
3825 | 3825 | ||
3826 | /* Wait for FW to finish PHY configuration. */ | 3826 | /* Wait for FW to finish PHY configuration. */ |
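The reset sequence above is a read-modify-write on CTRL with a posting flush between asserting and deasserting PHY_RST; the flush matters because MMIO writes are posted, and the hold-time delay must not start before the write actually reaches the device. A condensed sketch of the same pattern, assuming the er32/ew32 forms sketched earlier plus <linux/delay.h> and the driver headers (the helper name is made up for illustration):

/* Condensed sketch of the CTRL-based PHY reset above. */
static void sketch_phy_reset_via_ctrl(struct e1000_hw *hw)
{
	u32 ctrl = er32(CTRL);

	ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);	/* assert PHY reset */
	E1000_WRITE_FLUSH();			/* force the posted write out */

	if (hw->mac_type < e1000_82571)
		msleep(10);			/* older parts: hold 10 ms */
	else
		udelay(100);			/* 82571 and later */

	ew32(CTRL, ctrl);			/* deassert */
	E1000_WRITE_FLUSH();

	if (hw->mac_type >= e1000_82571)
		mdelay(10);			/* settle time after deassert */
}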
@@ -3906,8 +3906,8 @@ void e1000_phy_powerdown_workaround(struct e1000_hw *hw) | |||
3906 | 3906 | ||
3907 | do { | 3907 | do { |
3908 | /* Disable link */ | 3908 | /* Disable link */ |
3909 | reg = E1000_READ_REG(hw, PHY_CTRL); | 3909 | reg = er32(PHY_CTRL); |
3910 | E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | | 3910 | ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | |
3911 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | 3911 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); |
3912 | 3912 | ||
3913 | /* Write VR power-down enable - bits 9:8 should be 10b */ | 3913 | /* Write VR power-down enable - bits 9:8 should be 10b */ |
@@ -3922,8 +3922,8 @@ void e1000_phy_powerdown_workaround(struct e1000_hw *hw) | |||
3922 | break; | 3922 | break; |
3923 | 3923 | ||
3924 | /* Issue PHY reset and repeat at most one more time */ | 3924 | /* Issue PHY reset and repeat at most one more time */ |
3925 | reg = E1000_READ_REG(hw, CTRL); | 3925 | reg = er32(CTRL); |
3926 | E1000_WRITE_REG(hw, CTRL, reg | E1000_CTRL_PHY_RST); | 3926 | ew32(CTRL, reg | E1000_CTRL_PHY_RST); |
3927 | retry++; | 3927 | retry++; |
3928 | } while (retry); | 3928 | } while (retry); |
3929 | 3929 | ||
@@ -3981,8 +3981,8 @@ static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) | |||
3981 | mdelay(5); | 3981 | mdelay(5); |
3982 | } | 3982 | } |
3983 | /* Disable GigE link negotiation */ | 3983 | /* Disable GigE link negotiation */ |
3984 | reg = E1000_READ_REG(hw, PHY_CTRL); | 3984 | reg = er32(PHY_CTRL); |
3985 | E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | | 3985 | ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | |
3986 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | 3986 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); |
3987 | 3987 | ||
3988 | /* unable to acquire PCS lock */ | 3988 | /* unable to acquire PCS lock */ |
@@ -4388,7 +4388,7 @@ s32 e1000_validate_mdi_setting(struct e1000_hw *hw) | |||
4388 | s32 e1000_init_eeprom_params(struct e1000_hw *hw) | 4388 | s32 e1000_init_eeprom_params(struct e1000_hw *hw) |
4389 | { | 4389 | { |
4390 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 4390 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
4391 | u32 eecd = E1000_READ_REG(hw, EECD); | 4391 | u32 eecd = er32(EECD); |
4392 | s32 ret_val = E1000_SUCCESS; | 4392 | s32 ret_val = E1000_SUCCESS; |
4393 | u16 eeprom_size; | 4393 | u16 eeprom_size; |
4394 | 4394 | ||
@@ -4490,7 +4490,7 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw) | |||
4490 | /* Ensure that the Autonomous FLASH update bit is cleared due to | 4490 | /* Ensure that the Autonomous FLASH update bit is cleared due to |
4491 | * Flash update issue on parts which use a FLASH for NVM. */ | 4491 | * Flash update issue on parts which use a FLASH for NVM. */ |
4492 | eecd &= ~E1000_EECD_AUPDEN; | 4492 | eecd &= ~E1000_EECD_AUPDEN; |
4493 | E1000_WRITE_REG(hw, EECD, eecd); | 4493 | ew32(EECD, eecd); |
4494 | } | 4494 | } |
4495 | break; | 4495 | break; |
4496 | case e1000_80003es2lan: | 4496 | case e1000_80003es2lan: |
@@ -4580,8 +4580,8 @@ static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) | |||
4580 | * wait <delay> microseconds. | 4580 | * wait <delay> microseconds. |
4581 | */ | 4581 | */ |
4582 | *eecd = *eecd | E1000_EECD_SK; | 4582 | *eecd = *eecd | E1000_EECD_SK; |
4583 | E1000_WRITE_REG(hw, EECD, *eecd); | 4583 | ew32(EECD, *eecd); |
4584 | E1000_WRITE_FLUSH(hw); | 4584 | E1000_WRITE_FLUSH(); |
4585 | udelay(hw->eeprom.delay_usec); | 4585 | udelay(hw->eeprom.delay_usec); |
4586 | } | 4586 | } |
4587 | 4587 | ||
@@ -4597,8 +4597,8 @@ static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) | |||
4597 | * wait 50 microseconds. | 4597 | * wait 50 microseconds. |
4598 | */ | 4598 | */ |
4599 | *eecd = *eecd & ~E1000_EECD_SK; | 4599 | *eecd = *eecd & ~E1000_EECD_SK; |
4600 | E1000_WRITE_REG(hw, EECD, *eecd); | 4600 | ew32(EECD, *eecd); |
4601 | E1000_WRITE_FLUSH(hw); | 4601 | E1000_WRITE_FLUSH(); |
4602 | udelay(hw->eeprom.delay_usec); | 4602 | udelay(hw->eeprom.delay_usec); |
4603 | } | 4603 | } |
4604 | 4604 | ||
@@ -4620,7 +4620,7 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) | |||
4620 | * In order to do this, "data" must be broken down into bits. | 4620 | * In order to do this, "data" must be broken down into bits. |
4621 | */ | 4621 | */ |
4622 | mask = 0x01 << (count - 1); | 4622 | mask = 0x01 << (count - 1); |
4623 | eecd = E1000_READ_REG(hw, EECD); | 4623 | eecd = er32(EECD); |
4624 | if (eeprom->type == e1000_eeprom_microwire) { | 4624 | if (eeprom->type == e1000_eeprom_microwire) { |
4625 | eecd &= ~E1000_EECD_DO; | 4625 | eecd &= ~E1000_EECD_DO; |
4626 | } else if (eeprom->type == e1000_eeprom_spi) { | 4626 | } else if (eeprom->type == e1000_eeprom_spi) { |
@@ -4637,8 +4637,8 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) | |||
4637 | if (data & mask) | 4637 | if (data & mask) |
4638 | eecd |= E1000_EECD_DI; | 4638 | eecd |= E1000_EECD_DI; |
4639 | 4639 | ||
4640 | E1000_WRITE_REG(hw, EECD, eecd); | 4640 | ew32(EECD, eecd); |
4641 | E1000_WRITE_FLUSH(hw); | 4641 | E1000_WRITE_FLUSH(); |
4642 | 4642 | ||
4643 | udelay(eeprom->delay_usec); | 4643 | udelay(eeprom->delay_usec); |
4644 | 4644 | ||
@@ -4651,7 +4651,7 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) | |||
4651 | 4651 | ||
4652 | /* We leave the "DI" bit set to "0" when we leave this routine. */ | 4652 | /* We leave the "DI" bit set to "0" when we leave this routine. */ |
4653 | eecd &= ~E1000_EECD_DI; | 4653 | eecd &= ~E1000_EECD_DI; |
4654 | E1000_WRITE_REG(hw, EECD, eecd); | 4654 | ew32(EECD, eecd); |
4655 | } | 4655 | } |
4656 | 4656 | ||
4657 | /****************************************************************************** | 4657 | /****************************************************************************** |
@@ -4672,7 +4672,7 @@ static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) | |||
4672 | * always be clear. | 4672 | * always be clear. |
4673 | */ | 4673 | */ |
4674 | 4674 | ||
4675 | eecd = E1000_READ_REG(hw, EECD); | 4675 | eecd = er32(EECD); |
4676 | 4676 | ||
4677 | eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); | 4677 | eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); |
4678 | data = 0; | 4678 | data = 0; |
@@ -4681,7 +4681,7 @@ static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) | |||
4681 | data = data << 1; | 4681 | data = data << 1; |
4682 | e1000_raise_ee_clk(hw, &eecd); | 4682 | e1000_raise_ee_clk(hw, &eecd); |
4683 | 4683 | ||
4684 | eecd = E1000_READ_REG(hw, EECD); | 4684 | eecd = er32(EECD); |
4685 | 4685 | ||
4686 | eecd &= ~(E1000_EECD_DI); | 4686 | eecd &= ~(E1000_EECD_DI); |
4687 | if (eecd & E1000_EECD_DO) | 4687 | if (eecd & E1000_EECD_DO) |
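The raise/lower clock helpers and the shift-in/shift-out routines above together implement a software SPI/Microwire master on the EECD register: data is driven on DI, clocked with SK, and read back on DO, with a posting flush and a per-part delay around every edge. A minimal sketch of one outgoing word, assuming the er32/ew32 accessors, <linux/delay.h>, and the E1000_EECD_* bits already used in these hunks (the helper name is illustrative):

/* Minimal bit-bang sketch: shift "data" out MSB-first on EECD.DI,
 * pulsing EECD.SK once per bit. */
static void sketch_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
{
	u32 eecd = er32(EECD) & ~E1000_EECD_DO;
	u16 mask = 1 << (count - 1);

	while (mask) {
		if (data & mask)
			eecd |= E1000_EECD_DI;		/* drive the data bit */
		else
			eecd &= ~E1000_EECD_DI;
		ew32(EECD, eecd);
		E1000_WRITE_FLUSH();
		udelay(hw->eeprom.delay_usec);

		ew32(EECD, eecd | E1000_EECD_SK);	/* clock high */
		E1000_WRITE_FLUSH();
		udelay(hw->eeprom.delay_usec);

		ew32(EECD, eecd & ~E1000_EECD_SK);	/* clock low */
		E1000_WRITE_FLUSH();
		udelay(hw->eeprom.delay_usec);

		mask >>= 1;
	}
}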
@@ -4710,23 +4710,23 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw) | |||
4710 | 4710 | ||
4711 | if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) | 4711 | if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) |
4712 | return -E1000_ERR_SWFW_SYNC; | 4712 | return -E1000_ERR_SWFW_SYNC; |
4713 | eecd = E1000_READ_REG(hw, EECD); | 4713 | eecd = er32(EECD); |
4714 | 4714 | ||
4715 | if (hw->mac_type != e1000_82573) { | 4715 | if (hw->mac_type != e1000_82573) { |
4716 | /* Request EEPROM Access */ | 4716 | /* Request EEPROM Access */ |
4717 | if (hw->mac_type > e1000_82544) { | 4717 | if (hw->mac_type > e1000_82544) { |
4718 | eecd |= E1000_EECD_REQ; | 4718 | eecd |= E1000_EECD_REQ; |
4719 | E1000_WRITE_REG(hw, EECD, eecd); | 4719 | ew32(EECD, eecd); |
4720 | eecd = E1000_READ_REG(hw, EECD); | 4720 | eecd = er32(EECD); |
4721 | while ((!(eecd & E1000_EECD_GNT)) && | 4721 | while ((!(eecd & E1000_EECD_GNT)) && |
4722 | (i < E1000_EEPROM_GRANT_ATTEMPTS)) { | 4722 | (i < E1000_EEPROM_GRANT_ATTEMPTS)) { |
4723 | i++; | 4723 | i++; |
4724 | udelay(5); | 4724 | udelay(5); |
4725 | eecd = E1000_READ_REG(hw, EECD); | 4725 | eecd = er32(EECD); |
4726 | } | 4726 | } |
4727 | if (!(eecd & E1000_EECD_GNT)) { | 4727 | if (!(eecd & E1000_EECD_GNT)) { |
4728 | eecd &= ~E1000_EECD_REQ; | 4728 | eecd &= ~E1000_EECD_REQ; |
4729 | E1000_WRITE_REG(hw, EECD, eecd); | 4729 | ew32(EECD, eecd); |
4730 | DEBUGOUT("Could not acquire EEPROM grant\n"); | 4730 | DEBUGOUT("Could not acquire EEPROM grant\n"); |
4731 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); | 4731 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); |
4732 | return -E1000_ERR_EEPROM; | 4732 | return -E1000_ERR_EEPROM; |
@@ -4739,15 +4739,15 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw) | |||
4739 | if (eeprom->type == e1000_eeprom_microwire) { | 4739 | if (eeprom->type == e1000_eeprom_microwire) { |
4740 | /* Clear SK and DI */ | 4740 | /* Clear SK and DI */ |
4741 | eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); | 4741 | eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); |
4742 | E1000_WRITE_REG(hw, EECD, eecd); | 4742 | ew32(EECD, eecd); |
4743 | 4743 | ||
4744 | /* Set CS */ | 4744 | /* Set CS */ |
4745 | eecd |= E1000_EECD_CS; | 4745 | eecd |= E1000_EECD_CS; |
4746 | E1000_WRITE_REG(hw, EECD, eecd); | 4746 | ew32(EECD, eecd); |
4747 | } else if (eeprom->type == e1000_eeprom_spi) { | 4747 | } else if (eeprom->type == e1000_eeprom_spi) { |
4748 | /* Clear SK and CS */ | 4748 | /* Clear SK and CS */ |
4749 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); | 4749 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); |
4750 | E1000_WRITE_REG(hw, EECD, eecd); | 4750 | ew32(EECD, eecd); |
4751 | udelay(1); | 4751 | udelay(1); |
4752 | } | 4752 | } |
4753 | 4753 | ||
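e1000_acquire_eeprom layers two handshakes: the software/firmware sync (E1000_SWFW_EEP_SM) and, on post-82544 parts, a request/grant bit pair in EECD -- the driver sets REQ and polls for GNT before it may drive the serial pins. A condensed sketch of that inner handshake, using the bit names and E1000_EEPROM_GRANT_ATTEMPTS from the hunk above (the helper name and loop shape are illustrative):

/* Request/grant sketch: ask the MAC for the EEPROM pins, poll for the
 * grant, and back out cleanly on failure. */
static s32 sketch_eeprom_request(struct e1000_hw *hw)
{
	u32 eecd = er32(EECD);
	int i = 0;

	ew32(EECD, eecd | E1000_EECD_REQ);		/* request access */
	eecd = er32(EECD);
	while (!(eecd & E1000_EECD_GNT) && i++ < E1000_EEPROM_GRANT_ATTEMPTS) {
		udelay(5);
		eecd = er32(EECD);
	}
	if (!(eecd & E1000_EECD_GNT)) {
		ew32(EECD, eecd & ~E1000_EECD_REQ);	/* give up cleanly */
		return -E1000_ERR_EEPROM;
	}
	return E1000_SUCCESS;
}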
@@ -4764,40 +4764,40 @@ static void e1000_standby_eeprom(struct e1000_hw *hw) | |||
4764 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 4764 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
4765 | u32 eecd; | 4765 | u32 eecd; |
4766 | 4766 | ||
4767 | eecd = E1000_READ_REG(hw, EECD); | 4767 | eecd = er32(EECD); |
4768 | 4768 | ||
4769 | if (eeprom->type == e1000_eeprom_microwire) { | 4769 | if (eeprom->type == e1000_eeprom_microwire) { |
4770 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); | 4770 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); |
4771 | E1000_WRITE_REG(hw, EECD, eecd); | 4771 | ew32(EECD, eecd); |
4772 | E1000_WRITE_FLUSH(hw); | 4772 | E1000_WRITE_FLUSH(); |
4773 | udelay(eeprom->delay_usec); | 4773 | udelay(eeprom->delay_usec); |
4774 | 4774 | ||
4775 | /* Clock high */ | 4775 | /* Clock high */ |
4776 | eecd |= E1000_EECD_SK; | 4776 | eecd |= E1000_EECD_SK; |
4777 | E1000_WRITE_REG(hw, EECD, eecd); | 4777 | ew32(EECD, eecd); |
4778 | E1000_WRITE_FLUSH(hw); | 4778 | E1000_WRITE_FLUSH(); |
4779 | udelay(eeprom->delay_usec); | 4779 | udelay(eeprom->delay_usec); |
4780 | 4780 | ||
4781 | /* Select EEPROM */ | 4781 | /* Select EEPROM */ |
4782 | eecd |= E1000_EECD_CS; | 4782 | eecd |= E1000_EECD_CS; |
4783 | E1000_WRITE_REG(hw, EECD, eecd); | 4783 | ew32(EECD, eecd); |
4784 | E1000_WRITE_FLUSH(hw); | 4784 | E1000_WRITE_FLUSH(); |
4785 | udelay(eeprom->delay_usec); | 4785 | udelay(eeprom->delay_usec); |
4786 | 4786 | ||
4787 | /* Clock low */ | 4787 | /* Clock low */ |
4788 | eecd &= ~E1000_EECD_SK; | 4788 | eecd &= ~E1000_EECD_SK; |
4789 | E1000_WRITE_REG(hw, EECD, eecd); | 4789 | ew32(EECD, eecd); |
4790 | E1000_WRITE_FLUSH(hw); | 4790 | E1000_WRITE_FLUSH(); |
4791 | udelay(eeprom->delay_usec); | 4791 | udelay(eeprom->delay_usec); |
4792 | } else if (eeprom->type == e1000_eeprom_spi) { | 4792 | } else if (eeprom->type == e1000_eeprom_spi) { |
4793 | /* Toggle CS to flush commands */ | 4793 | /* Toggle CS to flush commands */ |
4794 | eecd |= E1000_EECD_CS; | 4794 | eecd |= E1000_EECD_CS; |
4795 | E1000_WRITE_REG(hw, EECD, eecd); | 4795 | ew32(EECD, eecd); |
4796 | E1000_WRITE_FLUSH(hw); | 4796 | E1000_WRITE_FLUSH(); |
4797 | udelay(eeprom->delay_usec); | 4797 | udelay(eeprom->delay_usec); |
4798 | eecd &= ~E1000_EECD_CS; | 4798 | eecd &= ~E1000_EECD_CS; |
4799 | E1000_WRITE_REG(hw, EECD, eecd); | 4799 | ew32(EECD, eecd); |
4800 | E1000_WRITE_FLUSH(hw); | 4800 | E1000_WRITE_FLUSH(); |
4801 | udelay(eeprom->delay_usec); | 4801 | udelay(eeprom->delay_usec); |
4802 | } | 4802 | } |
4803 | } | 4803 | } |
@@ -4813,13 +4813,13 @@ static void e1000_release_eeprom(struct e1000_hw *hw) | |||
4813 | 4813 | ||
4814 | DEBUGFUNC("e1000_release_eeprom"); | 4814 | DEBUGFUNC("e1000_release_eeprom"); |
4815 | 4815 | ||
4816 | eecd = E1000_READ_REG(hw, EECD); | 4816 | eecd = er32(EECD); |
4817 | 4817 | ||
4818 | if (hw->eeprom.type == e1000_eeprom_spi) { | 4818 | if (hw->eeprom.type == e1000_eeprom_spi) { |
4819 | eecd |= E1000_EECD_CS; /* Pull CS high */ | 4819 | eecd |= E1000_EECD_CS; /* Pull CS high */ |
4820 | eecd &= ~E1000_EECD_SK; /* Lower SCK */ | 4820 | eecd &= ~E1000_EECD_SK; /* Lower SCK */ |
4821 | 4821 | ||
4822 | E1000_WRITE_REG(hw, EECD, eecd); | 4822 | ew32(EECD, eecd); |
4823 | 4823 | ||
4824 | udelay(hw->eeprom.delay_usec); | 4824 | udelay(hw->eeprom.delay_usec); |
4825 | } else if (hw->eeprom.type == e1000_eeprom_microwire) { | 4825 | } else if (hw->eeprom.type == e1000_eeprom_microwire) { |
@@ -4828,25 +4828,25 @@ static void e1000_release_eeprom(struct e1000_hw *hw) | |||
4828 | /* CS on Microwire is active-high */ | 4828 | /* CS on Microwire is active-high */ |
4829 | eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); | 4829 | eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); |
4830 | 4830 | ||
4831 | E1000_WRITE_REG(hw, EECD, eecd); | 4831 | ew32(EECD, eecd); |
4832 | 4832 | ||
4833 | /* Rising edge of clock */ | 4833 | /* Rising edge of clock */ |
4834 | eecd |= E1000_EECD_SK; | 4834 | eecd |= E1000_EECD_SK; |
4835 | E1000_WRITE_REG(hw, EECD, eecd); | 4835 | ew32(EECD, eecd); |
4836 | E1000_WRITE_FLUSH(hw); | 4836 | E1000_WRITE_FLUSH(); |
4837 | udelay(hw->eeprom.delay_usec); | 4837 | udelay(hw->eeprom.delay_usec); |
4838 | 4838 | ||
4839 | /* Falling edge of clock */ | 4839 | /* Falling edge of clock */ |
4840 | eecd &= ~E1000_EECD_SK; | 4840 | eecd &= ~E1000_EECD_SK; |
4841 | E1000_WRITE_REG(hw, EECD, eecd); | 4841 | ew32(EECD, eecd); |
4842 | E1000_WRITE_FLUSH(hw); | 4842 | E1000_WRITE_FLUSH(); |
4843 | udelay(hw->eeprom.delay_usec); | 4843 | udelay(hw->eeprom.delay_usec); |
4844 | } | 4844 | } |
4845 | 4845 | ||
4846 | /* Stop requesting EEPROM access */ | 4846 | /* Stop requesting EEPROM access */ |
4847 | if (hw->mac_type > e1000_82544) { | 4847 | if (hw->mac_type > e1000_82544) { |
4848 | eecd &= ~E1000_EECD_REQ; | 4848 | eecd &= ~E1000_EECD_REQ; |
4849 | E1000_WRITE_REG(hw, EECD, eecd); | 4849 | ew32(EECD, eecd); |
4850 | } | 4850 | } |
4851 | 4851 | ||
4852 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); | 4852 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); |
@@ -5009,13 +5009,13 @@ static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words, | |||
5009 | eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) + | 5009 | eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) + |
5010 | E1000_EEPROM_RW_REG_START; | 5010 | E1000_EEPROM_RW_REG_START; |
5011 | 5011 | ||
5012 | E1000_WRITE_REG(hw, EERD, eerd); | 5012 | ew32(EERD, eerd); |
5013 | error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ); | 5013 | error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ); |
5014 | 5014 | ||
5015 | if (error) { | 5015 | if (error) { |
5016 | break; | 5016 | break; |
5017 | } | 5017 | } |
5018 | data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA); | 5018 | data[i] = (er32(EERD) >> E1000_EEPROM_RW_REG_DATA); |
5019 | 5019 | ||
5020 | } | 5020 | } |
5021 | 5021 | ||
@@ -5050,7 +5050,7 @@ static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words, | |||
5050 | break; | 5050 | break; |
5051 | } | 5051 | } |
5052 | 5052 | ||
5053 | E1000_WRITE_REG(hw, EEWR, register_value); | 5053 | ew32(EEWR, register_value); |
5054 | 5054 | ||
5055 | error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); | 5055 | error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); |
5056 | 5056 | ||
@@ -5076,9 +5076,9 @@ static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd) | |||
5076 | 5076 | ||
5077 | for (i = 0; i < attempts; i++) { | 5077 | for (i = 0; i < attempts; i++) { |
5078 | if (eerd == E1000_EEPROM_POLL_READ) | 5078 | if (eerd == E1000_EEPROM_POLL_READ) |
5079 | reg = E1000_READ_REG(hw, EERD); | 5079 | reg = er32(EERD); |
5080 | else | 5080 | else |
5081 | reg = E1000_READ_REG(hw, EEWR); | 5081 | reg = er32(EEWR); |
5082 | 5082 | ||
5083 | if (reg & E1000_EEPROM_RW_REG_DONE) { | 5083 | if (reg & E1000_EEPROM_RW_REG_DONE) { |
5084 | done = E1000_SUCCESS; | 5084 | done = E1000_SUCCESS; |
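On parts with the EERD interface the EEPROM is not bit-banged at all: the driver writes the word address plus a start bit into EERD, polls the same register for the done bit, and then pulls the data out of its upper half. A one-word sketch using the E1000_EEPROM_RW_* constants from the hunks above; the attempt budget and per-poll delay here are simplifications of e1000_poll_eerd_eewr_done, and the helper name is made up:

/* Read a single EEPROM word through EERD (sketch). */
static s32 sketch_read_eerd_word(struct e1000_hw *hw, u16 offset, u16 *data)
{
	u32 attempts = 100000;	/* assumed budget, not the driver's constant */
	u32 reg;

	ew32(EERD, ((u32)offset << E1000_EEPROM_RW_ADDR_SHIFT) |
		   E1000_EEPROM_RW_REG_START);

	while (attempts--) {
		reg = er32(EERD);
		if (reg & E1000_EEPROM_RW_REG_DONE) {
			*data = (u16)(reg >> E1000_EEPROM_RW_REG_DATA);
			return E1000_SUCCESS;
		}
		udelay(5);
	}
	return -E1000_ERR_EEPROM;
}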
@@ -5105,7 +5105,7 @@ static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) | |||
5105 | return false; | 5105 | return false; |
5106 | 5106 | ||
5107 | if (hw->mac_type == e1000_82573) { | 5107 | if (hw->mac_type == e1000_82573) { |
5108 | eecd = E1000_READ_REG(hw, EECD); | 5108 | eecd = er32(EECD); |
5109 | 5109 | ||
5110 | /* Isolate bits 15 & 16 */ | 5110 | /* Isolate bits 15 & 16 */ |
5111 | eecd = ((eecd >> 15) & 0x03); | 5111 | eecd = ((eecd >> 15) & 0x03); |
@@ -5215,9 +5215,9 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) | |||
5215 | e1000_commit_shadow_ram(hw); | 5215 | e1000_commit_shadow_ram(hw); |
5216 | /* Reload the EEPROM, or else modifications will not appear | 5216 | /* Reload the EEPROM, or else modifications will not appear |
5217 | * until after next adapter reset. */ | 5217 | * until after next adapter reset. */ |
5218 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 5218 | ctrl_ext = er32(CTRL_EXT); |
5219 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; | 5219 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; |
5220 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 5220 | ew32(CTRL_EXT, ctrl_ext); |
5221 | msleep(10); | 5221 | msleep(10); |
5222 | } | 5222 | } |
5223 | return E1000_SUCCESS; | 5223 | return E1000_SUCCESS; |
@@ -5395,7 +5395,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, | |||
5395 | * If DO does not go high in 10 milliseconds, then error out. | 5395 | * If DO does not go high in 10 milliseconds, then error out. |
5396 | */ | 5396 | */ |
5397 | for (i = 0; i < 200; i++) { | 5397 | for (i = 0; i < 200; i++) { |
5398 | eecd = E1000_READ_REG(hw, EECD); | 5398 | eecd = er32(EECD); |
5399 | if (eecd & E1000_EECD_DO) break; | 5399 | if (eecd & E1000_EECD_DO) break; |
5400 | udelay(50); | 5400 | udelay(50); |
5401 | } | 5401 | } |
@@ -5449,9 +5449,9 @@ static s32 e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
5449 | 5449 | ||
5450 | if (hw->mac_type == e1000_82573) { | 5450 | if (hw->mac_type == e1000_82573) { |
5451 | /* The flop register will be used to determine if flash type is STM */ | 5451 | /* The flop register will be used to determine if flash type is STM */ |
5452 | flop = E1000_READ_REG(hw, FLOP); | 5452 | flop = er32(FLOP); |
5453 | for (i=0; i < attempts; i++) { | 5453 | for (i=0; i < attempts; i++) { |
5454 | eecd = E1000_READ_REG(hw, EECD); | 5454 | eecd = er32(EECD); |
5455 | if ((eecd & E1000_EECD_FLUPD) == 0) { | 5455 | if ((eecd & E1000_EECD_FLUPD) == 0) { |
5456 | break; | 5456 | break; |
5457 | } | 5457 | } |
@@ -5464,14 +5464,14 @@ static s32 e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
5464 | 5464 | ||
5465 | /* If STM opcode located in bits 15:8 of flop, reset firmware */ | 5465 | /* If STM opcode located in bits 15:8 of flop, reset firmware */ |
5466 | if ((flop & 0xFF00) == E1000_STM_OPCODE) { | 5466 | if ((flop & 0xFF00) == E1000_STM_OPCODE) { |
5467 | E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET); | 5467 | ew32(HICR, E1000_HICR_FW_RESET); |
5468 | } | 5468 | } |
5469 | 5469 | ||
5470 | /* Perform the flash update */ | 5470 | /* Perform the flash update */ |
5471 | E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD); | 5471 | ew32(EECD, eecd | E1000_EECD_FLUPD); |
5472 | 5472 | ||
5473 | for (i=0; i < attempts; i++) { | 5473 | for (i=0; i < attempts; i++) { |
5474 | eecd = E1000_READ_REG(hw, EECD); | 5474 | eecd = er32(EECD); |
5475 | if ((eecd & E1000_EECD_FLUPD) == 0) { | 5475 | if ((eecd & E1000_EECD_FLUPD) == 0) { |
5476 | break; | 5476 | break; |
5477 | } | 5477 | } |
@@ -5487,7 +5487,7 @@ static s32 e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
5487 | /* We're writing to the opposite bank so if we're on bank 1, | 5487 | /* We're writing to the opposite bank so if we're on bank 1, |
5488 | * write to bank 0 etc. We also need to erase the segment that | 5488 | * write to bank 0 etc. We also need to erase the segment that |
5489 | * is going to be written */ | 5489 | * is going to be written */ |
5490 | if (!(E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL)) { | 5490 | if (!(er32(EECD) & E1000_EECD_SEC1VAL)) { |
5491 | new_bank_offset = hw->flash_bank_size * 2; | 5491 | new_bank_offset = hw->flash_bank_size * 2; |
5492 | old_bank_offset = 0; | 5492 | old_bank_offset = 0; |
5493 | e1000_erase_ich8_4k_segment(hw, 1); | 5493 | e1000_erase_ich8_4k_segment(hw, 1); |
@@ -5621,7 +5621,7 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw) | |||
5621 | case e1000_82546_rev_3: | 5621 | case e1000_82546_rev_3: |
5622 | case e1000_82571: | 5622 | case e1000_82571: |
5623 | case e1000_80003es2lan: | 5623 | case e1000_80003es2lan: |
5624 | if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) | 5624 | if (er32(STATUS) & E1000_STATUS_FUNC_1) |
5625 | hw->perm_mac_addr[5] ^= 0x01; | 5625 | hw->perm_mac_addr[5] ^= 0x01; |
5626 | break; | 5626 | break; |
5627 | } | 5627 | } |
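The FUNC_1 check above is how the dual-port parts listed in that switch derive the second port's MAC address: the second function takes the EEPROM-stored address and flips the low bit of the last octet, which is exactly what the hunk does. In miniature (the helper name is illustrative):

/* Illustration of the port-B fixup above. */
static void sketch_fixup_port_b_mac(struct e1000_hw *hw, u8 *perm_mac_addr)
{
	if (er32(STATUS) & E1000_STATUS_FUNC_1)
		perm_mac_addr[5] ^= 0x01;	/* e.g. ...:56 becomes ...:57 */
}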
@@ -5666,9 +5666,9 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw) | |||
5666 | DEBUGOUT("Clearing RAR[1-15]\n"); | 5666 | DEBUGOUT("Clearing RAR[1-15]\n"); |
5667 | for (i = 1; i < rar_num; i++) { | 5667 | for (i = 1; i < rar_num; i++) { |
5668 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); | 5668 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); |
5669 | E1000_WRITE_FLUSH(hw); | 5669 | E1000_WRITE_FLUSH(); |
5670 | E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); | 5670 | E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); |
5671 | E1000_WRITE_FLUSH(hw); | 5671 | E1000_WRITE_FLUSH(); |
5672 | } | 5672 | } |
5673 | } | 5673 | } |
5674 | 5674 | ||
@@ -5772,12 +5772,12 @@ void e1000_mta_set(struct e1000_hw *hw, u32 hash_value) | |||
5772 | if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) { | 5772 | if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) { |
5773 | temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1)); | 5773 | temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1)); |
5774 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); | 5774 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); |
5775 | E1000_WRITE_FLUSH(hw); | 5775 | E1000_WRITE_FLUSH(); |
5776 | E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp); | 5776 | E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp); |
5777 | E1000_WRITE_FLUSH(hw); | 5777 | E1000_WRITE_FLUSH(); |
5778 | } else { | 5778 | } else { |
5779 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); | 5779 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); |
5780 | E1000_WRITE_FLUSH(hw); | 5780 | E1000_WRITE_FLUSH(); |
5781 | } | 5781 | } |
5782 | } | 5782 | } |
5783 | 5783 | ||
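e1000_mta_set turns an already-computed multicast hash into a (register, bit) pair in the MTA array; the hunk above is only the 82544 errata path, which rewrites the neighbouring register (with flushes in between) so the paired write is not dropped. Below is a sketch of the plain, non-errata path; the 7-bit/5-bit split of the hash is an assumption based on the 128 x 32-bit MTA layout and is not visible in this hunk, and the helper name is made up:

/* Non-errata sketch of setting one multicast hash bit. */
static void sketch_mta_set(struct e1000_hw *hw, u32 hash_value)
{
	u32 hash_reg = (hash_value >> 5) & 0x7F;	/* which MTA dword */
	u32 hash_bit = hash_value & 0x1F;		/* which bit in it */
	u32 mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);

	mta |= 1 << hash_bit;
	E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
	E1000_WRITE_FLUSH();
}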
@@ -5831,9 +5831,9 @@ void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) | |||
5831 | } | 5831 | } |
5832 | 5832 | ||
5833 | E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); | 5833 | E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); |
5834 | E1000_WRITE_FLUSH(hw); | 5834 | E1000_WRITE_FLUSH(); |
5835 | E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); | 5835 | E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); |
5836 | E1000_WRITE_FLUSH(hw); | 5836 | E1000_WRITE_FLUSH(); |
5837 | } | 5837 | } |
5838 | 5838 | ||
5839 | /****************************************************************************** | 5839 | /****************************************************************************** |
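e1000_rar_set packs the six address bytes into two dwords before the two array writes shown above, with the high dword also carrying an address-valid bit so the filter is only armed once both halves land. A hedged sketch of the packing -- the byte ordering and the E1000_RAH_AV constant are assumptions, not visible in this hunk, and the helper name is made up:

/* Sketch of packing a MAC address for a receive-address register pair. */
static void sketch_rar_set(struct e1000_hw *hw, const u8 *addr, u32 index)
{
	u32 rar_low  = (u32)addr[0] | ((u32)addr[1] << 8) |
		       ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
	u32 rar_high = (u32)addr[4] | ((u32)addr[5] << 8) | E1000_RAH_AV;

	E1000_WRITE_REG_ARRAY(hw, RA, index << 1, rar_low);
	E1000_WRITE_FLUSH();
	E1000_WRITE_REG_ARRAY(hw, RA, (index << 1) + 1, rar_high);
	E1000_WRITE_FLUSH();
}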
@@ -5853,12 +5853,12 @@ void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) | |||
5853 | if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { | 5853 | if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { |
5854 | temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); | 5854 | temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); |
5855 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); | 5855 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); |
5856 | E1000_WRITE_FLUSH(hw); | 5856 | E1000_WRITE_FLUSH(); |
5857 | E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); | 5857 | E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); |
5858 | E1000_WRITE_FLUSH(hw); | 5858 | E1000_WRITE_FLUSH(); |
5859 | } else { | 5859 | } else { |
5860 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); | 5860 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); |
5861 | E1000_WRITE_FLUSH(hw); | 5861 | E1000_WRITE_FLUSH(); |
5862 | } | 5862 | } |
5863 | } | 5863 | } |
5864 | 5864 | ||
@@ -5896,7 +5896,7 @@ static void e1000_clear_vfta(struct e1000_hw *hw) | |||
5896 | * manageability unit */ | 5896 | * manageability unit */ |
5897 | vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; | 5897 | vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; |
5898 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); | 5898 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); |
5899 | E1000_WRITE_FLUSH(hw); | 5899 | E1000_WRITE_FLUSH(); |
5900 | } | 5900 | } |
5901 | } | 5901 | } |
5902 | 5902 | ||
@@ -5916,7 +5916,7 @@ static s32 e1000_id_led_init(struct e1000_hw *hw) | |||
5916 | return E1000_SUCCESS; | 5916 | return E1000_SUCCESS; |
5917 | } | 5917 | } |
5918 | 5918 | ||
5919 | ledctl = E1000_READ_REG(hw, LEDCTL); | 5919 | ledctl = er32(LEDCTL); |
5920 | hw->ledctl_default = ledctl; | 5920 | hw->ledctl_default = ledctl; |
5921 | hw->ledctl_mode1 = hw->ledctl_default; | 5921 | hw->ledctl_mode1 = hw->ledctl_default; |
5922 | hw->ledctl_mode2 = hw->ledctl_default; | 5922 | hw->ledctl_mode2 = hw->ledctl_default; |
@@ -6013,7 +6013,7 @@ s32 e1000_setup_led(struct e1000_hw *hw) | |||
6013 | /* Fall Through */ | 6013 | /* Fall Through */ |
6014 | default: | 6014 | default: |
6015 | if (hw->media_type == e1000_media_type_fiber) { | 6015 | if (hw->media_type == e1000_media_type_fiber) { |
6016 | ledctl = E1000_READ_REG(hw, LEDCTL); | 6016 | ledctl = er32(LEDCTL); |
6017 | /* Save current LEDCTL settings */ | 6017 | /* Save current LEDCTL settings */ |
6018 | hw->ledctl_default = ledctl; | 6018 | hw->ledctl_default = ledctl; |
6019 | /* Turn off LED0 */ | 6019 | /* Turn off LED0 */ |
@@ -6022,9 +6022,9 @@ s32 e1000_setup_led(struct e1000_hw *hw) | |||
6022 | E1000_LEDCTL_LED0_MODE_MASK); | 6022 | E1000_LEDCTL_LED0_MODE_MASK); |
6023 | ledctl |= (E1000_LEDCTL_MODE_LED_OFF << | 6023 | ledctl |= (E1000_LEDCTL_MODE_LED_OFF << |
6024 | E1000_LEDCTL_LED0_MODE_SHIFT); | 6024 | E1000_LEDCTL_LED0_MODE_SHIFT); |
6025 | E1000_WRITE_REG(hw, LEDCTL, ledctl); | 6025 | ew32(LEDCTL, ledctl); |
6026 | } else if (hw->media_type == e1000_media_type_copper) | 6026 | } else if (hw->media_type == e1000_media_type_copper) |
6027 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1); | 6027 | ew32(LEDCTL, hw->ledctl_mode1); |
6028 | break; | 6028 | break; |
6029 | } | 6029 | } |
6030 | 6030 | ||
@@ -6064,7 +6064,7 @@ s32 e1000_blink_led_start(struct e1000_hw *hw) | |||
6064 | ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8)); | 6064 | ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8)); |
6065 | } | 6065 | } |
6066 | 6066 | ||
6067 | E1000_WRITE_REG(hw, LEDCTL, ledctl_blink); | 6067 | ew32(LEDCTL, ledctl_blink); |
6068 | 6068 | ||
6069 | return E1000_SUCCESS; | 6069 | return E1000_SUCCESS; |
6070 | } | 6070 | } |
@@ -6103,7 +6103,7 @@ s32 e1000_cleanup_led(struct e1000_hw *hw) | |||
6103 | break; | 6103 | break; |
6104 | } | 6104 | } |
6105 | /* Restore LEDCTL settings */ | 6105 | /* Restore LEDCTL settings */ |
6106 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_default); | 6106 | ew32(LEDCTL, hw->ledctl_default); |
6107 | break; | 6107 | break; |
6108 | } | 6108 | } |
6109 | 6109 | ||
@@ -6117,7 +6117,7 @@ s32 e1000_cleanup_led(struct e1000_hw *hw) | |||
6117 | *****************************************************************************/ | 6117 | *****************************************************************************/ |
6118 | s32 e1000_led_on(struct e1000_hw *hw) | 6118 | s32 e1000_led_on(struct e1000_hw *hw) |
6119 | { | 6119 | { |
6120 | u32 ctrl = E1000_READ_REG(hw, CTRL); | 6120 | u32 ctrl = er32(CTRL); |
6121 | 6121 | ||
6122 | DEBUGFUNC("e1000_led_on"); | 6122 | DEBUGFUNC("e1000_led_on"); |
6123 | 6123 | ||
@@ -6149,13 +6149,13 @@ s32 e1000_led_on(struct e1000_hw *hw) | |||
6149 | e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, | 6149 | e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, |
6150 | (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); | 6150 | (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); |
6151 | } else if (hw->media_type == e1000_media_type_copper) { | 6151 | } else if (hw->media_type == e1000_media_type_copper) { |
6152 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode2); | 6152 | ew32(LEDCTL, hw->ledctl_mode2); |
6153 | return E1000_SUCCESS; | 6153 | return E1000_SUCCESS; |
6154 | } | 6154 | } |
6155 | break; | 6155 | break; |
6156 | } | 6156 | } |
6157 | 6157 | ||
6158 | E1000_WRITE_REG(hw, CTRL, ctrl); | 6158 | ew32(CTRL, ctrl); |
6159 | 6159 | ||
6160 | return E1000_SUCCESS; | 6160 | return E1000_SUCCESS; |
6161 | } | 6161 | } |
@@ -6167,7 +6167,7 @@ s32 e1000_led_on(struct e1000_hw *hw) | |||
6167 | *****************************************************************************/ | 6167 | *****************************************************************************/ |
6168 | s32 e1000_led_off(struct e1000_hw *hw) | 6168 | s32 e1000_led_off(struct e1000_hw *hw) |
6169 | { | 6169 | { |
6170 | u32 ctrl = E1000_READ_REG(hw, CTRL); | 6170 | u32 ctrl = er32(CTRL); |
6171 | 6171 | ||
6172 | DEBUGFUNC("e1000_led_off"); | 6172 | DEBUGFUNC("e1000_led_off"); |
6173 | 6173 | ||
@@ -6199,13 +6199,13 @@ s32 e1000_led_off(struct e1000_hw *hw) | |||
6199 | e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, | 6199 | e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, |
6200 | (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); | 6200 | (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); |
6201 | } else if (hw->media_type == e1000_media_type_copper) { | 6201 | } else if (hw->media_type == e1000_media_type_copper) { |
6202 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1); | 6202 | ew32(LEDCTL, hw->ledctl_mode1); |
6203 | return E1000_SUCCESS; | 6203 | return E1000_SUCCESS; |
6204 | } | 6204 | } |
6205 | break; | 6205 | break; |
6206 | } | 6206 | } |
6207 | 6207 | ||
6208 | E1000_WRITE_REG(hw, CTRL, ctrl); | 6208 | ew32(CTRL, ctrl); |
6209 | 6209 | ||
6210 | return E1000_SUCCESS; | 6210 | return E1000_SUCCESS; |
6211 | } | 6211 | } |
@@ -6219,93 +6219,93 @@ static void e1000_clear_hw_cntrs(struct e1000_hw *hw) | |||
6219 | { | 6219 | { |
6220 | volatile u32 temp; | 6220 | volatile u32 temp; |
6221 | 6221 | ||
6222 | temp = E1000_READ_REG(hw, CRCERRS); | 6222 | temp = er32(CRCERRS); |
6223 | temp = E1000_READ_REG(hw, SYMERRS); | 6223 | temp = er32(SYMERRS); |
6224 | temp = E1000_READ_REG(hw, MPC); | 6224 | temp = er32(MPC); |
6225 | temp = E1000_READ_REG(hw, SCC); | 6225 | temp = er32(SCC); |
6226 | temp = E1000_READ_REG(hw, ECOL); | 6226 | temp = er32(ECOL); |
6227 | temp = E1000_READ_REG(hw, MCC); | 6227 | temp = er32(MCC); |
6228 | temp = E1000_READ_REG(hw, LATECOL); | 6228 | temp = er32(LATECOL); |
6229 | temp = E1000_READ_REG(hw, COLC); | 6229 | temp = er32(COLC); |
6230 | temp = E1000_READ_REG(hw, DC); | 6230 | temp = er32(DC); |
6231 | temp = E1000_READ_REG(hw, SEC); | 6231 | temp = er32(SEC); |
6232 | temp = E1000_READ_REG(hw, RLEC); | 6232 | temp = er32(RLEC); |
6233 | temp = E1000_READ_REG(hw, XONRXC); | 6233 | temp = er32(XONRXC); |
6234 | temp = E1000_READ_REG(hw, XONTXC); | 6234 | temp = er32(XONTXC); |
6235 | temp = E1000_READ_REG(hw, XOFFRXC); | 6235 | temp = er32(XOFFRXC); |
6236 | temp = E1000_READ_REG(hw, XOFFTXC); | 6236 | temp = er32(XOFFTXC); |
6237 | temp = E1000_READ_REG(hw, FCRUC); | 6237 | temp = er32(FCRUC); |
6238 | 6238 | ||
6239 | if (hw->mac_type != e1000_ich8lan) { | 6239 | if (hw->mac_type != e1000_ich8lan) { |
6240 | temp = E1000_READ_REG(hw, PRC64); | 6240 | temp = er32(PRC64); |
6241 | temp = E1000_READ_REG(hw, PRC127); | 6241 | temp = er32(PRC127); |
6242 | temp = E1000_READ_REG(hw, PRC255); | 6242 | temp = er32(PRC255); |
6243 | temp = E1000_READ_REG(hw, PRC511); | 6243 | temp = er32(PRC511); |
6244 | temp = E1000_READ_REG(hw, PRC1023); | 6244 | temp = er32(PRC1023); |
6245 | temp = E1000_READ_REG(hw, PRC1522); | 6245 | temp = er32(PRC1522); |
6246 | } | 6246 | } |
6247 | 6247 | ||
6248 | temp = E1000_READ_REG(hw, GPRC); | 6248 | temp = er32(GPRC); |
6249 | temp = E1000_READ_REG(hw, BPRC); | 6249 | temp = er32(BPRC); |
6250 | temp = E1000_READ_REG(hw, MPRC); | 6250 | temp = er32(MPRC); |
6251 | temp = E1000_READ_REG(hw, GPTC); | 6251 | temp = er32(GPTC); |
6252 | temp = E1000_READ_REG(hw, GORCL); | 6252 | temp = er32(GORCL); |
6253 | temp = E1000_READ_REG(hw, GORCH); | 6253 | temp = er32(GORCH); |
6254 | temp = E1000_READ_REG(hw, GOTCL); | 6254 | temp = er32(GOTCL); |
6255 | temp = E1000_READ_REG(hw, GOTCH); | 6255 | temp = er32(GOTCH); |
6256 | temp = E1000_READ_REG(hw, RNBC); | 6256 | temp = er32(RNBC); |
6257 | temp = E1000_READ_REG(hw, RUC); | 6257 | temp = er32(RUC); |
6258 | temp = E1000_READ_REG(hw, RFC); | 6258 | temp = er32(RFC); |
6259 | temp = E1000_READ_REG(hw, ROC); | 6259 | temp = er32(ROC); |
6260 | temp = E1000_READ_REG(hw, RJC); | 6260 | temp = er32(RJC); |
6261 | temp = E1000_READ_REG(hw, TORL); | 6261 | temp = er32(TORL); |
6262 | temp = E1000_READ_REG(hw, TORH); | 6262 | temp = er32(TORH); |
6263 | temp = E1000_READ_REG(hw, TOTL); | 6263 | temp = er32(TOTL); |
6264 | temp = E1000_READ_REG(hw, TOTH); | 6264 | temp = er32(TOTH); |
6265 | temp = E1000_READ_REG(hw, TPR); | 6265 | temp = er32(TPR); |
6266 | temp = E1000_READ_REG(hw, TPT); | 6266 | temp = er32(TPT); |
6267 | 6267 | ||
6268 | if (hw->mac_type != e1000_ich8lan) { | 6268 | if (hw->mac_type != e1000_ich8lan) { |
6269 | temp = E1000_READ_REG(hw, PTC64); | 6269 | temp = er32(PTC64); |
6270 | temp = E1000_READ_REG(hw, PTC127); | 6270 | temp = er32(PTC127); |
6271 | temp = E1000_READ_REG(hw, PTC255); | 6271 | temp = er32(PTC255); |
6272 | temp = E1000_READ_REG(hw, PTC511); | 6272 | temp = er32(PTC511); |
6273 | temp = E1000_READ_REG(hw, PTC1023); | 6273 | temp = er32(PTC1023); |
6274 | temp = E1000_READ_REG(hw, PTC1522); | 6274 | temp = er32(PTC1522); |
6275 | } | 6275 | } |
6276 | 6276 | ||
6277 | temp = E1000_READ_REG(hw, MPTC); | 6277 | temp = er32(MPTC); |
6278 | temp = E1000_READ_REG(hw, BPTC); | 6278 | temp = er32(BPTC); |
6279 | 6279 | ||
6280 | if (hw->mac_type < e1000_82543) return; | 6280 | if (hw->mac_type < e1000_82543) return; |
6281 | 6281 | ||
6282 | temp = E1000_READ_REG(hw, ALGNERRC); | 6282 | temp = er32(ALGNERRC); |
6283 | temp = E1000_READ_REG(hw, RXERRC); | 6283 | temp = er32(RXERRC); |
6284 | temp = E1000_READ_REG(hw, TNCRS); | 6284 | temp = er32(TNCRS); |
6285 | temp = E1000_READ_REG(hw, CEXTERR); | 6285 | temp = er32(CEXTERR); |
6286 | temp = E1000_READ_REG(hw, TSCTC); | 6286 | temp = er32(TSCTC); |
6287 | temp = E1000_READ_REG(hw, TSCTFC); | 6287 | temp = er32(TSCTFC); |
6288 | 6288 | ||
6289 | if (hw->mac_type <= e1000_82544) return; | 6289 | if (hw->mac_type <= e1000_82544) return; |
6290 | 6290 | ||
6291 | temp = E1000_READ_REG(hw, MGTPRC); | 6291 | temp = er32(MGTPRC); |
6292 | temp = E1000_READ_REG(hw, MGTPDC); | 6292 | temp = er32(MGTPDC); |
6293 | temp = E1000_READ_REG(hw, MGTPTC); | 6293 | temp = er32(MGTPTC); |
6294 | 6294 | ||
6295 | if (hw->mac_type <= e1000_82547_rev_2) return; | 6295 | if (hw->mac_type <= e1000_82547_rev_2) return; |
6296 | 6296 | ||
6297 | temp = E1000_READ_REG(hw, IAC); | 6297 | temp = er32(IAC); |
6298 | temp = E1000_READ_REG(hw, ICRXOC); | 6298 | temp = er32(ICRXOC); |
6299 | 6299 | ||
6300 | if (hw->mac_type == e1000_ich8lan) return; | 6300 | if (hw->mac_type == e1000_ich8lan) return; |
6301 | 6301 | ||
6302 | temp = E1000_READ_REG(hw, ICRXPTC); | 6302 | temp = er32(ICRXPTC); |
6303 | temp = E1000_READ_REG(hw, ICRXATC); | 6303 | temp = er32(ICRXATC); |
6304 | temp = E1000_READ_REG(hw, ICTXPTC); | 6304 | temp = er32(ICTXPTC); |
6305 | temp = E1000_READ_REG(hw, ICTXATC); | 6305 | temp = er32(ICTXATC); |
6306 | temp = E1000_READ_REG(hw, ICTXQEC); | 6306 | temp = er32(ICTXQEC); |
6307 | temp = E1000_READ_REG(hw, ICTXQMTC); | 6307 | temp = er32(ICTXQMTC); |
6308 | temp = E1000_READ_REG(hw, ICRXDMTC); | 6308 | temp = er32(ICRXDMTC); |
6309 | } | 6309 | } |
6310 | 6310 | ||
6311 | /****************************************************************************** | 6311 | /****************************************************************************** |
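e1000_clear_hw_cntrs looks odd -- dozens of reads whose results are discarded -- because the statistics registers are clear-on-read: reading a counter is how you zero it, and the volatile temporary keeps the reads from being optimized away. In miniature, assuming the er32 sketch from earlier (the helper name and the two registers chosen are just examples):

/* Miniature version of the clear-on-read pattern above. */
static void sketch_clear_two_counters(struct e1000_hw *hw)
{
	volatile u32 temp;

	temp = er32(CRCERRS);	/* CRC error count, now zeroed */
	temp = er32(MPC);	/* missed packet count, now zeroed */
	(void)temp;
}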
@@ -6331,7 +6331,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw) | |||
6331 | hw->ifs_ratio = IFS_RATIO; | 6331 | hw->ifs_ratio = IFS_RATIO; |
6332 | } | 6332 | } |
6333 | hw->in_ifs_mode = false; | 6333 | hw->in_ifs_mode = false; |
6334 | E1000_WRITE_REG(hw, AIT, 0); | 6334 | ew32(AIT, 0); |
6335 | } else { | 6335 | } else { |
6336 | DEBUGOUT("Not in Adaptive IFS mode!\n"); | 6336 | DEBUGOUT("Not in Adaptive IFS mode!\n"); |
6337 | } | 6337 | } |
@@ -6358,14 +6358,14 @@ void e1000_update_adaptive(struct e1000_hw *hw) | |||
6358 | hw->current_ifs_val = hw->ifs_min_val; | 6358 | hw->current_ifs_val = hw->ifs_min_val; |
6359 | else | 6359 | else |
6360 | hw->current_ifs_val += hw->ifs_step_size; | 6360 | hw->current_ifs_val += hw->ifs_step_size; |
6361 | E1000_WRITE_REG(hw, AIT, hw->current_ifs_val); | 6361 | ew32(AIT, hw->current_ifs_val); |
6362 | } | 6362 | } |
6363 | } | 6363 | } |
6364 | } else { | 6364 | } else { |
6365 | if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { | 6365 | if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { |
6366 | hw->current_ifs_val = 0; | 6366 | hw->current_ifs_val = 0; |
6367 | hw->in_ifs_mode = false; | 6367 | hw->in_ifs_mode = false; |
6368 | E1000_WRITE_REG(hw, AIT, 0); | 6368 | ew32(AIT, 0); |
6369 | } | 6369 | } |
6370 | } | 6370 | } |
6371 | } else { | 6371 | } else { |
@@ -6489,7 +6489,7 @@ void e1000_get_bus_info(struct e1000_hw *hw) | |||
6489 | hw->bus_width = e1000_bus_width_pciex_1; | 6489 | hw->bus_width = e1000_bus_width_pciex_1; |
6490 | break; | 6490 | break; |
6491 | default: | 6491 | default: |
6492 | status = E1000_READ_REG(hw, STATUS); | 6492 | status = er32(STATUS); |
6493 | hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? | 6493 | hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? |
6494 | e1000_bus_type_pcix : e1000_bus_type_pci; | 6494 | e1000_bus_type_pcix : e1000_bus_type_pci; |
6495 | 6495 | ||
@@ -7114,7 +7114,7 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) | |||
7114 | /* MAC writes into PHY register based on the state transition | 7114 | /* MAC writes into PHY register based on the state transition |
7115 | * and start auto-negotiation. SW driver can overwrite the settings | 7115 | * and start auto-negotiation. SW driver can overwrite the settings |
7116 | * in CSR PHY power control E1000_PHY_CTRL register. */ | 7116 | * in CSR PHY power control E1000_PHY_CTRL register. */ |
7117 | phy_ctrl = E1000_READ_REG(hw, PHY_CTRL); | 7117 | phy_ctrl = er32(PHY_CTRL); |
7118 | } else { | 7118 | } else { |
7119 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); | 7119 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); |
7120 | if (ret_val) | 7120 | if (ret_val) |
@@ -7131,7 +7131,7 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) | |||
7131 | } else { | 7131 | } else { |
7132 | if (hw->mac_type == e1000_ich8lan) { | 7132 | if (hw->mac_type == e1000_ich8lan) { |
7133 | phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; | 7133 | phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; |
7134 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | 7134 | ew32(PHY_CTRL, phy_ctrl); |
7135 | } else { | 7135 | } else { |
7136 | phy_data &= ~IGP02E1000_PM_D3_LPLU; | 7136 | phy_data &= ~IGP02E1000_PM_D3_LPLU; |
7137 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, | 7137 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, |
@@ -7182,7 +7182,7 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) | |||
7182 | } else { | 7182 | } else { |
7183 | if (hw->mac_type == e1000_ich8lan) { | 7183 | if (hw->mac_type == e1000_ich8lan) { |
7184 | phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; | 7184 | phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; |
7185 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | 7185 | ew32(PHY_CTRL, phy_ctrl); |
7186 | } else { | 7186 | } else { |
7187 | phy_data |= IGP02E1000_PM_D3_LPLU; | 7187 | phy_data |= IGP02E1000_PM_D3_LPLU; |
7188 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, | 7188 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, |
@@ -7231,7 +7231,7 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) | |||
7231 | return E1000_SUCCESS; | 7231 | return E1000_SUCCESS; |
7232 | 7232 | ||
7233 | if (hw->mac_type == e1000_ich8lan) { | 7233 | if (hw->mac_type == e1000_ich8lan) { |
7234 | phy_ctrl = E1000_READ_REG(hw, PHY_CTRL); | 7234 | phy_ctrl = er32(PHY_CTRL); |
7235 | } else { | 7235 | } else { |
7236 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); | 7236 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); |
7237 | if (ret_val) | 7237 | if (ret_val) |
@@ -7241,7 +7241,7 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) | |||
7241 | if (!active) { | 7241 | if (!active) { |
7242 | if (hw->mac_type == e1000_ich8lan) { | 7242 | if (hw->mac_type == e1000_ich8lan) { |
7243 | phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; | 7243 | phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; |
7244 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | 7244 | ew32(PHY_CTRL, phy_ctrl); |
7245 | } else { | 7245 | } else { |
7246 | phy_data &= ~IGP02E1000_PM_D0_LPLU; | 7246 | phy_data &= ~IGP02E1000_PM_D0_LPLU; |
7247 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); | 7247 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); |
@@ -7282,7 +7282,7 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) | |||
7282 | 7282 | ||
7283 | if (hw->mac_type == e1000_ich8lan) { | 7283 | if (hw->mac_type == e1000_ich8lan) { |
7284 | phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; | 7284 | phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; |
7285 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | 7285 | ew32(PHY_CTRL, phy_ctrl); |
7286 | } else { | 7286 | } else { |
7287 | phy_data |= IGP02E1000_PM_D0_LPLU; | 7287 | phy_data |= IGP02E1000_PM_D0_LPLU; |
7288 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); | 7288 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); |
@@ -7404,14 +7404,14 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) | |||
7404 | u8 i; | 7404 | u8 i; |
7405 | 7405 | ||
7406 | /* Check that the host interface is enabled. */ | 7406 | /* Check that the host interface is enabled. */ |
7407 | hicr = E1000_READ_REG(hw, HICR); | 7407 | hicr = er32(HICR); |
7408 | if ((hicr & E1000_HICR_EN) == 0) { | 7408 | if ((hicr & E1000_HICR_EN) == 0) { |
7409 | DEBUGOUT("E1000_HOST_EN bit disabled.\n"); | 7409 | DEBUGOUT("E1000_HOST_EN bit disabled.\n"); |
7410 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | 7410 | return -E1000_ERR_HOST_INTERFACE_COMMAND; |
7411 | } | 7411 | } |
7412 | /* check the previous command is completed */ | 7412 | /* check the previous command is completed */ |
7413 | for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { | 7413 | for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { |
7414 | hicr = E1000_READ_REG(hw, HICR); | 7414 | hicr = er32(HICR); |
7415 | if (!(hicr & E1000_HICR_C)) | 7415 | if (!(hicr & E1000_HICR_C)) |
7416 | break; | 7416 | break; |
7417 | mdelay(1); | 7417 | mdelay(1); |
@@ -7524,7 +7524,7 @@ static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, | |||
7524 | /* The device driver writes the relevant command block into the ram area. */ | 7524 | /* The device driver writes the relevant command block into the ram area. */ |
7525 | for (i = 0; i < length; i++) { | 7525 | for (i = 0; i < length; i++) { |
7526 | E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *) hdr + i)); | 7526 | E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *) hdr + i)); |
7527 | E1000_WRITE_FLUSH(hw); | 7527 | E1000_WRITE_FLUSH(); |
7528 | } | 7528 | } |
7529 | 7529 | ||
7530 | return E1000_SUCCESS; | 7530 | return E1000_SUCCESS; |
@@ -7541,9 +7541,9 @@ static s32 e1000_mng_write_commit(struct e1000_hw *hw) | |||
7541 | { | 7541 | { |
7542 | u32 hicr; | 7542 | u32 hicr; |
7543 | 7543 | ||
7544 | hicr = E1000_READ_REG(hw, HICR); | 7544 | hicr = er32(HICR); |
7545 | /* Setting this bit tells the ARC that a new command is pending. */ | 7545 | /* Setting this bit tells the ARC that a new command is pending. */ |
7546 | E1000_WRITE_REG(hw, HICR, hicr | E1000_HICR_C); | 7546 | ew32(HICR, hicr | E1000_HICR_C); |
7547 | 7547 | ||
7548 | return E1000_SUCCESS; | 7548 | return E1000_SUCCESS; |
7549 | } | 7549 | } |
@@ -7558,7 +7558,7 @@ bool e1000_check_mng_mode(struct e1000_hw *hw) | |||
7558 | { | 7558 | { |
7559 | u32 fwsm; | 7559 | u32 fwsm; |
7560 | 7560 | ||
7561 | fwsm = E1000_READ_REG(hw, FWSM); | 7561 | fwsm = er32(FWSM); |
7562 | 7562 | ||
7563 | if (hw->mac_type == e1000_ich8lan) { | 7563 | if (hw->mac_type == e1000_ich8lan) { |
7564 | if ((fwsm & E1000_FWSM_MODE_MASK) == | 7564 | if ((fwsm & E1000_FWSM_MODE_MASK) == |
@@ -7671,14 +7671,14 @@ u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) | |||
7671 | u32 fwsm, factps; | 7671 | u32 fwsm, factps; |
7672 | 7672 | ||
7673 | if (hw->asf_firmware_present) { | 7673 | if (hw->asf_firmware_present) { |
7674 | manc = E1000_READ_REG(hw, MANC); | 7674 | manc = er32(MANC); |
7675 | 7675 | ||
7676 | if (!(manc & E1000_MANC_RCV_TCO_EN) || | 7676 | if (!(manc & E1000_MANC_RCV_TCO_EN) || |
7677 | !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) | 7677 | !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) |
7678 | return false; | 7678 | return false; |
7679 | if (e1000_arc_subsystem_valid(hw)) { | 7679 | if (e1000_arc_subsystem_valid(hw)) { |
7680 | fwsm = E1000_READ_REG(hw, FWSM); | 7680 | fwsm = er32(FWSM); |
7681 | factps = E1000_READ_REG(hw, FACTPS); | 7681 | factps = er32(FACTPS); |
7682 | 7682 | ||
7683 | if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) == | 7683 | if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) == |
7684 | e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG)) | 7684 | e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG)) |
@@ -7792,9 +7792,9 @@ static void e1000_set_pci_express_master_disable(struct e1000_hw *hw) | |||
7792 | if (hw->bus_type != e1000_bus_type_pci_express) | 7792 | if (hw->bus_type != e1000_bus_type_pci_express) |
7793 | return; | 7793 | return; |
7794 | 7794 | ||
7795 | ctrl = E1000_READ_REG(hw, CTRL); | 7795 | ctrl = er32(CTRL); |
7796 | ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; | 7796 | ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; |
7797 | E1000_WRITE_REG(hw, CTRL, ctrl); | 7797 | ew32(CTRL, ctrl); |
7798 | } | 7798 | } |
7799 | 7799 | ||
7800 | /******************************************************************************* | 7800 | /******************************************************************************* |
@@ -7820,7 +7820,7 @@ s32 e1000_disable_pciex_master(struct e1000_hw *hw) | |||
7820 | e1000_set_pci_express_master_disable(hw); | 7820 | e1000_set_pci_express_master_disable(hw); |
7821 | 7821 | ||
7822 | while (timeout) { | 7822 | while (timeout) { |
7823 | if (!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) | 7823 | if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) |
7824 | break; | 7824 | break; |
7825 | else | 7825 | else |
7826 | udelay(100); | 7826 | udelay(100); |
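Disabling PCIe bus mastering is a two-register handshake: set GIO_MASTER_DISABLE in CTRL, then poll STATUS until the hardware clears GIO_MASTER_ENABLE to confirm outstanding requests have drained. A condensed sketch, with the timeout budget assumed rather than taken from the driver and the helper name made up:

/* Sketch of the PCIe master-disable handshake shown above. */
static s32 sketch_disable_pciex_master(struct e1000_hw *hw)
{
	int timeout = 4000;	/* ~4000 * 100 us; an assumed budget */

	ew32(CTRL, er32(CTRL) | E1000_CTRL_GIO_MASTER_DISABLE);

	while (timeout--) {
		if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
			return E1000_SUCCESS;	/* requests drained */
		udelay(100);
	}
	return -1;	/* the driver returns one of its E1000_ERR_* codes here */
}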
@@ -7861,7 +7861,7 @@ static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) | |||
7861 | case e1000_80003es2lan: | 7861 | case e1000_80003es2lan: |
7862 | case e1000_ich8lan: | 7862 | case e1000_ich8lan: |
7863 | while (timeout) { | 7863 | while (timeout) { |
7864 | if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) | 7864 | if (er32(EECD) & E1000_EECD_AUTO_RD) |
7865 | break; | 7865 | break; |
7866 | else msleep(1); | 7866 | else msleep(1); |
7867 | timeout--; | 7867 | timeout--; |
@@ -7905,13 +7905,13 @@ static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) | |||
7905 | break; | 7905 | break; |
7906 | case e1000_80003es2lan: | 7906 | case e1000_80003es2lan: |
7907 | /* Separate *_CFG_DONE_* bit for each port */ | 7907 | /* Separate *_CFG_DONE_* bit for each port */ |
7908 | if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) | 7908 | if (er32(STATUS) & E1000_STATUS_FUNC_1) |
7909 | cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1; | 7909 | cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1; |
7910 | /* Fall Through */ | 7910 | /* Fall Through */ |
7911 | case e1000_82571: | 7911 | case e1000_82571: |
7912 | case e1000_82572: | 7912 | case e1000_82572: |
7913 | while (timeout) { | 7913 | while (timeout) { |
7914 | if (E1000_READ_REG(hw, EEMNGCTL) & cfg_mask) | 7914 | if (er32(EEMNGCTL) & cfg_mask) |
7915 | break; | 7915 | break; |
7916 | else | 7916 | else |
7917 | msleep(1); | 7917 | msleep(1); |
@@ -7957,11 +7957,11 @@ static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) | |||
7957 | /* Get the FW semaphore. */ | 7957 | /* Get the FW semaphore. */ |
7958 | timeout = hw->eeprom.word_size + 1; | 7958 | timeout = hw->eeprom.word_size + 1; |
7959 | while (timeout) { | 7959 | while (timeout) { |
7960 | swsm = E1000_READ_REG(hw, SWSM); | 7960 | swsm = er32(SWSM); |
7961 | swsm |= E1000_SWSM_SWESMBI; | 7961 | swsm |= E1000_SWSM_SWESMBI; |
7962 | E1000_WRITE_REG(hw, SWSM, swsm); | 7962 | ew32(SWSM, swsm); |
7963 | /* if we managed to set the bit we got the semaphore. */ | 7963 | /* if we managed to set the bit we got the semaphore. */ |
7964 | swsm = E1000_READ_REG(hw, SWSM); | 7964 | swsm = er32(SWSM); |
7965 | if (swsm & E1000_SWSM_SWESMBI) | 7965 | if (swsm & E1000_SWSM_SWESMBI) |
7966 | break; | 7966 | break; |
7967 | 7967 | ||
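The SWSM handshake above is a write-then-read-back lock: software sets SWESMBI and only owns the firmware semaphore if the bit reads back as set, since firmware can claim it in the same window. One acquisition attempt in isolation, with retry and timeout handling (which the real code derives from the EEPROM word size) left to the caller; the helper name is illustrative:

/* One attempt at the SWSM firmware semaphore, as in the hunk above. */
static bool sketch_try_fw_semaphore(struct e1000_hw *hw)
{
	u32 swsm = er32(SWSM);

	ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
	return (er32(SWSM) & E1000_SWSM_SWESMBI) != 0;
}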
@@ -7996,13 +7996,13 @@ static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) | |||
7996 | if (!hw->eeprom_semaphore_present) | 7996 | if (!hw->eeprom_semaphore_present) |
7997 | return; | 7997 | return; |
7998 | 7998 | ||
7999 | swsm = E1000_READ_REG(hw, SWSM); | 7999 | swsm = er32(SWSM); |
8000 | if (hw->mac_type == e1000_80003es2lan) { | 8000 | if (hw->mac_type == e1000_80003es2lan) { |
8001 | /* Release both semaphores. */ | 8001 | /* Release both semaphores. */ |
8002 | swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); | 8002 | swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); |
8003 | } else | 8003 | } else |
8004 | swsm &= ~(E1000_SWSM_SWESMBI); | 8004 | swsm &= ~(E1000_SWSM_SWESMBI); |
8005 | E1000_WRITE_REG(hw, SWSM, swsm); | 8005 | ew32(SWSM, swsm); |
8006 | } | 8006 | } |
8007 | 8007 | ||
8008 | /*************************************************************************** | 8008 | /*************************************************************************** |
@@ -8027,7 +8027,7 @@ static s32 e1000_get_software_semaphore(struct e1000_hw *hw) | |||
8027 | } | 8027 | } |
8028 | 8028 | ||
8029 | while (timeout) { | 8029 | while (timeout) { |
8030 | swsm = E1000_READ_REG(hw, SWSM); | 8030 | swsm = er32(SWSM); |
8031 | /* If SMBI bit cleared, it is now set and we hold the semaphore */ | 8031 | /* If SMBI bit cleared, it is now set and we hold the semaphore */ |
8032 | if (!(swsm & E1000_SWSM_SMBI)) | 8032 | if (!(swsm & E1000_SWSM_SMBI)) |
8033 | break; | 8033 | break; |
@@ -8060,10 +8060,10 @@ static void e1000_release_software_semaphore(struct e1000_hw *hw) | |||
8060 | return; | 8060 | return; |
8061 | } | 8061 | } |
8062 | 8062 | ||
8063 | swsm = E1000_READ_REG(hw, SWSM); | 8063 | swsm = er32(SWSM); |
8064 | /* Release the SW semaphores.*/ | 8064 | /* Release the SW semaphores.*/ |
8065 | swsm &= ~E1000_SWSM_SMBI; | 8065 | swsm &= ~E1000_SWSM_SMBI; |
8066 | E1000_WRITE_REG(hw, SWSM, swsm); | 8066 | ew32(SWSM, swsm); |
8067 | } | 8067 | } |
8068 | 8068 | ||
8069 | /****************************************************************************** | 8069 | /****************************************************************************** |
@@ -8083,13 +8083,13 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw) | |||
8083 | u32 fwsm = 0; | 8083 | u32 fwsm = 0; |
8084 | 8084 | ||
8085 | if (hw->mac_type == e1000_ich8lan) { | 8085 | if (hw->mac_type == e1000_ich8lan) { |
8086 | fwsm = E1000_READ_REG(hw, FWSM); | 8086 | fwsm = er32(FWSM); |
8087 | return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS | 8087 | return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS |
8088 | : E1000_BLK_PHY_RESET; | 8088 | : E1000_BLK_PHY_RESET; |
8089 | } | 8089 | } |
8090 | 8090 | ||
8091 | if (hw->mac_type > e1000_82547_rev_2) | 8091 | if (hw->mac_type > e1000_82547_rev_2) |
8092 | manc = E1000_READ_REG(hw, MANC); | 8092 | manc = er32(MANC); |
8093 | return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? | 8093 | return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? |
8094 | E1000_BLK_PHY_RESET : E1000_SUCCESS; | 8094 | E1000_BLK_PHY_RESET : E1000_SUCCESS; |
8095 | } | 8095 | } |
@@ -8108,7 +8108,7 @@ static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw) | |||
8108 | case e1000_82572: | 8108 | case e1000_82572: |
8109 | case e1000_82573: | 8109 | case e1000_82573: |
8110 | case e1000_80003es2lan: | 8110 | case e1000_80003es2lan: |
8111 | fwsm = E1000_READ_REG(hw, FWSM); | 8111 | fwsm = er32(FWSM); |
8112 | if ((fwsm & E1000_FWSM_MODE_MASK) != 0) | 8112 | if ((fwsm & E1000_FWSM_MODE_MASK) != 0) |
8113 | return true; | 8113 | return true; |
8114 | break; | 8114 | break; |
@@ -8143,19 +8143,19 @@ static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop) | |||
8143 | return E1000_SUCCESS; | 8143 | return E1000_SUCCESS; |
8144 | 8144 | ||
8145 | if (no_snoop) { | 8145 | if (no_snoop) { |
8146 | gcr_reg = E1000_READ_REG(hw, GCR); | 8146 | gcr_reg = er32(GCR); |
8147 | gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL); | 8147 | gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL); |
8148 | gcr_reg |= no_snoop; | 8148 | gcr_reg |= no_snoop; |
8149 | E1000_WRITE_REG(hw, GCR, gcr_reg); | 8149 | ew32(GCR, gcr_reg); |
8150 | } | 8150 | } |
8151 | if (hw->mac_type == e1000_ich8lan) { | 8151 | if (hw->mac_type == e1000_ich8lan) { |
8152 | u32 ctrl_ext; | 8152 | u32 ctrl_ext; |
8153 | 8153 | ||
8154 | E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL); | 8154 | ew32(GCR, PCI_EX_82566_SNOOP_ALL); |
8155 | 8155 | ||
8156 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 8156 | ctrl_ext = er32(CTRL_EXT); |
8157 | ctrl_ext |= E1000_CTRL_EXT_RO_DIS; | 8157 | ctrl_ext |= E1000_CTRL_EXT_RO_DIS; |
8158 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 8158 | ew32(CTRL_EXT, ctrl_ext); |
8159 | } | 8159 | } |
8160 | 8160 | ||
8161 | return E1000_SUCCESS; | 8161 | return E1000_SUCCESS; |
@@ -8179,11 +8179,11 @@ static s32 e1000_get_software_flag(struct e1000_hw *hw) | |||
8179 | 8179 | ||
8180 | if (hw->mac_type == e1000_ich8lan) { | 8180 | if (hw->mac_type == e1000_ich8lan) { |
8181 | while (timeout) { | 8181 | while (timeout) { |
8182 | extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); | 8182 | extcnf_ctrl = er32(EXTCNF_CTRL); |
8183 | extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; | 8183 | extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; |
8184 | E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); | 8184 | ew32(EXTCNF_CTRL, extcnf_ctrl); |
8185 | 8185 | ||
8186 | extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); | 8186 | extcnf_ctrl = er32(EXTCNF_CTRL); |
8187 | if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) | 8187 | if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) |
8188 | break; | 8188 | break; |
8189 | mdelay(1); | 8189 | mdelay(1); |
@@ -8215,9 +8215,9 @@ static void e1000_release_software_flag(struct e1000_hw *hw) | |||
8215 | DEBUGFUNC("e1000_release_software_flag"); | 8215 | DEBUGFUNC("e1000_release_software_flag"); |
8216 | 8216 | ||
8217 | if (hw->mac_type == e1000_ich8lan) { | 8217 | if (hw->mac_type == e1000_ich8lan) { |
8218 | extcnf_ctrl= E1000_READ_REG(hw, EXTCNF_CTRL); | 8218 | extcnf_ctrl= er32(EXTCNF_CTRL); |
8219 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; | 8219 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; |
8220 | E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); | 8220 | ew32(EXTCNF_CTRL, extcnf_ctrl); |
8221 | } | 8221 | } |
8222 | 8222 | ||
8223 | return; | 8223 | return; |
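
e1000_get_software_flag() and e1000_release_software_flag() implement the ICH8 software-flag (SWFLAG) handshake shown above: set the bit in EXTCNF_CTRL, read it back to confirm ownership, retry with a 1 ms delay until the timeout expires. A hedged sketch of how a caller typically brackets flash or NVM work with the pair; e1000_do_flash_cycle() is a placeholder name, not a function in this driver:

/* Illustrative caller only -- e1000_do_flash_cycle() is hypothetical */
static s32 e1000_flash_access_sketch(struct e1000_hw *hw)
{
	s32 ret;

	ret = e1000_get_software_flag(hw);	/* may fail on timeout (ICH8 only) */
	if (ret != E1000_SUCCESS)
		return ret;

	ret = e1000_do_flash_cycle(hw);		/* work guarded by the SWFLAG bit */

	e1000_release_software_flag(hw);	/* always hand the flag back */
	return ret;
}
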
@@ -8248,7 +8248,7 @@ static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, | |||
8248 | * to be updated with each read. | 8248 | * to be updated with each read. |
8249 | */ | 8249 | */ |
8250 | /* Value of bit 22 corresponds to the flash bank we're on. */ | 8250 | /* Value of bit 22 corresponds to the flash bank we're on. */ |
8251 | flash_bank = (E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL) ? 1 : 0; | 8251 | flash_bank = (er32(EECD) & E1000_EECD_SEC1VAL) ? 1 : 0; |
8252 | 8252 | ||
8253 | /* Adjust offset appropriately if we're on bank 1 - adjust for word size */ | 8253 | /* Adjust offset appropriately if we're on bank 1 - adjust for word size */ |
8254 | bank_offset = flash_bank * (hw->flash_bank_size * 2); | 8254 | bank_offset = flash_bank * (hw->flash_bank_size * 2); |
@@ -8813,32 +8813,32 @@ static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw) | |||
8813 | return E1000_SUCCESS; | 8813 | return E1000_SUCCESS; |
8814 | 8814 | ||
8815 | /* Check if SW needs to configure the PHY */ | 8815 | /* Check if SW needs to configure the PHY */ |

8816 | reg_data = E1000_READ_REG(hw, FEXTNVM); | 8816 | reg_data = er32(FEXTNVM); |
8817 | if (!(reg_data & FEXTNVM_SW_CONFIG)) | 8817 | if (!(reg_data & FEXTNVM_SW_CONFIG)) |
8818 | return E1000_SUCCESS; | 8818 | return E1000_SUCCESS; |
8819 | 8819 | ||
8820 | /* Wait for basic configuration to complete before proceeding */ | 8820 | /* Wait for basic configuration to complete before proceeding */ |
8821 | loop = 0; | 8821 | loop = 0; |
8822 | do { | 8822 | do { |
8823 | reg_data = E1000_READ_REG(hw, STATUS) & E1000_STATUS_LAN_INIT_DONE; | 8823 | reg_data = er32(STATUS) & E1000_STATUS_LAN_INIT_DONE; |
8824 | udelay(100); | 8824 | udelay(100); |
8825 | loop++; | 8825 | loop++; |
8826 | } while ((!reg_data) && (loop < 50)); | 8826 | } while ((!reg_data) && (loop < 50)); |
8827 | 8827 | ||
8828 | /* Clear the Init Done bit for the next init event */ | 8828 | /* Clear the Init Done bit for the next init event */ |
8829 | reg_data = E1000_READ_REG(hw, STATUS); | 8829 | reg_data = er32(STATUS); |
8830 | reg_data &= ~E1000_STATUS_LAN_INIT_DONE; | 8830 | reg_data &= ~E1000_STATUS_LAN_INIT_DONE; |
8831 | E1000_WRITE_REG(hw, STATUS, reg_data); | 8831 | ew32(STATUS, reg_data); |
8832 | 8832 | ||
8833 | /* Make sure HW does not configure LCD from PHY extended configuration | 8833 | /* Make sure HW does not configure LCD from PHY extended configuration |
8834 | before SW configuration */ | 8834 | before SW configuration */ |
8835 | reg_data = E1000_READ_REG(hw, EXTCNF_CTRL); | 8835 | reg_data = er32(EXTCNF_CTRL); |
8836 | if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) { | 8836 | if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) { |
8837 | reg_data = E1000_READ_REG(hw, EXTCNF_SIZE); | 8837 | reg_data = er32(EXTCNF_SIZE); |
8838 | cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH; | 8838 | cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH; |
8839 | cnf_size >>= 16; | 8839 | cnf_size >>= 16; |
8840 | if (cnf_size) { | 8840 | if (cnf_size) { |
8841 | reg_data = E1000_READ_REG(hw, EXTCNF_CTRL); | 8841 | reg_data = er32(EXTCNF_CTRL); |
8842 | cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER; | 8842 | cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER; |
8843 | /* cnf_base_addr is in DWORD */ | 8843 | /* cnf_base_addr is in DWORD */ |
8844 | cnf_base_addr >>= 16; | 8844 | cnf_base_addr >>= 16; |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 9a20ba39346b..ad1f052c3d3a 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -269,12 +269,13 @@ module_exit(e1000_exit_module); | |||
269 | 269 | ||
270 | static int e1000_request_irq(struct e1000_adapter *adapter) | 270 | static int e1000_request_irq(struct e1000_adapter *adapter) |
271 | { | 271 | { |
272 | struct e1000_hw *hw = &adapter->hw; | ||
272 | struct net_device *netdev = adapter->netdev; | 273 | struct net_device *netdev = adapter->netdev; |
273 | irq_handler_t handler = e1000_intr; | 274 | irq_handler_t handler = e1000_intr; |
274 | int irq_flags = IRQF_SHARED; | 275 | int irq_flags = IRQF_SHARED; |
275 | int err; | 276 | int err; |
276 | 277 | ||
277 | if (adapter->hw.mac_type >= e1000_82571) { | 278 | if (hw->mac_type >= e1000_82571) { |
278 | adapter->have_msi = !pci_enable_msi(adapter->pdev); | 279 | adapter->have_msi = !pci_enable_msi(adapter->pdev); |
279 | if (adapter->have_msi) { | 280 | if (adapter->have_msi) { |
280 | handler = e1000_intr_msi; | 281 | handler = e1000_intr_msi; |
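
From this hunk on, every converted function in e1000_main.c gains a local struct e1000_hw *hw = &adapter->hw; near the top. That local is not just shorthand: the new er32()/ew32() macros (and E1000_WRITE_FLUSH(), next hunk) reference a variable named hw directly, so each caller has to provide one. The before/after shape of the conversion, with an illustrative function name:

/* Before: the register macros took the hw pointer as an argument */
static void e1000_example_before(struct e1000_adapter *adapter)
{
	E1000_WRITE_REG(&adapter->hw, IMC, ~0);
}

/* After: a local named 'hw' is hoisted so er32()/ew32() resolve against it */
static void e1000_example_after(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
}
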
@@ -311,8 +312,10 @@ static void e1000_free_irq(struct e1000_adapter *adapter) | |||
311 | 312 | ||
312 | static void e1000_irq_disable(struct e1000_adapter *adapter) | 313 | static void e1000_irq_disable(struct e1000_adapter *adapter) |
313 | { | 314 | { |
314 | E1000_WRITE_REG(&adapter->hw, IMC, ~0); | 315 | struct e1000_hw *hw = &adapter->hw; |
315 | E1000_WRITE_FLUSH(&adapter->hw); | 316 | |
317 | ew32(IMC, ~0); | ||
318 | E1000_WRITE_FLUSH(); | ||
316 | synchronize_irq(adapter->pdev->irq); | 319 | synchronize_irq(adapter->pdev->irq); |
317 | } | 320 | } |
318 | 321 | ||
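
The hunk above also drops the argument from E1000_WRITE_FLUSH(&adapter->hw), leaving a bare E1000_WRITE_FLUSH(). A plausible post-patch definition, again an assumption about e1000_osdep.h: flush posted MMIO writes by reading back a harmless register through the same in-scope hw.

/* Assumed definition: a read of STATUS forces posted writes out to the device */
#define E1000_WRITE_FLUSH() er32(STATUS)

Any function that calls it therefore also needs the hoisted hw local, which is why e1000_irq_disable() and e1000_irq_enable() both gain one.
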
@@ -323,18 +326,21 @@ static void e1000_irq_disable(struct e1000_adapter *adapter) | |||
323 | 326 | ||
324 | static void e1000_irq_enable(struct e1000_adapter *adapter) | 327 | static void e1000_irq_enable(struct e1000_adapter *adapter) |
325 | { | 328 | { |
326 | E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); | 329 | struct e1000_hw *hw = &adapter->hw; |
327 | E1000_WRITE_FLUSH(&adapter->hw); | 330 | |
331 | ew32(IMS, IMS_ENABLE_MASK); | ||
332 | E1000_WRITE_FLUSH(); | ||
328 | } | 333 | } |
329 | 334 | ||
330 | static void e1000_update_mng_vlan(struct e1000_adapter *adapter) | 335 | static void e1000_update_mng_vlan(struct e1000_adapter *adapter) |
331 | { | 336 | { |
337 | struct e1000_hw *hw = &adapter->hw; | ||
332 | struct net_device *netdev = adapter->netdev; | 338 | struct net_device *netdev = adapter->netdev; |
333 | u16 vid = adapter->hw.mng_cookie.vlan_id; | 339 | u16 vid = hw->mng_cookie.vlan_id; |
334 | u16 old_vid = adapter->mng_vlan_id; | 340 | u16 old_vid = adapter->mng_vlan_id; |
335 | if (adapter->vlgrp) { | 341 | if (adapter->vlgrp) { |
336 | if (!vlan_group_get_device(adapter->vlgrp, vid)) { | 342 | if (!vlan_group_get_device(adapter->vlgrp, vid)) { |
337 | if (adapter->hw.mng_cookie.status & | 343 | if (hw->mng_cookie.status & |
338 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { | 344 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { |
339 | e1000_vlan_rx_add_vid(netdev, vid); | 345 | e1000_vlan_rx_add_vid(netdev, vid); |
340 | adapter->mng_vlan_id = vid; | 346 | adapter->mng_vlan_id = vid; |
@@ -365,21 +371,20 @@ static void e1000_release_hw_control(struct e1000_adapter *adapter) | |||
365 | { | 371 | { |
366 | u32 ctrl_ext; | 372 | u32 ctrl_ext; |
367 | u32 swsm; | 373 | u32 swsm; |
374 | struct e1000_hw *hw = &adapter->hw; | ||
368 | 375 | ||
369 | /* Let firmware take over control of h/w */ | 376 | /* Let firmware take over control of h/w */ |
370 | switch (adapter->hw.mac_type) { | 377 | switch (hw->mac_type) { |
371 | case e1000_82573: | 378 | case e1000_82573: |
372 | swsm = E1000_READ_REG(&adapter->hw, SWSM); | 379 | swsm = er32(SWSM); |
373 | E1000_WRITE_REG(&adapter->hw, SWSM, | 380 | ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); |
374 | swsm & ~E1000_SWSM_DRV_LOAD); | ||
375 | break; | 381 | break; |
376 | case e1000_82571: | 382 | case e1000_82571: |
377 | case e1000_82572: | 383 | case e1000_82572: |
378 | case e1000_80003es2lan: | 384 | case e1000_80003es2lan: |
379 | case e1000_ich8lan: | 385 | case e1000_ich8lan: |
380 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); | 386 | ctrl_ext = er32(CTRL_EXT); |
381 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, | 387 | ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); |
382 | ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); | ||
383 | break; | 388 | break; |
384 | default: | 389 | default: |
385 | break; | 390 | break; |
@@ -401,21 +406,20 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter) | |||
401 | { | 406 | { |
402 | u32 ctrl_ext; | 407 | u32 ctrl_ext; |
403 | u32 swsm; | 408 | u32 swsm; |
409 | struct e1000_hw *hw = &adapter->hw; | ||
404 | 410 | ||
405 | /* Let firmware know the driver has taken over */ | 411 | /* Let firmware know the driver has taken over */ |
406 | switch (adapter->hw.mac_type) { | 412 | switch (hw->mac_type) { |
407 | case e1000_82573: | 413 | case e1000_82573: |
408 | swsm = E1000_READ_REG(&adapter->hw, SWSM); | 414 | swsm = er32(SWSM); |
409 | E1000_WRITE_REG(&adapter->hw, SWSM, | 415 | ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); |
410 | swsm | E1000_SWSM_DRV_LOAD); | ||
411 | break; | 416 | break; |
412 | case e1000_82571: | 417 | case e1000_82571: |
413 | case e1000_82572: | 418 | case e1000_82572: |
414 | case e1000_80003es2lan: | 419 | case e1000_80003es2lan: |
415 | case e1000_ich8lan: | 420 | case e1000_ich8lan: |
416 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); | 421 | ctrl_ext = er32(CTRL_EXT); |
417 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, | 422 | ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); |
418 | ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); | ||
419 | break; | 423 | break; |
420 | default: | 424 | default: |
421 | break; | 425 | break; |
@@ -424,8 +428,10 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter) | |||
424 | 428 | ||
425 | static void e1000_init_manageability(struct e1000_adapter *adapter) | 429 | static void e1000_init_manageability(struct e1000_adapter *adapter) |
426 | { | 430 | { |
431 | struct e1000_hw *hw = &adapter->hw; | ||
432 | |||
427 | if (adapter->en_mng_pt) { | 433 | if (adapter->en_mng_pt) { |
428 | u32 manc = E1000_READ_REG(&adapter->hw, MANC); | 434 | u32 manc = er32(MANC); |
429 | 435 | ||
430 | /* disable hardware interception of ARP */ | 436 | /* disable hardware interception of ARP */ |
431 | manc &= ~(E1000_MANC_ARP_EN); | 437 | manc &= ~(E1000_MANC_ARP_EN); |
@@ -433,36 +439,38 @@ static void e1000_init_manageability(struct e1000_adapter *adapter) | |||
433 | /* enable receiving management packets to the host */ | 439 | /* enable receiving management packets to the host */ |
434 | /* this will probably generate destination unreachable messages | 440 | /* this will probably generate destination unreachable messages |
435 | * from the host OS, but the packets will be handled on SMBUS */ | 441 | * from the host OS, but the packets will be handled on SMBUS */ |
436 | if (adapter->hw.has_manc2h) { | 442 | if (hw->has_manc2h) { |
437 | u32 manc2h = E1000_READ_REG(&adapter->hw, MANC2H); | 443 | u32 manc2h = er32(MANC2H); |
438 | 444 | ||
439 | manc |= E1000_MANC_EN_MNG2HOST; | 445 | manc |= E1000_MANC_EN_MNG2HOST; |
440 | #define E1000_MNG2HOST_PORT_623 (1 << 5) | 446 | #define E1000_MNG2HOST_PORT_623 (1 << 5) |
441 | #define E1000_MNG2HOST_PORT_664 (1 << 6) | 447 | #define E1000_MNG2HOST_PORT_664 (1 << 6) |
442 | manc2h |= E1000_MNG2HOST_PORT_623; | 448 | manc2h |= E1000_MNG2HOST_PORT_623; |
443 | manc2h |= E1000_MNG2HOST_PORT_664; | 449 | manc2h |= E1000_MNG2HOST_PORT_664; |
444 | E1000_WRITE_REG(&adapter->hw, MANC2H, manc2h); | 450 | ew32(MANC2H, manc2h); |
445 | } | 451 | } |
446 | 452 | ||
447 | E1000_WRITE_REG(&adapter->hw, MANC, manc); | 453 | ew32(MANC, manc); |
448 | } | 454 | } |
449 | } | 455 | } |
450 | 456 | ||
451 | static void e1000_release_manageability(struct e1000_adapter *adapter) | 457 | static void e1000_release_manageability(struct e1000_adapter *adapter) |
452 | { | 458 | { |
459 | struct e1000_hw *hw = &adapter->hw; | ||
460 | |||
453 | if (adapter->en_mng_pt) { | 461 | if (adapter->en_mng_pt) { |
454 | u32 manc = E1000_READ_REG(&adapter->hw, MANC); | 462 | u32 manc = er32(MANC); |
455 | 463 | ||
456 | /* re-enable hardware interception of ARP */ | 464 | /* re-enable hardware interception of ARP */ |
457 | manc |= E1000_MANC_ARP_EN; | 465 | manc |= E1000_MANC_ARP_EN; |
458 | 466 | ||
459 | if (adapter->hw.has_manc2h) | 467 | if (hw->has_manc2h) |
460 | manc &= ~E1000_MANC_EN_MNG2HOST; | 468 | manc &= ~E1000_MANC_EN_MNG2HOST; |
461 | 469 | ||
462 | /* don't explicitly have to mess with MANC2H since | 470 | /* don't explicitly have to mess with MANC2H since |
463 | * MANC has an enable/disable bit that gates MANC2H */ | 471 |
464 | 472 | ||
465 | E1000_WRITE_REG(&adapter->hw, MANC, manc); | 473 | ew32(MANC, manc); |
466 | } | 474 | } |
467 | } | 475 | } |
468 | 476 | ||
@@ -497,6 +505,8 @@ static void e1000_configure(struct e1000_adapter *adapter) | |||
497 | 505 | ||
498 | int e1000_up(struct e1000_adapter *adapter) | 506 | int e1000_up(struct e1000_adapter *adapter) |
499 | { | 507 | { |
508 | struct e1000_hw *hw = &adapter->hw; | ||
509 | |||
500 | /* hardware has been reset, we need to reload some things */ | 510 | /* hardware has been reset, we need to reload some things */ |
501 | e1000_configure(adapter); | 511 | e1000_configure(adapter); |
502 | 512 | ||
@@ -508,7 +518,7 @@ int e1000_up(struct e1000_adapter *adapter) | |||
508 | e1000_irq_enable(adapter); | 518 | e1000_irq_enable(adapter); |
509 | 519 | ||
510 | /* fire a link change interrupt to start the watchdog */ | 520 | /* fire a link change interrupt to start the watchdog */ |
511 | E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC); | 521 | ew32(ICS, E1000_ICS_LSC); |
512 | return 0; | 522 | return 0; |
513 | } | 523 | } |
514 | 524 | ||
@@ -524,30 +534,33 @@ int e1000_up(struct e1000_adapter *adapter) | |||
524 | 534 | ||
525 | void e1000_power_up_phy(struct e1000_adapter *adapter) | 535 | void e1000_power_up_phy(struct e1000_adapter *adapter) |
526 | { | 536 | { |
537 | struct e1000_hw *hw = &adapter->hw; | ||
527 | u16 mii_reg = 0; | 538 | u16 mii_reg = 0; |
528 | 539 | ||
529 | /* Just clear the power down bit to wake the phy back up */ | 540 | /* Just clear the power down bit to wake the phy back up */ |
530 | if (adapter->hw.media_type == e1000_media_type_copper) { | 541 | if (hw->media_type == e1000_media_type_copper) { |
531 | /* according to the manual, the phy will retain its | 542 | /* according to the manual, the phy will retain its |
532 | * settings across a power-down/up cycle */ | 543 | * settings across a power-down/up cycle */ |
533 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); | 544 | e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); |
534 | mii_reg &= ~MII_CR_POWER_DOWN; | 545 | mii_reg &= ~MII_CR_POWER_DOWN; |
535 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); | 546 | e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); |
536 | } | 547 | } |
537 | } | 548 | } |
538 | 549 | ||
539 | static void e1000_power_down_phy(struct e1000_adapter *adapter) | 550 | static void e1000_power_down_phy(struct e1000_adapter *adapter) |
540 | { | 551 | { |
552 | struct e1000_hw *hw = &adapter->hw; | ||
553 | |||
541 | /* Power down the PHY so no link is implied when interface is down * | 554 | /* Power down the PHY so no link is implied when interface is down * |
542 | * The PHY cannot be powered down if any of the following is true * | 555 | * The PHY cannot be powered down if any of the following is true * |
543 | * (a) WoL is enabled | 556 | * (a) WoL is enabled |
544 | * (b) AMT is active | 557 | * (b) AMT is active |
545 | * (c) SoL/IDER session is active */ | 558 | * (c) SoL/IDER session is active */ |
546 | if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && | 559 | if (!adapter->wol && hw->mac_type >= e1000_82540 && |
547 | adapter->hw.media_type == e1000_media_type_copper) { | 560 | hw->media_type == e1000_media_type_copper) { |
548 | u16 mii_reg = 0; | 561 | u16 mii_reg = 0; |
549 | 562 | ||
550 | switch (adapter->hw.mac_type) { | 563 | switch (hw->mac_type) { |
551 | case e1000_82540: | 564 | case e1000_82540: |
552 | case e1000_82545: | 565 | case e1000_82545: |
553 | case e1000_82545_rev_3: | 566 | case e1000_82545_rev_3: |
@@ -557,8 +570,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) | |||
557 | case e1000_82541_rev_2: | 570 | case e1000_82541_rev_2: |
558 | case e1000_82547: | 571 | case e1000_82547: |
559 | case e1000_82547_rev_2: | 572 | case e1000_82547_rev_2: |
560 | if (E1000_READ_REG(&adapter->hw, MANC) & | 573 | if (er32(MANC) & E1000_MANC_SMBUS_EN) |
561 | E1000_MANC_SMBUS_EN) | ||
562 | goto out; | 574 | goto out; |
563 | break; | 575 | break; |
564 | case e1000_82571: | 576 | case e1000_82571: |
@@ -566,16 +578,16 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) | |||
566 | case e1000_82573: | 578 | case e1000_82573: |
567 | case e1000_80003es2lan: | 579 | case e1000_80003es2lan: |
568 | case e1000_ich8lan: | 580 | case e1000_ich8lan: |
569 | if (e1000_check_mng_mode(&adapter->hw) || | 581 | if (e1000_check_mng_mode(hw) || |
570 | e1000_check_phy_reset_block(&adapter->hw)) | 582 | e1000_check_phy_reset_block(hw)) |
571 | goto out; | 583 | goto out; |
572 | break; | 584 | break; |
573 | default: | 585 | default: |
574 | goto out; | 586 | goto out; |
575 | } | 587 | } |
576 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); | 588 | e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); |
577 | mii_reg |= MII_CR_POWER_DOWN; | 589 | mii_reg |= MII_CR_POWER_DOWN; |
578 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); | 590 | e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); |
579 | mdelay(1); | 591 | mdelay(1); |
580 | } | 592 | } |
581 | out: | 593 | out: |
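
Both e1000_power_up_phy() and e1000_power_down_phy() follow the same read-modify-write pattern over MDIO: read PHY_CTRL, flip MII_CR_POWER_DOWN, write it back. If that pattern keeps recurring it could be factored into a small helper along these lines; e1000_phy_update_bits() is a hypothetical name, not part of this patch:

/* Hypothetical helper, not in this patch: RMW a PHY register over MDIO */
static s32 e1000_phy_update_bits(struct e1000_hw *hw, u32 reg,
				 u16 clear, u16 set)
{
	u16 val = 0;
	s32 ret;

	ret = e1000_read_phy_reg(hw, reg, &val);
	if (ret)
		return ret;
	val = (val & ~clear) | set;
	return e1000_write_phy_reg(hw, reg, val);
}

With it, powering the PHY down would read as e1000_phy_update_bits(hw, PHY_CTRL, 0, MII_CR_POWER_DOWN), and powering it up as the inverse.
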
@@ -622,6 +634,7 @@ void e1000_reinit_locked(struct e1000_adapter *adapter) | |||
622 | 634 | ||
623 | void e1000_reset(struct e1000_adapter *adapter) | 635 | void e1000_reset(struct e1000_adapter *adapter) |
624 | { | 636 | { |
637 | struct e1000_hw *hw = &adapter->hw; | ||
625 | u32 pba = 0, tx_space, min_tx_space, min_rx_space; | 638 | u32 pba = 0, tx_space, min_tx_space, min_rx_space; |
626 | u16 fc_high_water_mark = E1000_FC_HIGH_DIFF; | 639 | u16 fc_high_water_mark = E1000_FC_HIGH_DIFF; |
627 | bool legacy_pba_adjust = false; | 640 | bool legacy_pba_adjust = false; |
@@ -630,7 +643,7 @@ void e1000_reset(struct e1000_adapter *adapter) | |||
630 | * To take effect CTRL.RST is required. | 643 | * To take effect CTRL.RST is required. |
631 | */ | 644 | */ |
632 | 645 | ||
633 | switch (adapter->hw.mac_type) { | 646 | switch (hw->mac_type) { |
634 | case e1000_82542_rev2_0: | 647 | case e1000_82542_rev2_0: |
635 | case e1000_82542_rev2_1: | 648 | case e1000_82542_rev2_1: |
636 | case e1000_82543: | 649 | case e1000_82543: |
@@ -671,16 +684,16 @@ void e1000_reset(struct e1000_adapter *adapter) | |||
671 | if (adapter->netdev->mtu > E1000_RXBUFFER_8192) | 684 | if (adapter->netdev->mtu > E1000_RXBUFFER_8192) |
672 | pba -= 8; /* allocate more FIFO for Tx */ | 685 | pba -= 8; /* allocate more FIFO for Tx */ |
673 | 686 | ||
674 | if (adapter->hw.mac_type == e1000_82547) { | 687 | if (hw->mac_type == e1000_82547) { |
675 | adapter->tx_fifo_head = 0; | 688 | adapter->tx_fifo_head = 0; |
676 | adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; | 689 | adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; |
677 | adapter->tx_fifo_size = | 690 | adapter->tx_fifo_size = |
678 | (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; | 691 | (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; |
679 | atomic_set(&adapter->tx_fifo_stall, 0); | 692 | atomic_set(&adapter->tx_fifo_stall, 0); |
680 | } | 693 | } |
681 | } else if (adapter->hw.max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) { | 694 | } else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) { |
682 | /* adjust PBA for jumbo frames */ | 695 | /* adjust PBA for jumbo frames */ |
683 | E1000_WRITE_REG(&adapter->hw, PBA, pba); | 696 | ew32(PBA, pba); |
684 | 697 | ||
685 | /* To maintain wire speed transmits, the Tx FIFO should be | 698 | /* To maintain wire speed transmits, the Tx FIFO should be |
686 | * large enough to accommodate two full transmit packets, | 699 |
@@ -688,7 +701,7 @@ void e1000_reset(struct e1000_adapter *adapter) | |||
688 | * the Rx FIFO should be large enough to accommodate at least | 701 |
689 | * one full receive packet and is similarly rounded up and | 702 | * one full receive packet and is similarly rounded up and |
690 | * expressed in KB. */ | 703 | * expressed in KB. */ |
691 | pba = E1000_READ_REG(&adapter->hw, PBA); | 704 | pba = er32(PBA); |
692 | /* upper 16 bits has Tx packet buffer allocation size in KB */ | 705 | /* upper 16 bits has Tx packet buffer allocation size in KB */ |
693 | tx_space = pba >> 16; | 706 | tx_space = pba >> 16; |
694 | /* lower 16 bits has Rx packet buffer allocation size in KB */ | 707 | /* lower 16 bits has Rx packet buffer allocation size in KB */ |
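
For jumbo-frame MTUs the packet buffer allocation register is reread and split between Tx and Rx as the comments above describe: the upper 16 bits carry the Tx share in KB, the lower 16 bits the Rx share. A made-up value makes the arithmetic concrete:

/* Illustrative only: if PBA reads back as 0x00100030,
 *   tx_space = pba >> 16    = 0x0010 = 16 KB reserved for Tx
 *   rx share = pba & 0xffff = 0x0030 = 48 KB left for Rx      */
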
@@ -711,7 +724,7 @@ void e1000_reset(struct e1000_adapter *adapter) | |||
711 | pba = pba - (min_tx_space - tx_space); | 724 | pba = pba - (min_tx_space - tx_space); |
712 | 725 | ||
713 | /* PCI/PCIx hardware has PBA alignment constraints */ | 726 | /* PCI/PCIx hardware has PBA alignment constraints */ |
714 | switch (adapter->hw.mac_type) { | 727 | switch (hw->mac_type) { |
715 | case e1000_82545 ... e1000_82546_rev_3: | 728 | case e1000_82545 ... e1000_82546_rev_3: |
716 | pba &= ~(E1000_PBA_8K - 1); | 729 | pba &= ~(E1000_PBA_8K - 1); |
717 | break; | 730 | break; |
@@ -722,7 +735,7 @@ void e1000_reset(struct e1000_adapter *adapter) | |||
722 | /* if short on rx space, rx wins and must trump tx | 735 | /* if short on rx space, rx wins and must trump tx |
723 | * adjustment or use Early Receive if available */ | 736 | * adjustment or use Early Receive if available */ |
724 | if (pba < min_rx_space) { | 737 | if (pba < min_rx_space) { |
725 | switch (adapter->hw.mac_type) { | 738 | switch (hw->mac_type) { |
726 | case e1000_82573: | 739 | case e1000_82573: |
727 | /* ERT enabled in e1000_configure_rx */ | 740 | /* ERT enabled in e1000_configure_rx */ |
728 | break; | 741 | break; |
@@ -734,7 +747,7 @@ void e1000_reset(struct e1000_adapter *adapter) | |||
734 | } | 747 | } |
735 | } | 748 | } |
736 | 749 | ||
737 | E1000_WRITE_REG(&adapter->hw, PBA, pba); | 750 | ew32(PBA, pba); |
738 | 751 | ||
739 | /* flow control settings */ | 752 | /* flow control settings */ |
740 | /* Set the FC high water mark to 90% of the FIFO size. | 753 | /* Set the FC high water mark to 90% of the FIFO size. |
@@ -747,54 +760,54 @@ void e1000_reset(struct e1000_adapter *adapter) | |||
747 | if (pba < E1000_PBA_16K) | 760 | if (pba < E1000_PBA_16K) |
748 | fc_high_water_mark = (pba * 1024) - 1600; | 761 | fc_high_water_mark = (pba * 1024) - 1600; |
749 | 762 | ||
750 | adapter->hw.fc_high_water = fc_high_water_mark; | 763 | hw->fc_high_water = fc_high_water_mark; |
751 | adapter->hw.fc_low_water = fc_high_water_mark - 8; | 764 | hw->fc_low_water = fc_high_water_mark - 8; |
752 | if (adapter->hw.mac_type == e1000_80003es2lan) | 765 | if (hw->mac_type == e1000_80003es2lan) |
753 | adapter->hw.fc_pause_time = 0xFFFF; | 766 | hw->fc_pause_time = 0xFFFF; |
754 | else | 767 | else |
755 | adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; | 768 | hw->fc_pause_time = E1000_FC_PAUSE_TIME; |
756 | adapter->hw.fc_send_xon = 1; | 769 | hw->fc_send_xon = 1; |
757 | adapter->hw.fc = adapter->hw.original_fc; | 770 | hw->fc = hw->original_fc; |
758 | 771 | ||
759 | /* Allow time for pending master requests to run */ | 772 | /* Allow time for pending master requests to run */ |
760 | e1000_reset_hw(&adapter->hw); | 773 | e1000_reset_hw(hw); |
761 | if (adapter->hw.mac_type >= e1000_82544) | 774 | if (hw->mac_type >= e1000_82544) |
762 | E1000_WRITE_REG(&adapter->hw, WUC, 0); | 775 | ew32(WUC, 0); |
763 | 776 | ||
764 | if (e1000_init_hw(&adapter->hw)) | 777 | if (e1000_init_hw(hw)) |
765 | DPRINTK(PROBE, ERR, "Hardware Error\n"); | 778 | DPRINTK(PROBE, ERR, "Hardware Error\n"); |
766 | e1000_update_mng_vlan(adapter); | 779 | e1000_update_mng_vlan(adapter); |
767 | 780 | ||
768 | /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ | 781 | /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ |
769 | if (adapter->hw.mac_type >= e1000_82544 && | 782 | if (hw->mac_type >= e1000_82544 && |
770 | adapter->hw.mac_type <= e1000_82547_rev_2 && | 783 | hw->mac_type <= e1000_82547_rev_2 && |
771 | adapter->hw.autoneg == 1 && | 784 | hw->autoneg == 1 && |
772 | adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) { | 785 | hw->autoneg_advertised == ADVERTISE_1000_FULL) { |
773 | u32 ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 786 | u32 ctrl = er32(CTRL); |
774 | /* clear phy power management bit if we are in gig only mode, | 787 | /* clear phy power management bit if we are in gig only mode, |
775 | * which if enabled will attempt negotiation to 100Mb, which | 788 | * which if enabled will attempt negotiation to 100Mb, which |
776 | * can cause a loss of link at power off or driver unload */ | 789 | * can cause a loss of link at power off or driver unload */ |
777 | ctrl &= ~E1000_CTRL_SWDPIN3; | 790 | ctrl &= ~E1000_CTRL_SWDPIN3; |
778 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 791 | ew32(CTRL, ctrl); |
779 | } | 792 | } |
780 | 793 | ||
781 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ | 794 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ |
782 | E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE); | 795 | ew32(VET, ETHERNET_IEEE_VLAN_TYPE); |
783 | 796 | ||
784 | e1000_reset_adaptive(&adapter->hw); | 797 | e1000_reset_adaptive(hw); |
785 | e1000_phy_get_info(&adapter->hw, &adapter->phy_info); | 798 | e1000_phy_get_info(hw, &adapter->phy_info); |
786 | 799 | ||
787 | if (!adapter->smart_power_down && | 800 | if (!adapter->smart_power_down && |
788 | (adapter->hw.mac_type == e1000_82571 || | 801 | (hw->mac_type == e1000_82571 || |
789 | adapter->hw.mac_type == e1000_82572)) { | 802 | hw->mac_type == e1000_82572)) { |
790 | u16 phy_data = 0; | 803 | u16 phy_data = 0; |
791 | /* speed up time to link by disabling smart power down, ignore | 804 | /* speed up time to link by disabling smart power down, ignore |
792 | * the return value of this function because there is nothing | 805 | * the return value of this function because there is nothing |
793 | * different we would do if it failed */ | 806 | * different we would do if it failed */ |
794 | e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, | 807 | e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, |
795 | &phy_data); | 808 | &phy_data); |
796 | phy_data &= ~IGP02E1000_PM_SPD; | 809 | phy_data &= ~IGP02E1000_PM_SPD; |
797 | e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, | 810 | e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, |
798 | phy_data); | 811 | phy_data); |
799 | } | 812 | } |
800 | 813 | ||
@@ -871,12 +884,12 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter) | |||
871 | * and a hardware reset occur. | 884 | * and a hardware reset occur. |
872 | **/ | 885 | **/ |
873 | 886 | ||
874 | static int __devinit | 887 | static int __devinit e1000_probe(struct pci_dev *pdev, |
875 | e1000_probe(struct pci_dev *pdev, | 888 | const struct pci_device_id *ent) |
876 | const struct pci_device_id *ent) | ||
877 | { | 889 | { |
878 | struct net_device *netdev; | 890 | struct net_device *netdev; |
879 | struct e1000_adapter *adapter; | 891 | struct e1000_adapter *adapter; |
892 | struct e1000_hw *hw; | ||
880 | 893 | ||
881 | static int cards_found = 0; | 894 | static int cards_found = 0; |
882 | static int global_quad_port_a = 0; /* global ksp3 port a indication */ | 895 | static int global_quad_port_a = 0; /* global ksp3 port a indication */ |
@@ -916,20 +929,22 @@ e1000_probe(struct pci_dev *pdev, | |||
916 | adapter = netdev_priv(netdev); | 929 | adapter = netdev_priv(netdev); |
917 | adapter->netdev = netdev; | 930 | adapter->netdev = netdev; |
918 | adapter->pdev = pdev; | 931 | adapter->pdev = pdev; |
919 | adapter->hw.back = adapter; | ||
920 | adapter->msg_enable = (1 << debug) - 1; | 932 | adapter->msg_enable = (1 << debug) - 1; |
921 | 933 | ||
934 | hw = &adapter->hw; | ||
935 | hw->back = adapter; | ||
936 | |||
922 | err = -EIO; | 937 | err = -EIO; |
923 | adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0), | 938 | hw->hw_addr = ioremap(pci_resource_start(pdev, BAR_0), |
924 | pci_resource_len(pdev, BAR_0)); | 939 | pci_resource_len(pdev, BAR_0)); |
925 | if (!adapter->hw.hw_addr) | 940 | if (!hw->hw_addr) |
926 | goto err_ioremap; | 941 | goto err_ioremap; |
927 | 942 | ||
928 | for (i = BAR_1; i <= BAR_5; i++) { | 943 | for (i = BAR_1; i <= BAR_5; i++) { |
929 | if (pci_resource_len(pdev, i) == 0) | 944 | if (pci_resource_len(pdev, i) == 0) |
930 | continue; | 945 | continue; |
931 | if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { | 946 | if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { |
932 | adapter->hw.io_base = pci_resource_start(pdev, i); | 947 | hw->io_base = pci_resource_start(pdev, i); |
933 | break; | 948 | break; |
934 | } | 949 | } |
935 | } | 950 | } |
@@ -966,43 +981,43 @@ e1000_probe(struct pci_dev *pdev, | |||
966 | err = -EIO; | 981 | err = -EIO; |
967 | /* Flash BAR mapping must happen after e1000_sw_init | 982 | /* Flash BAR mapping must happen after e1000_sw_init |
968 | * because it depends on mac_type */ | 983 | * because it depends on mac_type */ |
969 | if ((adapter->hw.mac_type == e1000_ich8lan) && | 984 | if ((hw->mac_type == e1000_ich8lan) && |
970 | (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { | 985 | (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { |
971 | adapter->hw.flash_address = | 986 | hw->flash_address = |
972 | ioremap(pci_resource_start(pdev, 1), | 987 | ioremap(pci_resource_start(pdev, 1), |
973 | pci_resource_len(pdev, 1)); | 988 | pci_resource_len(pdev, 1)); |
974 | if (!adapter->hw.flash_address) | 989 | if (!hw->flash_address) |
975 | goto err_flashmap; | 990 | goto err_flashmap; |
976 | } | 991 | } |
977 | 992 | ||
978 | if (e1000_check_phy_reset_block(&adapter->hw)) | 993 | if (e1000_check_phy_reset_block(hw)) |
979 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); | 994 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); |
980 | 995 | ||
981 | if (adapter->hw.mac_type >= e1000_82543) { | 996 | if (hw->mac_type >= e1000_82543) { |
982 | netdev->features = NETIF_F_SG | | 997 | netdev->features = NETIF_F_SG | |
983 | NETIF_F_HW_CSUM | | 998 | NETIF_F_HW_CSUM | |
984 | NETIF_F_HW_VLAN_TX | | 999 | NETIF_F_HW_VLAN_TX | |
985 | NETIF_F_HW_VLAN_RX | | 1000 | NETIF_F_HW_VLAN_RX | |
986 | NETIF_F_HW_VLAN_FILTER; | 1001 | NETIF_F_HW_VLAN_FILTER; |
987 | if (adapter->hw.mac_type == e1000_ich8lan) | 1002 | if (hw->mac_type == e1000_ich8lan) |
988 | netdev->features &= ~NETIF_F_HW_VLAN_FILTER; | 1003 | netdev->features &= ~NETIF_F_HW_VLAN_FILTER; |
989 | } | 1004 | } |
990 | 1005 | ||
991 | if ((adapter->hw.mac_type >= e1000_82544) && | 1006 | if ((hw->mac_type >= e1000_82544) && |
992 | (adapter->hw.mac_type != e1000_82547)) | 1007 | (hw->mac_type != e1000_82547)) |
993 | netdev->features |= NETIF_F_TSO; | 1008 | netdev->features |= NETIF_F_TSO; |
994 | 1009 | ||
995 | if (adapter->hw.mac_type > e1000_82547_rev_2) | 1010 | if (hw->mac_type > e1000_82547_rev_2) |
996 | netdev->features |= NETIF_F_TSO6; | 1011 | netdev->features |= NETIF_F_TSO6; |
997 | if (pci_using_dac) | 1012 | if (pci_using_dac) |
998 | netdev->features |= NETIF_F_HIGHDMA; | 1013 | netdev->features |= NETIF_F_HIGHDMA; |
999 | 1014 | ||
1000 | netdev->features |= NETIF_F_LLTX; | 1015 | netdev->features |= NETIF_F_LLTX; |
1001 | 1016 | ||
1002 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); | 1017 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); |
1003 | 1018 | ||
1004 | /* initialize eeprom parameters */ | 1019 | /* initialize eeprom parameters */ |
1005 | if (e1000_init_eeprom_params(&adapter->hw)) { | 1020 | if (e1000_init_eeprom_params(hw)) { |
1006 | E1000_ERR("EEPROM initialization failed\n"); | 1021 | E1000_ERR("EEPROM initialization failed\n"); |
1007 | goto err_eeprom; | 1022 | goto err_eeprom; |
1008 | } | 1023 | } |
@@ -1010,10 +1025,10 @@ e1000_probe(struct pci_dev *pdev, | |||
1010 | /* before reading the EEPROM, reset the controller to | 1025 | /* before reading the EEPROM, reset the controller to |
1011 | * put the device in a known good starting state */ | 1026 | * put the device in a known good starting state */ |
1012 | 1027 | ||
1013 | e1000_reset_hw(&adapter->hw); | 1028 | e1000_reset_hw(hw); |
1014 | 1029 | ||
1015 | /* make sure the EEPROM is good */ | 1030 | /* make sure the EEPROM is good */ |
1016 | if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) { | 1031 | if (e1000_validate_eeprom_checksum(hw) < 0) { |
1017 | DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); | 1032 | DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); |
1018 | e1000_dump_eeprom(adapter); | 1033 | e1000_dump_eeprom(adapter); |
1019 | /* | 1034 | /* |
@@ -1024,20 +1039,20 @@ e1000_probe(struct pci_dev *pdev, | |||
1024 | * interface after manually setting a hw addr using | 1039 | * interface after manually setting a hw addr using |
1025 | * `ip set address` | 1040 | * `ip set address` |
1026 | */ | 1041 | */ |
1027 | memset(adapter->hw.mac_addr, 0, netdev->addr_len); | 1042 | memset(hw->mac_addr, 0, netdev->addr_len); |
1028 | } else { | 1043 | } else { |
1029 | /* copy the MAC address out of the EEPROM */ | 1044 | /* copy the MAC address out of the EEPROM */ |
1030 | if (e1000_read_mac_addr(&adapter->hw)) | 1045 | if (e1000_read_mac_addr(hw)) |
1031 | DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); | 1046 | DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); |
1032 | } | 1047 | } |
1033 | /* don't block initialization here due to bad MAC address */ | 1048 |
1034 | memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); | 1049 | memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); |
1035 | memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); | 1050 | memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len); |
1036 | 1051 | ||
1037 | if (!is_valid_ether_addr(netdev->perm_addr)) | 1052 | if (!is_valid_ether_addr(netdev->perm_addr)) |
1038 | DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); | 1053 | DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); |
1039 | 1054 | ||
1040 | e1000_get_bus_info(&adapter->hw); | 1055 | e1000_get_bus_info(hw); |
1041 | 1056 | ||
1042 | init_timer(&adapter->tx_fifo_stall_timer); | 1057 | init_timer(&adapter->tx_fifo_stall_timer); |
1043 | adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall; | 1058 | adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall; |
@@ -1060,18 +1075,18 @@ e1000_probe(struct pci_dev *pdev, | |||
1060 | * enable the ACPI Magic Packet filter | 1075 | * enable the ACPI Magic Packet filter |
1061 | */ | 1076 | */ |
1062 | 1077 | ||
1063 | switch (adapter->hw.mac_type) { | 1078 | switch (hw->mac_type) { |
1064 | case e1000_82542_rev2_0: | 1079 | case e1000_82542_rev2_0: |
1065 | case e1000_82542_rev2_1: | 1080 | case e1000_82542_rev2_1: |
1066 | case e1000_82543: | 1081 | case e1000_82543: |
1067 | break; | 1082 | break; |
1068 | case e1000_82544: | 1083 | case e1000_82544: |
1069 | e1000_read_eeprom(&adapter->hw, | 1084 | e1000_read_eeprom(hw, |
1070 | EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); | 1085 | EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); |
1071 | eeprom_apme_mask = E1000_EEPROM_82544_APM; | 1086 | eeprom_apme_mask = E1000_EEPROM_82544_APM; |
1072 | break; | 1087 | break; |
1073 | case e1000_ich8lan: | 1088 | case e1000_ich8lan: |
1074 | e1000_read_eeprom(&adapter->hw, | 1089 | e1000_read_eeprom(hw, |
1075 | EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data); | 1090 | EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data); |
1076 | eeprom_apme_mask = E1000_EEPROM_ICH8_APME; | 1091 | eeprom_apme_mask = E1000_EEPROM_ICH8_APME; |
1077 | break; | 1092 | break; |
@@ -1079,14 +1094,14 @@ e1000_probe(struct pci_dev *pdev, | |||
1079 | case e1000_82546_rev_3: | 1094 | case e1000_82546_rev_3: |
1080 | case e1000_82571: | 1095 | case e1000_82571: |
1081 | case e1000_80003es2lan: | 1096 | case e1000_80003es2lan: |
1082 | if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ | 1097 | if (er32(STATUS) & E1000_STATUS_FUNC_1){ |
1083 | e1000_read_eeprom(&adapter->hw, | 1098 | e1000_read_eeprom(hw, |
1084 | EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); | 1099 | EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); |
1085 | break; | 1100 | break; |
1086 | } | 1101 | } |
1087 | /* Fall Through */ | 1102 | /* Fall Through */ |
1088 | default: | 1103 | default: |
1089 | e1000_read_eeprom(&adapter->hw, | 1104 | e1000_read_eeprom(hw, |
1090 | EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); | 1105 | EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); |
1091 | break; | 1106 | break; |
1092 | } | 1107 | } |
@@ -1105,7 +1120,7 @@ e1000_probe(struct pci_dev *pdev, | |||
1105 | case E1000_DEV_ID_82571EB_FIBER: | 1120 | case E1000_DEV_ID_82571EB_FIBER: |
1106 | /* Wake events only supported on port A for dual fiber | 1121 | /* Wake events only supported on port A for dual fiber |
1107 | * regardless of eeprom setting */ | 1122 | * regardless of eeprom setting */ |
1108 | if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1) | 1123 | if (er32(STATUS) & E1000_STATUS_FUNC_1) |
1109 | adapter->eeprom_wol = 0; | 1124 | adapter->eeprom_wol = 0; |
1110 | break; | 1125 | break; |
1111 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: | 1126 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: |
@@ -1128,8 +1143,6 @@ e1000_probe(struct pci_dev *pdev, | |||
1128 | adapter->wol = adapter->eeprom_wol; | 1143 | adapter->wol = adapter->eeprom_wol; |
1129 | 1144 | ||
1130 | /* print bus type/speed/width info */ | 1145 | /* print bus type/speed/width info */ |
1131 | { | ||
1132 | struct e1000_hw *hw = &adapter->hw; | ||
1133 | DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ", | 1146 | DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ", |
1134 | ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : | 1147 | ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : |
1135 | (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")), | 1148 | (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")), |
@@ -1142,11 +1155,10 @@ e1000_probe(struct pci_dev *pdev, | |||
1142 | (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" : | 1155 | (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" : |
1143 | (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" : | 1156 | (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" : |
1144 | "32-bit")); | 1157 | "32-bit")); |
1145 | } | ||
1146 | 1158 | ||
1147 | printk("%s\n", print_mac(mac, netdev->dev_addr)); | 1159 | printk("%s\n", print_mac(mac, netdev->dev_addr)); |
1148 | 1160 | ||
1149 | if (adapter->hw.bus_type == e1000_bus_type_pci_express) { | 1161 | if (hw->bus_type == e1000_bus_type_pci_express) { |
1150 | DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no " | 1162 | DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no " |
1151 | "longer be supported by this driver in the future.\n", | 1163 | "longer be supported by this driver in the future.\n", |
1152 | pdev->vendor, pdev->device); | 1164 | pdev->vendor, pdev->device); |
@@ -1161,8 +1173,8 @@ e1000_probe(struct pci_dev *pdev, | |||
1161 | * DRV_LOAD until the interface is up. For all other cases, | 1173 | * DRV_LOAD until the interface is up. For all other cases, |
1162 | * let the f/w know that the h/w is now under the control | 1174 | * let the f/w know that the h/w is now under the control |
1163 | * of the driver. */ | 1175 | * of the driver. */ |
1164 | if (adapter->hw.mac_type != e1000_82573 || | 1176 | if (hw->mac_type != e1000_82573 || |
1165 | !e1000_check_mng_mode(&adapter->hw)) | 1177 | !e1000_check_mng_mode(hw)) |
1166 | e1000_get_hw_control(adapter); | 1178 | e1000_get_hw_control(adapter); |
1167 | 1179 | ||
1168 | /* tell the stack to leave us alone until e1000_open() is called */ | 1180 | /* tell the stack to leave us alone until e1000_open() is called */ |
@@ -1181,11 +1193,11 @@ e1000_probe(struct pci_dev *pdev, | |||
1181 | err_register: | 1193 | err_register: |
1182 | e1000_release_hw_control(adapter); | 1194 | e1000_release_hw_control(adapter); |
1183 | err_eeprom: | 1195 | err_eeprom: |
1184 | if (!e1000_check_phy_reset_block(&adapter->hw)) | 1196 | if (!e1000_check_phy_reset_block(hw)) |
1185 | e1000_phy_hw_reset(&adapter->hw); | 1197 | e1000_phy_hw_reset(hw); |
1186 | 1198 | ||
1187 | if (adapter->hw.flash_address) | 1199 | if (hw->flash_address) |
1188 | iounmap(adapter->hw.flash_address); | 1200 | iounmap(hw->flash_address); |
1189 | err_flashmap: | 1201 | err_flashmap: |
1190 | #ifdef CONFIG_E1000_NAPI | 1202 | #ifdef CONFIG_E1000_NAPI |
1191 | for (i = 0; i < adapter->num_rx_queues; i++) | 1203 | for (i = 0; i < adapter->num_rx_queues; i++) |
@@ -1198,7 +1210,7 @@ err_flashmap: | |||
1198 | kfree(adapter->polling_netdev); | 1210 | kfree(adapter->polling_netdev); |
1199 | #endif | 1211 | #endif |
1200 | err_sw_init: | 1212 | err_sw_init: |
1201 | iounmap(adapter->hw.hw_addr); | 1213 | iounmap(hw->hw_addr); |
1202 | err_ioremap: | 1214 | err_ioremap: |
1203 | free_netdev(netdev); | 1215 | free_netdev(netdev); |
1204 | err_alloc_etherdev: | 1216 | err_alloc_etherdev: |
@@ -1223,6 +1235,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev) | |||
1223 | { | 1235 | { |
1224 | struct net_device *netdev = pci_get_drvdata(pdev); | 1236 | struct net_device *netdev = pci_get_drvdata(pdev); |
1225 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1237 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1238 | struct e1000_hw *hw = &adapter->hw; | ||
1226 | #ifdef CONFIG_E1000_NAPI | 1239 | #ifdef CONFIG_E1000_NAPI |
1227 | int i; | 1240 | int i; |
1228 | #endif | 1241 | #endif |
@@ -1242,8 +1255,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev) | |||
1242 | 1255 | ||
1243 | unregister_netdev(netdev); | 1256 | unregister_netdev(netdev); |
1244 | 1257 | ||
1245 | if (!e1000_check_phy_reset_block(&adapter->hw)) | 1258 | if (!e1000_check_phy_reset_block(hw)) |
1246 | e1000_phy_hw_reset(&adapter->hw); | 1259 | e1000_phy_hw_reset(hw); |
1247 | 1260 | ||
1248 | kfree(adapter->tx_ring); | 1261 | kfree(adapter->tx_ring); |
1249 | kfree(adapter->rx_ring); | 1262 | kfree(adapter->rx_ring); |
@@ -1251,9 +1264,9 @@ static void __devexit e1000_remove(struct pci_dev *pdev) | |||
1251 | kfree(adapter->polling_netdev); | 1264 | kfree(adapter->polling_netdev); |
1252 | #endif | 1265 | #endif |
1253 | 1266 | ||
1254 | iounmap(adapter->hw.hw_addr); | 1267 | iounmap(hw->hw_addr); |
1255 | if (adapter->hw.flash_address) | 1268 | if (hw->flash_address) |
1256 | iounmap(adapter->hw.flash_address); | 1269 | iounmap(hw->flash_address); |
1257 | pci_release_regions(pdev); | 1270 | pci_release_regions(pdev); |
1258 | 1271 | ||
1259 | free_netdev(netdev); | 1272 | free_netdev(netdev); |
@@ -1407,6 +1420,7 @@ static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) | |||
1407 | static int e1000_open(struct net_device *netdev) | 1420 | static int e1000_open(struct net_device *netdev) |
1408 | { | 1421 | { |
1409 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1422 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1423 | struct e1000_hw *hw = &adapter->hw; | ||
1410 | int err; | 1424 | int err; |
1411 | 1425 | ||
1412 | /* disallow open during test */ | 1426 | /* disallow open during test */ |
@@ -1426,15 +1440,15 @@ static int e1000_open(struct net_device *netdev) | |||
1426 | e1000_power_up_phy(adapter); | 1440 | e1000_power_up_phy(adapter); |
1427 | 1441 | ||
1428 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 1442 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; |
1429 | if ((adapter->hw.mng_cookie.status & | 1443 | if ((hw->mng_cookie.status & |
1430 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { | 1444 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { |
1431 | e1000_update_mng_vlan(adapter); | 1445 | e1000_update_mng_vlan(adapter); |
1432 | } | 1446 | } |
1433 | 1447 | ||
1434 | /* If AMT is enabled, let the firmware know that the network | 1448 | /* If AMT is enabled, let the firmware know that the network |
1435 | * interface is now open */ | 1449 | * interface is now open */ |
1436 | if (adapter->hw.mac_type == e1000_82573 && | 1450 | if (hw->mac_type == e1000_82573 && |
1437 | e1000_check_mng_mode(&adapter->hw)) | 1451 | e1000_check_mng_mode(hw)) |
1438 | e1000_get_hw_control(adapter); | 1452 | e1000_get_hw_control(adapter); |
1439 | 1453 | ||
1440 | /* before we allocate an interrupt, we must be ready to handle it. | 1454 | /* before we allocate an interrupt, we must be ready to handle it. |
@@ -1459,7 +1473,7 @@ static int e1000_open(struct net_device *netdev) | |||
1459 | netif_start_queue(netdev); | 1473 | netif_start_queue(netdev); |
1460 | 1474 | ||
1461 | /* fire a link status change interrupt to start the watchdog */ | 1475 | /* fire a link status change interrupt to start the watchdog */ |
1462 | E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC); | 1476 | ew32(ICS, E1000_ICS_LSC); |
1463 | 1477 | ||
1464 | return E1000_SUCCESS; | 1478 | return E1000_SUCCESS; |
1465 | 1479 | ||
@@ -1490,6 +1504,7 @@ err_setup_tx: | |||
1490 | static int e1000_close(struct net_device *netdev) | 1504 | static int e1000_close(struct net_device *netdev) |
1491 | { | 1505 | { |
1492 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1506 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1507 | struct e1000_hw *hw = &adapter->hw; | ||
1493 | 1508 | ||
1494 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); | 1509 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); |
1495 | e1000_down(adapter); | 1510 | e1000_down(adapter); |
@@ -1501,7 +1516,7 @@ static int e1000_close(struct net_device *netdev) | |||
1501 | 1516 | ||
1502 | /* kill manageability vlan ID if supported, but not if a vlan with | 1517 | /* kill manageability vlan ID if supported, but not if a vlan with |
1503 | * the same ID is registered on the host OS (let 8021q kill it) */ | 1518 | * the same ID is registered on the host OS (let 8021q kill it) */ |
1504 | if ((adapter->hw.mng_cookie.status & | 1519 | if ((hw->mng_cookie.status & |
1505 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | 1520 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && |
1506 | !(adapter->vlgrp && | 1521 | !(adapter->vlgrp && |
1507 | vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) { | 1522 | vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) { |
@@ -1510,8 +1525,8 @@ static int e1000_close(struct net_device *netdev) | |||
1510 | 1525 | ||
1511 | /* If AMT is enabled, let the firmware know that the network | 1526 | /* If AMT is enabled, let the firmware know that the network |
1512 | * interface is now closed */ | 1527 | * interface is now closed */ |
1513 | if (adapter->hw.mac_type == e1000_82573 && | 1528 | if (hw->mac_type == e1000_82573 && |
1514 | e1000_check_mng_mode(&adapter->hw)) | 1529 | e1000_check_mng_mode(hw)) |
1515 | e1000_release_hw_control(adapter); | 1530 | e1000_release_hw_control(adapter); |
1516 | 1531 | ||
1517 | return 0; | 1532 | return 0; |
@@ -1526,13 +1541,14 @@ static int e1000_close(struct net_device *netdev) | |||
1526 | static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, | 1541 | static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, |
1527 | unsigned long len) | 1542 | unsigned long len) |
1528 | { | 1543 | { |
1544 | struct e1000_hw *hw = &adapter->hw; | ||
1529 | unsigned long begin = (unsigned long) start; | 1545 | unsigned long begin = (unsigned long) start; |
1530 | unsigned long end = begin + len; | 1546 | unsigned long end = begin + len; |
1531 | 1547 | ||
1532 | /* First rev 82545 and 82546 need to not allow any memory | 1548 | /* First rev 82545 and 82546 need to not allow any memory |
1533 | * write location to cross 64k boundary due to errata 23 */ | 1549 | * write location to cross 64k boundary due to errata 23 */ |
1534 | if (adapter->hw.mac_type == e1000_82545 || | 1550 | if (hw->mac_type == e1000_82545 || |
1535 | adapter->hw.mac_type == e1000_82546) { | 1551 | hw->mac_type == e1000_82546) { |
1536 | return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; | 1552 | return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; |
1537 | } | 1553 | } |
1538 | 1554 | ||
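
e1000_check_64k_bound() exists because of errata 23 on first-revision 82545/82546 parts: a DMA buffer must not straddle a 64 KB boundary. The XOR-and-shift test above compares the upper address bits of the first and last byte; two examples with made-up addresses show how it trips:

/* begin = 0x1ff00, len = 0x200  ->  end - 1 = 0x200ff
 *   (0x1ff00 ^ 0x200ff) >> 16 = 0x3  (nonzero: crosses a 64 KB boundary)
 * begin = 0x10000, len = 0x100  ->  end - 1 = 0x100ff
 *   (0x10000 ^ 0x100ff) >> 16 = 0    (same 64 KB region: allowed)        */
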
@@ -1663,18 +1679,18 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
1663 | tdba = adapter->tx_ring[0].dma; | 1679 | tdba = adapter->tx_ring[0].dma; |
1664 | tdlen = adapter->tx_ring[0].count * | 1680 | tdlen = adapter->tx_ring[0].count * |
1665 | sizeof(struct e1000_tx_desc); | 1681 | sizeof(struct e1000_tx_desc); |
1666 | E1000_WRITE_REG(hw, TDLEN, tdlen); | 1682 | ew32(TDLEN, tdlen); |
1667 | E1000_WRITE_REG(hw, TDBAH, (tdba >> 32)); | 1683 | ew32(TDBAH, (tdba >> 32)); |
1668 | E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); | 1684 | ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); |
1669 | E1000_WRITE_REG(hw, TDT, 0); | 1685 | ew32(TDT, 0); |
1670 | E1000_WRITE_REG(hw, TDH, 0); | 1686 | ew32(TDH, 0); |
1671 | adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH); | 1687 | adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH); |
1672 | adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT); | 1688 | adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT); |
1673 | break; | 1689 | break; |
1674 | } | 1690 | } |
1675 | 1691 | ||
1676 | /* Set the default values for the Tx Inter Packet Gap timer */ | 1692 | /* Set the default values for the Tx Inter Packet Gap timer */ |
1677 | if (adapter->hw.mac_type <= e1000_82547_rev_2 && | 1693 | if (hw->mac_type <= e1000_82547_rev_2 && |
1678 | (hw->media_type == e1000_media_type_fiber || | 1694 | (hw->media_type == e1000_media_type_fiber || |
1679 | hw->media_type == e1000_media_type_internal_serdes)) | 1695 | hw->media_type == e1000_media_type_internal_serdes)) |
1680 | tipg = DEFAULT_82543_TIPG_IPGT_FIBER; | 1696 | tipg = DEFAULT_82543_TIPG_IPGT_FIBER; |
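
The descriptor ring programming above splits the 64-bit DMA base address across two 32-bit registers, high half in TDBAH and low half in TDBAL. With a made-up bus address the split looks like this:

/* Illustrative: tdba = 0x0000001234567000ULL
 *   TDBAH = tdba >> 32                   = 0x00000012
 *   TDBAL = tdba & 0x00000000ffffffffULL = 0x34567000  */
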
@@ -1699,34 +1715,34 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
1699 | } | 1715 | } |
1700 | tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; | 1716 | tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; |
1701 | tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; | 1717 | tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; |
1702 | E1000_WRITE_REG(hw, TIPG, tipg); | 1718 | ew32(TIPG, tipg); |
1703 | 1719 | ||
1704 | /* Set the Tx Interrupt Delay register */ | 1720 | /* Set the Tx Interrupt Delay register */ |
1705 | 1721 | ||
1706 | E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay); | 1722 | ew32(TIDV, adapter->tx_int_delay); |
1707 | if (hw->mac_type >= e1000_82540) | 1723 | if (hw->mac_type >= e1000_82540) |
1708 | E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay); | 1724 | ew32(TADV, adapter->tx_abs_int_delay); |
1709 | 1725 | ||
1710 | /* Program the Transmit Control Register */ | 1726 | /* Program the Transmit Control Register */ |
1711 | 1727 | ||
1712 | tctl = E1000_READ_REG(hw, TCTL); | 1728 | tctl = er32(TCTL); |
1713 | tctl &= ~E1000_TCTL_CT; | 1729 | tctl &= ~E1000_TCTL_CT; |
1714 | tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | | 1730 | tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | |
1715 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); | 1731 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); |
1716 | 1732 | ||
1717 | if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) { | 1733 | if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) { |
1718 | tarc = E1000_READ_REG(hw, TARC0); | 1734 | tarc = er32(TARC0); |
1719 | /* set the speed mode bit, we'll clear it if we're not at | 1735 | /* set the speed mode bit, we'll clear it if we're not at |
1720 | * gigabit link later */ | 1736 | * gigabit link later */ |
1721 | tarc |= (1 << 21); | 1737 | tarc |= (1 << 21); |
1722 | E1000_WRITE_REG(hw, TARC0, tarc); | 1738 | ew32(TARC0, tarc); |
1723 | } else if (hw->mac_type == e1000_80003es2lan) { | 1739 | } else if (hw->mac_type == e1000_80003es2lan) { |
1724 | tarc = E1000_READ_REG(hw, TARC0); | 1740 | tarc = er32(TARC0); |
1725 | tarc |= 1; | 1741 | tarc |= 1; |
1726 | E1000_WRITE_REG(hw, TARC0, tarc); | 1742 | ew32(TARC0, tarc); |
1727 | tarc = E1000_READ_REG(hw, TARC1); | 1743 | tarc = er32(TARC1); |
1728 | tarc |= 1; | 1744 | tarc |= 1; |
1729 | E1000_WRITE_REG(hw, TARC1, tarc); | 1745 | ew32(TARC1, tarc); |
1730 | } | 1746 | } |
1731 | 1747 | ||
1732 | e1000_config_collision_dist(hw); | 1748 | e1000_config_collision_dist(hw); |
@@ -1749,7 +1765,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
1749 | hw->bus_type == e1000_bus_type_pcix) | 1765 | hw->bus_type == e1000_bus_type_pcix) |
1750 | adapter->pcix_82544 = 1; | 1766 | adapter->pcix_82544 = 1; |
1751 | 1767 | ||
1752 | E1000_WRITE_REG(hw, TCTL, tctl); | 1768 | ew32(TCTL, tctl); |
1753 | 1769 | ||
1754 | } | 1770 | } |
1755 | 1771 | ||
@@ -1764,6 +1780,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
1764 | static int e1000_setup_rx_resources(struct e1000_adapter *adapter, | 1780 | static int e1000_setup_rx_resources(struct e1000_adapter *adapter, |
1765 | struct e1000_rx_ring *rxdr) | 1781 | struct e1000_rx_ring *rxdr) |
1766 | { | 1782 | { |
1783 | struct e1000_hw *hw = &adapter->hw; | ||
1767 | struct pci_dev *pdev = adapter->pdev; | 1784 | struct pci_dev *pdev = adapter->pdev; |
1768 | int size, desc_len; | 1785 | int size, desc_len; |
1769 | 1786 | ||
@@ -1796,7 +1813,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, | |||
1796 | return -ENOMEM; | 1813 | return -ENOMEM; |
1797 | } | 1814 | } |
1798 | 1815 | ||
1799 | if (adapter->hw.mac_type <= e1000_82547_rev_2) | 1816 | if (hw->mac_type <= e1000_82547_rev_2) |
1800 | desc_len = sizeof(struct e1000_rx_desc); | 1817 | desc_len = sizeof(struct e1000_rx_desc); |
1801 | else | 1818 | else |
1802 | desc_len = sizeof(union e1000_rx_desc_packet_split); | 1819 | desc_len = sizeof(union e1000_rx_desc_packet_split); |
@@ -1892,21 +1909,22 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) | |||
1892 | (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) | 1909 | (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) |
1893 | static void e1000_setup_rctl(struct e1000_adapter *adapter) | 1910 | static void e1000_setup_rctl(struct e1000_adapter *adapter) |
1894 | { | 1911 | { |
1912 | struct e1000_hw *hw = &adapter->hw; | ||
1895 | u32 rctl, rfctl; | 1913 | u32 rctl, rfctl; |
1896 | u32 psrctl = 0; | 1914 | u32 psrctl = 0; |
1897 | #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT | 1915 | #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT |
1898 | u32 pages = 0; | 1916 | u32 pages = 0; |
1899 | #endif | 1917 | #endif |
1900 | 1918 | ||
1901 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 1919 | rctl = er32(RCTL); |
1902 | 1920 | ||
1903 | rctl &= ~(3 << E1000_RCTL_MO_SHIFT); | 1921 | rctl &= ~(3 << E1000_RCTL_MO_SHIFT); |
1904 | 1922 | ||
1905 | rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | | 1923 | rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | |
1906 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | 1924 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | |
1907 | (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); | 1925 | (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); |
1908 | 1926 | ||
1909 | if (adapter->hw.tbi_compatibility_on == 1) | 1927 | if (hw->tbi_compatibility_on == 1) |
1910 | rctl |= E1000_RCTL_SBP; | 1928 | rctl |= E1000_RCTL_SBP; |
1911 | else | 1929 | else |
1912 | rctl &= ~E1000_RCTL_SBP; | 1930 | rctl &= ~E1000_RCTL_SBP; |
@@ -1959,7 +1977,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1959 | /* allocations using alloc_page take too long for regular MTU | 1977 | /* allocations using alloc_page take too long for regular MTU |
1960 | * so only enable packet split for jumbo frames */ | 1978 | * so only enable packet split for jumbo frames */ |
1961 | pages = PAGE_USE_COUNT(adapter->netdev->mtu); | 1979 | pages = PAGE_USE_COUNT(adapter->netdev->mtu); |
1962 | if ((adapter->hw.mac_type >= e1000_82571) && (pages <= 3) && | 1980 | if ((hw->mac_type >= e1000_82571) && (pages <= 3) && |
1963 | PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE)) | 1981 | PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE)) |
1964 | adapter->rx_ps_pages = pages; | 1982 | adapter->rx_ps_pages = pages; |
1965 | else | 1983 | else |
@@ -1967,14 +1985,14 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1967 | #endif | 1985 | #endif |
1968 | if (adapter->rx_ps_pages) { | 1986 | if (adapter->rx_ps_pages) { |
1969 | /* Configure extra packet-split registers */ | 1987 | /* Configure extra packet-split registers */ |
1970 | rfctl = E1000_READ_REG(&adapter->hw, RFCTL); | 1988 | rfctl = er32(RFCTL); |
1971 | rfctl |= E1000_RFCTL_EXTEN; | 1989 | rfctl |= E1000_RFCTL_EXTEN; |
1972 | /* disable packet split support for IPv6 extension headers, | 1990 | /* disable packet split support for IPv6 extension headers, |
1973 | * because some malformed IPv6 headers can hang the RX */ | 1991 | * because some malformed IPv6 headers can hang the RX */ |
1974 | rfctl |= (E1000_RFCTL_IPV6_EX_DIS | | 1992 | rfctl |= (E1000_RFCTL_IPV6_EX_DIS | |
1975 | E1000_RFCTL_NEW_IPV6_EXT_DIS); | 1993 | E1000_RFCTL_NEW_IPV6_EXT_DIS); |
1976 | 1994 | ||
1977 | E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); | 1995 | ew32(RFCTL, rfctl); |
1978 | 1996 | ||
1979 | rctl |= E1000_RCTL_DTYP_PS; | 1997 | rctl |= E1000_RCTL_DTYP_PS; |
1980 | 1998 | ||
@@ -1994,10 +2012,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1994 | break; | 2012 | break; |
1995 | } | 2013 | } |
1996 | 2014 | ||
1997 | E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl); | 2015 | ew32(PSRCTL, psrctl); |
1998 | } | 2016 | } |
1999 | 2017 | ||
2000 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 2018 | ew32(RCTL, rctl); |
2001 | } | 2019 | } |
2002 | 2020 | ||
2003 | /** | 2021 | /** |
@@ -2027,30 +2045,29 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2027 | } | 2045 | } |
2028 | 2046 | ||
2029 | /* disable receives while setting up the descriptors */ | 2047 | /* disable receives while setting up the descriptors */ |
2030 | rctl = E1000_READ_REG(hw, RCTL); | 2048 | rctl = er32(RCTL); |
2031 | E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); | 2049 | ew32(RCTL, rctl & ~E1000_RCTL_EN); |
2032 | 2050 | ||
2033 | /* set the Receive Delay Timer Register */ | 2051 | /* set the Receive Delay Timer Register */ |
2034 | E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay); | 2052 | ew32(RDTR, adapter->rx_int_delay); |
2035 | 2053 | ||
2036 | if (hw->mac_type >= e1000_82540) { | 2054 | if (hw->mac_type >= e1000_82540) { |
2037 | E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); | 2055 | ew32(RADV, adapter->rx_abs_int_delay); |
2038 | if (adapter->itr_setting != 0) | 2056 | if (adapter->itr_setting != 0) |
2039 | E1000_WRITE_REG(hw, ITR, | 2057 | ew32(ITR, 1000000000 / (adapter->itr * 256)); |
2040 | 1000000000 / (adapter->itr * 256)); | ||
2041 | } | 2058 | } |
2042 | 2059 | ||
2043 | if (hw->mac_type >= e1000_82571) { | 2060 | if (hw->mac_type >= e1000_82571) { |
2044 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 2061 | ctrl_ext = er32(CTRL_EXT); |
2045 | /* Reset delay timers after every interrupt */ | 2062 | /* Reset delay timers after every interrupt */ |
2046 | ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; | 2063 | ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; |
2047 | #ifdef CONFIG_E1000_NAPI | 2064 | #ifdef CONFIG_E1000_NAPI |
2048 | /* Auto-Mask interrupts upon ICR access */ | 2065 | /* Auto-Mask interrupts upon ICR access */ |
2049 | ctrl_ext |= E1000_CTRL_EXT_IAME; | 2066 | ctrl_ext |= E1000_CTRL_EXT_IAME; |
2050 | E1000_WRITE_REG(hw, IAM, 0xffffffff); | 2067 | ew32(IAM, 0xffffffff); |
2051 | #endif | 2068 | #endif |
2052 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 2069 | ew32(CTRL_EXT, ctrl_ext); |
2053 | E1000_WRITE_FLUSH(hw); | 2070 | E1000_WRITE_FLUSH(); |
2054 | } | 2071 | } |
2055 | 2072 | ||
2056 | /* Setup the HW Rx Head and Tail Descriptor Pointers and | 2073 | /* Setup the HW Rx Head and Tail Descriptor Pointers and |
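The ITR write folded onto one line above encodes the conversion from the user-visible interrupt rate to the register's native unit: the throttle register counts the minimum gap between interrupts in 256 ns increments, so 1000000000 / (adapter->itr * 256) turns "interrupts per second" into "256 ns units per interrupt". A small illustrative helper (hypothetical name, not part of the patch):

    /* adapter->itr is in interrupts/sec; the ITR register wants the
     * inter-interrupt gap in 256 ns units.
     * Example: 8000 ints/sec -> 1000000000 / (8000 * 256) = 488 units,
     * and 488 * 256 ns ~= 125 us between interrupts (= 1/8000 s). */
    static inline u32 e1000_itr_to_reg(u32 ints_per_sec)
    {
            return 1000000000 / (ints_per_sec * 256);
    }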
@@ -2059,11 +2076,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2059 | case 1: | 2076 | case 1: |
2060 | default: | 2077 | default: |
2061 | rdba = adapter->rx_ring[0].dma; | 2078 | rdba = adapter->rx_ring[0].dma; |
2062 | E1000_WRITE_REG(hw, RDLEN, rdlen); | 2079 | ew32(RDLEN, rdlen); |
2063 | E1000_WRITE_REG(hw, RDBAH, (rdba >> 32)); | 2080 | ew32(RDBAH, (rdba >> 32)); |
2064 | E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL)); | 2081 | ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); |
2065 | E1000_WRITE_REG(hw, RDT, 0); | 2082 | ew32(RDT, 0); |
2066 | E1000_WRITE_REG(hw, RDH, 0); | 2083 | ew32(RDH, 0); |
2067 | adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH); | 2084 | adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH); |
2068 | adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT); | 2085 | adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT); |
2069 | break; | 2086 | break; |
@@ -2071,7 +2088,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2071 | 2088 | ||
2072 | /* Enable 82543 Receive Checksum Offload for TCP and UDP */ | 2089 | /* Enable 82543 Receive Checksum Offload for TCP and UDP */ |
2073 | if (hw->mac_type >= e1000_82543) { | 2090 | if (hw->mac_type >= e1000_82543) { |
2074 | rxcsum = E1000_READ_REG(hw, RXCSUM); | 2091 | rxcsum = er32(RXCSUM); |
2075 | if (adapter->rx_csum) { | 2092 | if (adapter->rx_csum) { |
2076 | rxcsum |= E1000_RXCSUM_TUOFL; | 2093 | rxcsum |= E1000_RXCSUM_TUOFL; |
2077 | 2094 | ||
@@ -2085,17 +2102,17 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2085 | rxcsum &= ~E1000_RXCSUM_TUOFL; | 2102 | rxcsum &= ~E1000_RXCSUM_TUOFL; |
2086 | /* don't need to clear IPPCSE as it defaults to 0 */ | 2103 | /* don't need to clear IPPCSE as it defaults to 0 */ |
2087 | } | 2104 | } |
2088 | E1000_WRITE_REG(hw, RXCSUM, rxcsum); | 2105 | ew32(RXCSUM, rxcsum); |
2089 | } | 2106 | } |
2090 | 2107 | ||
2091 | /* enable early receives on 82573, only takes effect if using > 2048 | 2108 | /* enable early receives on 82573, only takes effect if using > 2048 |
2092 | * byte total frame size. for example only for jumbo frames */ | 2109 | * byte total frame size. for example only for jumbo frames */ |
2093 | #define E1000_ERT_2048 0x100 | 2110 | #define E1000_ERT_2048 0x100 |
2094 | if (hw->mac_type == e1000_82573) | 2111 | if (hw->mac_type == e1000_82573) |
2095 | E1000_WRITE_REG(hw, ERT, E1000_ERT_2048); | 2112 | ew32(ERT, E1000_ERT_2048); |
2096 | 2113 | ||
2097 | /* Enable Receives */ | 2114 | /* Enable Receives */ |
2098 | E1000_WRITE_REG(hw, RCTL, rctl); | 2115 | ew32(RCTL, rctl); |
2099 | } | 2116 | } |
2100 | 2117 | ||
2101 | /** | 2118 | /** |
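On the early-receive threshold written just above: E1000_ERT_2048 is 0x100, i.e. 256 decimal. Assuming the ERT register counts in 8-byte units (an assumption; the granularity is not stated in this hunk), that lines up with the 2048-byte cutoff mentioned in the comment:

    /* Assumed granularity: ERT counts 8-byte units (not confirmed by this hunk). */
    #define E1000_ERT_2048 0x100               /* 0x100 = 256 units */
    /* 256 units * 8 bytes/unit = 2048 bytes, so only frames larger than
     * 2048 bytes -- in practice jumbo frames -- trigger early receive. */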
@@ -2162,6 +2179,7 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, | |||
2162 | static void e1000_clean_tx_ring(struct e1000_adapter *adapter, | 2179 | static void e1000_clean_tx_ring(struct e1000_adapter *adapter, |
2163 | struct e1000_tx_ring *tx_ring) | 2180 | struct e1000_tx_ring *tx_ring) |
2164 | { | 2181 | { |
2182 | struct e1000_hw *hw = &adapter->hw; | ||
2165 | struct e1000_buffer *buffer_info; | 2183 | struct e1000_buffer *buffer_info; |
2166 | unsigned long size; | 2184 | unsigned long size; |
2167 | unsigned int i; | 2185 | unsigned int i; |
@@ -2184,8 +2202,8 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter, | |||
2184 | tx_ring->next_to_clean = 0; | 2202 | tx_ring->next_to_clean = 0; |
2185 | tx_ring->last_tx_tso = 0; | 2203 | tx_ring->last_tx_tso = 0; |
2186 | 2204 | ||
2187 | writel(0, adapter->hw.hw_addr + tx_ring->tdh); | 2205 | writel(0, hw->hw_addr + tx_ring->tdh); |
2188 | writel(0, adapter->hw.hw_addr + tx_ring->tdt); | 2206 | writel(0, hw->hw_addr + tx_ring->tdt); |
2189 | } | 2207 | } |
2190 | 2208 | ||
2191 | /** | 2209 | /** |
@@ -2252,6 +2270,7 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter) | |||
2252 | static void e1000_clean_rx_ring(struct e1000_adapter *adapter, | 2270 | static void e1000_clean_rx_ring(struct e1000_adapter *adapter, |
2253 | struct e1000_rx_ring *rx_ring) | 2271 | struct e1000_rx_ring *rx_ring) |
2254 | { | 2272 | { |
2273 | struct e1000_hw *hw = &adapter->hw; | ||
2255 | struct e1000_buffer *buffer_info; | 2274 | struct e1000_buffer *buffer_info; |
2256 | struct e1000_ps_page *ps_page; | 2275 | struct e1000_ps_page *ps_page; |
2257 | struct e1000_ps_page_dma *ps_page_dma; | 2276 | struct e1000_ps_page_dma *ps_page_dma; |
@@ -2298,8 +2317,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, | |||
2298 | rx_ring->next_to_clean = 0; | 2317 | rx_ring->next_to_clean = 0; |
2299 | rx_ring->next_to_use = 0; | 2318 | rx_ring->next_to_use = 0; |
2300 | 2319 | ||
2301 | writel(0, adapter->hw.hw_addr + rx_ring->rdh); | 2320 | writel(0, hw->hw_addr + rx_ring->rdh); |
2302 | writel(0, adapter->hw.hw_addr + rx_ring->rdt); | 2321 | writel(0, hw->hw_addr + rx_ring->rdt); |
2303 | } | 2322 | } |
2304 | 2323 | ||
2305 | /** | 2324 | /** |
@@ -2320,15 +2339,16 @@ static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) | |||
2320 | */ | 2339 | */ |
2321 | static void e1000_enter_82542_rst(struct e1000_adapter *adapter) | 2340 | static void e1000_enter_82542_rst(struct e1000_adapter *adapter) |
2322 | { | 2341 | { |
2342 | struct e1000_hw *hw = &adapter->hw; | ||
2323 | struct net_device *netdev = adapter->netdev; | 2343 | struct net_device *netdev = adapter->netdev; |
2324 | u32 rctl; | 2344 | u32 rctl; |
2325 | 2345 | ||
2326 | e1000_pci_clear_mwi(&adapter->hw); | 2346 | e1000_pci_clear_mwi(hw); |
2327 | 2347 | ||
2328 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 2348 | rctl = er32(RCTL); |
2329 | rctl |= E1000_RCTL_RST; | 2349 | rctl |= E1000_RCTL_RST; |
2330 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 2350 | ew32(RCTL, rctl); |
2331 | E1000_WRITE_FLUSH(&adapter->hw); | 2351 | E1000_WRITE_FLUSH(); |
2332 | mdelay(5); | 2352 | mdelay(5); |
2333 | 2353 | ||
2334 | if (netif_running(netdev)) | 2354 | if (netif_running(netdev)) |
@@ -2337,17 +2357,18 @@ static void e1000_enter_82542_rst(struct e1000_adapter *adapter) | |||
2337 | 2357 | ||
2338 | static void e1000_leave_82542_rst(struct e1000_adapter *adapter) | 2358 | static void e1000_leave_82542_rst(struct e1000_adapter *adapter) |
2339 | { | 2359 | { |
2360 | struct e1000_hw *hw = &adapter->hw; | ||
2340 | struct net_device *netdev = adapter->netdev; | 2361 | struct net_device *netdev = adapter->netdev; |
2341 | u32 rctl; | 2362 | u32 rctl; |
2342 | 2363 | ||
2343 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 2364 | rctl = er32(RCTL); |
2344 | rctl &= ~E1000_RCTL_RST; | 2365 | rctl &= ~E1000_RCTL_RST; |
2345 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 2366 | ew32(RCTL, rctl); |
2346 | E1000_WRITE_FLUSH(&adapter->hw); | 2367 | E1000_WRITE_FLUSH(); |
2347 | mdelay(5); | 2368 | mdelay(5); |
2348 | 2369 | ||
2349 | if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE) | 2370 | if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) |
2350 | e1000_pci_set_mwi(&adapter->hw); | 2371 | e1000_pci_set_mwi(hw); |
2351 | 2372 | ||
2352 | if (netif_running(netdev)) { | 2373 | if (netif_running(netdev)) { |
2353 | /* No need to loop, because 82542 supports only 1 queue */ | 2374 | /* No need to loop, because 82542 supports only 1 queue */ |
@@ -2368,6 +2389,7 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter) | |||
2368 | static int e1000_set_mac(struct net_device *netdev, void *p) | 2389 | static int e1000_set_mac(struct net_device *netdev, void *p) |
2369 | { | 2390 | { |
2370 | struct e1000_adapter *adapter = netdev_priv(netdev); | 2391 | struct e1000_adapter *adapter = netdev_priv(netdev); |
2392 | struct e1000_hw *hw = &adapter->hw; | ||
2371 | struct sockaddr *addr = p; | 2393 | struct sockaddr *addr = p; |
2372 | 2394 | ||
2373 | if (!is_valid_ether_addr(addr->sa_data)) | 2395 | if (!is_valid_ether_addr(addr->sa_data)) |
@@ -2375,19 +2397,19 @@ static int e1000_set_mac(struct net_device *netdev, void *p) | |||
2375 | 2397 | ||
2376 | /* 82542 2.0 needs to be in reset to write receive address registers */ | 2398 | /* 82542 2.0 needs to be in reset to write receive address registers */ |
2377 | 2399 | ||
2378 | if (adapter->hw.mac_type == e1000_82542_rev2_0) | 2400 | if (hw->mac_type == e1000_82542_rev2_0) |
2379 | e1000_enter_82542_rst(adapter); | 2401 | e1000_enter_82542_rst(adapter); |
2380 | 2402 | ||
2381 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 2403 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
2382 | memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); | 2404 | memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); |
2383 | 2405 | ||
2384 | e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); | 2406 | e1000_rar_set(hw, hw->mac_addr, 0); |
2385 | 2407 | ||
2386 | /* With 82571 controllers, LAA may be overwritten (with the default) | 2408 | /* With 82571 controllers, LAA may be overwritten (with the default) |
2387 | * due to controller reset from the other port. */ | 2409 | * due to controller reset from the other port. */ |
2388 | if (adapter->hw.mac_type == e1000_82571) { | 2410 | if (hw->mac_type == e1000_82571) { |
2389 | /* activate the work around */ | 2411 | /* activate the work around */ |
2390 | adapter->hw.laa_is_present = 1; | 2412 | hw->laa_is_present = 1; |
2391 | 2413 | ||
2392 | /* Hold a copy of the LAA in RAR[14] This is done so that | 2414 | /* Hold a copy of the LAA in RAR[14] This is done so that |
2393 | * between the time RAR[0] gets clobbered and the time it | 2415 | * between the time RAR[0] gets clobbered and the time it |
@@ -2395,11 +2417,11 @@ static int e1000_set_mac(struct net_device *netdev, void *p) | |||
2395 | * of the RARs and no incoming packets directed to this port | 2417 | * of the RARs and no incoming packets directed to this port |
2396 | * are dropped. Eventaully the LAA will be in RAR[0] and | 2418 | * are dropped. Eventaully the LAA will be in RAR[0] and |
2397 | * RAR[14] */ | 2419 | * RAR[14] */ |
2398 | e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, | 2420 | e1000_rar_set(hw, hw->mac_addr, |
2399 | E1000_RAR_ENTRIES - 1); | 2421 | E1000_RAR_ENTRIES - 1); |
2400 | } | 2422 | } |
2401 | 2423 | ||
2402 | if (adapter->hw.mac_type == e1000_82542_rev2_0) | 2424 | if (hw->mac_type == e1000_82542_rev2_0) |
2403 | e1000_leave_82542_rst(adapter); | 2425 | e1000_leave_82542_rst(adapter); |
2404 | 2426 | ||
2405 | return 0; | 2427 | return 0; |
@@ -2428,16 +2450,16 @@ static void e1000_set_rx_mode(struct net_device *netdev) | |||
2428 | E1000_NUM_MTA_REGISTERS_ICH8LAN : | 2450 | E1000_NUM_MTA_REGISTERS_ICH8LAN : |
2429 | E1000_NUM_MTA_REGISTERS; | 2451 | E1000_NUM_MTA_REGISTERS; |
2430 | 2452 | ||
2431 | if (adapter->hw.mac_type == e1000_ich8lan) | 2453 | if (hw->mac_type == e1000_ich8lan) |
2432 | rar_entries = E1000_RAR_ENTRIES_ICH8LAN; | 2454 | rar_entries = E1000_RAR_ENTRIES_ICH8LAN; |
2433 | 2455 | ||
2434 | /* reserve RAR[14] for LAA over-write work-around */ | 2456 | /* reserve RAR[14] for LAA over-write work-around */ |
2435 | if (adapter->hw.mac_type == e1000_82571) | 2457 | if (hw->mac_type == e1000_82571) |
2436 | rar_entries--; | 2458 | rar_entries--; |
2437 | 2459 | ||
2438 | /* Check for Promiscuous and All Multicast modes */ | 2460 | /* Check for Promiscuous and All Multicast modes */ |
2439 | 2461 | ||
2440 | rctl = E1000_READ_REG(hw, RCTL); | 2462 | rctl = er32(RCTL); |
2441 | 2463 | ||
2442 | if (netdev->flags & IFF_PROMISC) { | 2464 | if (netdev->flags & IFF_PROMISC) { |
2443 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); | 2465 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); |
@@ -2460,7 +2482,7 @@ static void e1000_set_rx_mode(struct net_device *netdev) | |||
2460 | uc_ptr = netdev->uc_list; | 2482 | uc_ptr = netdev->uc_list; |
2461 | } | 2483 | } |
2462 | 2484 | ||
2463 | E1000_WRITE_REG(hw, RCTL, rctl); | 2485 | ew32(RCTL, rctl); |
2464 | 2486 | ||
2465 | /* 82542 2.0 needs to be in reset to write receive address registers */ | 2487 | /* 82542 2.0 needs to be in reset to write receive address registers */ |
2466 | 2488 | ||
@@ -2486,9 +2508,9 @@ static void e1000_set_rx_mode(struct net_device *netdev) | |||
2486 | mc_ptr = mc_ptr->next; | 2508 | mc_ptr = mc_ptr->next; |
2487 | } else { | 2509 | } else { |
2488 | E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); | 2510 | E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); |
2489 | E1000_WRITE_FLUSH(hw); | 2511 | E1000_WRITE_FLUSH(); |
2490 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); | 2512 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); |
2491 | E1000_WRITE_FLUSH(hw); | 2513 | E1000_WRITE_FLUSH(); |
2492 | } | 2514 | } |
2493 | } | 2515 | } |
2494 | WARN_ON(uc_ptr != NULL); | 2516 | WARN_ON(uc_ptr != NULL); |
@@ -2497,7 +2519,7 @@ static void e1000_set_rx_mode(struct net_device *netdev) | |||
2497 | 2519 | ||
2498 | for (i = 0; i < mta_reg_count; i++) { | 2520 | for (i = 0; i < mta_reg_count; i++) { |
2499 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); | 2521 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); |
2500 | E1000_WRITE_FLUSH(hw); | 2522 | E1000_WRITE_FLUSH(); |
2501 | } | 2523 | } |
2502 | 2524 | ||
2503 | /* load any remaining addresses into the hash table */ | 2525 | /* load any remaining addresses into the hash table */ |
@@ -2517,7 +2539,8 @@ static void e1000_set_rx_mode(struct net_device *netdev) | |||
2517 | static void e1000_update_phy_info(unsigned long data) | 2539 | static void e1000_update_phy_info(unsigned long data) |
2518 | { | 2540 | { |
2519 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 2541 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
2520 | e1000_phy_get_info(&adapter->hw, &adapter->phy_info); | 2542 | struct e1000_hw *hw = &adapter->hw; |
2543 | e1000_phy_get_info(hw, &adapter->phy_info); | ||
2521 | } | 2544 | } |
2522 | 2545 | ||
2523 | /** | 2546 | /** |
@@ -2528,29 +2551,22 @@ static void e1000_update_phy_info(unsigned long data) | |||
2528 | static void e1000_82547_tx_fifo_stall(unsigned long data) | 2551 | static void e1000_82547_tx_fifo_stall(unsigned long data) |
2529 | { | 2552 | { |
2530 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 2553 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
2554 | struct e1000_hw *hw = &adapter->hw; | ||
2531 | struct net_device *netdev = adapter->netdev; | 2555 | struct net_device *netdev = adapter->netdev; |
2532 | u32 tctl; | 2556 | u32 tctl; |
2533 | 2557 | ||
2534 | if (atomic_read(&adapter->tx_fifo_stall)) { | 2558 | if (atomic_read(&adapter->tx_fifo_stall)) { |
2535 | if ((E1000_READ_REG(&adapter->hw, TDT) == | 2559 | if ((er32(TDT) == er32(TDH)) && |
2536 | E1000_READ_REG(&adapter->hw, TDH)) && | 2560 | (er32(TDFT) == er32(TDFH)) && |
2537 | (E1000_READ_REG(&adapter->hw, TDFT) == | 2561 | (er32(TDFTS) == er32(TDFHS))) { |
2538 | E1000_READ_REG(&adapter->hw, TDFH)) && | 2562 | tctl = er32(TCTL); |
2539 | (E1000_READ_REG(&adapter->hw, TDFTS) == | 2563 | ew32(TCTL, tctl & ~E1000_TCTL_EN); |
2540 | E1000_READ_REG(&adapter->hw, TDFHS))) { | 2564 | ew32(TDFT, adapter->tx_head_addr); |
2541 | tctl = E1000_READ_REG(&adapter->hw, TCTL); | 2565 | ew32(TDFH, adapter->tx_head_addr); |
2542 | E1000_WRITE_REG(&adapter->hw, TCTL, | 2566 | ew32(TDFTS, adapter->tx_head_addr); |
2543 | tctl & ~E1000_TCTL_EN); | 2567 | ew32(TDFHS, adapter->tx_head_addr); |
2544 | E1000_WRITE_REG(&adapter->hw, TDFT, | 2568 | ew32(TCTL, tctl); |
2545 | adapter->tx_head_addr); | 2569 | E1000_WRITE_FLUSH(); |
2546 | E1000_WRITE_REG(&adapter->hw, TDFH, | ||
2547 | adapter->tx_head_addr); | ||
2548 | E1000_WRITE_REG(&adapter->hw, TDFTS, | ||
2549 | adapter->tx_head_addr); | ||
2550 | E1000_WRITE_REG(&adapter->hw, TDFHS, | ||
2551 | adapter->tx_head_addr); | ||
2552 | E1000_WRITE_REG(&adapter->hw, TCTL, tctl); | ||
2553 | E1000_WRITE_FLUSH(&adapter->hw); | ||
2554 | 2570 | ||
2555 | adapter->tx_fifo_head = 0; | 2571 | adapter->tx_fifo_head = 0; |
2556 | atomic_set(&adapter->tx_fifo_stall, 0); | 2572 | atomic_set(&adapter->tx_fifo_stall, 0); |
@@ -2568,41 +2584,42 @@ static void e1000_82547_tx_fifo_stall(unsigned long data) | |||
2568 | static void e1000_watchdog(unsigned long data) | 2584 | static void e1000_watchdog(unsigned long data) |
2569 | { | 2585 | { |
2570 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 2586 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
2587 | struct e1000_hw *hw = &adapter->hw; | ||
2571 | struct net_device *netdev = adapter->netdev; | 2588 | struct net_device *netdev = adapter->netdev; |
2572 | struct e1000_tx_ring *txdr = adapter->tx_ring; | 2589 | struct e1000_tx_ring *txdr = adapter->tx_ring; |
2573 | u32 link, tctl; | 2590 | u32 link, tctl; |
2574 | s32 ret_val; | 2591 | s32 ret_val; |
2575 | 2592 | ||
2576 | ret_val = e1000_check_for_link(&adapter->hw); | 2593 | ret_val = e1000_check_for_link(hw); |
2577 | if ((ret_val == E1000_ERR_PHY) && | 2594 | if ((ret_val == E1000_ERR_PHY) && |
2578 | (adapter->hw.phy_type == e1000_phy_igp_3) && | 2595 | (hw->phy_type == e1000_phy_igp_3) && |
2579 | (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { | 2596 | (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { |
2580 | /* See e1000_kumeran_lock_loss_workaround() */ | 2597 | /* See e1000_kumeran_lock_loss_workaround() */ |
2581 | DPRINTK(LINK, INFO, | 2598 | DPRINTK(LINK, INFO, |
2582 | "Gigabit has been disabled, downgrading speed\n"); | 2599 | "Gigabit has been disabled, downgrading speed\n"); |
2583 | } | 2600 | } |
2584 | 2601 | ||
2585 | if (adapter->hw.mac_type == e1000_82573) { | 2602 | if (hw->mac_type == e1000_82573) { |
2586 | e1000_enable_tx_pkt_filtering(&adapter->hw); | 2603 | e1000_enable_tx_pkt_filtering(hw); |
2587 | if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) | 2604 | if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id) |
2588 | e1000_update_mng_vlan(adapter); | 2605 | e1000_update_mng_vlan(adapter); |
2589 | } | 2606 | } |
2590 | 2607 | ||
2591 | if ((adapter->hw.media_type == e1000_media_type_internal_serdes) && | 2608 | if ((hw->media_type == e1000_media_type_internal_serdes) && |
2592 | !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) | 2609 | !(er32(TXCW) & E1000_TXCW_ANE)) |
2593 | link = !adapter->hw.serdes_link_down; | 2610 | link = !hw->serdes_link_down; |
2594 | else | 2611 | else |
2595 | link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU; | 2612 | link = er32(STATUS) & E1000_STATUS_LU; |
2596 | 2613 | ||
2597 | if (link) { | 2614 | if (link) { |
2598 | if (!netif_carrier_ok(netdev)) { | 2615 | if (!netif_carrier_ok(netdev)) { |
2599 | u32 ctrl; | 2616 | u32 ctrl; |
2600 | bool txb2b = true; | 2617 | bool txb2b = true; |
2601 | e1000_get_speed_and_duplex(&adapter->hw, | 2618 | e1000_get_speed_and_duplex(hw, |
2602 | &adapter->link_speed, | 2619 | &adapter->link_speed, |
2603 | &adapter->link_duplex); | 2620 | &adapter->link_duplex); |
2604 | 2621 | ||
2605 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 2622 | ctrl = er32(CTRL); |
2606 | DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, " | 2623 | DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, " |
2607 | "Flow Control: %s\n", | 2624 | "Flow Control: %s\n", |
2608 | adapter->link_speed, | 2625 | adapter->link_speed, |
@@ -2630,19 +2647,19 @@ static void e1000_watchdog(unsigned long data) | |||
2630 | break; | 2647 | break; |
2631 | } | 2648 | } |
2632 | 2649 | ||
2633 | if ((adapter->hw.mac_type == e1000_82571 || | 2650 | if ((hw->mac_type == e1000_82571 || |
2634 | adapter->hw.mac_type == e1000_82572) && | 2651 | hw->mac_type == e1000_82572) && |
2635 | !txb2b) { | 2652 | !txb2b) { |
2636 | u32 tarc0; | 2653 | u32 tarc0; |
2637 | tarc0 = E1000_READ_REG(&adapter->hw, TARC0); | 2654 | tarc0 = er32(TARC0); |
2638 | tarc0 &= ~(1 << 21); | 2655 | tarc0 &= ~(1 << 21); |
2639 | E1000_WRITE_REG(&adapter->hw, TARC0, tarc0); | 2656 | ew32(TARC0, tarc0); |
2640 | } | 2657 | } |
2641 | 2658 | ||
2642 | /* disable TSO for pcie and 10/100 speeds, to avoid | 2659 | /* disable TSO for pcie and 10/100 speeds, to avoid |
2643 | * some hardware issues */ | 2660 | * some hardware issues */ |
2644 | if (!adapter->tso_force && | 2661 | if (!adapter->tso_force && |
2645 | adapter->hw.bus_type == e1000_bus_type_pci_express){ | 2662 | hw->bus_type == e1000_bus_type_pci_express){ |
2646 | switch (adapter->link_speed) { | 2663 | switch (adapter->link_speed) { |
2647 | case SPEED_10: | 2664 | case SPEED_10: |
2648 | case SPEED_100: | 2665 | case SPEED_100: |
@@ -2663,9 +2680,9 @@ static void e1000_watchdog(unsigned long data) | |||
2663 | 2680 | ||
2664 | /* enable transmits in the hardware, need to do this | 2681 | /* enable transmits in the hardware, need to do this |
2665 | * after setting TARC0 */ | 2682 | * after setting TARC0 */ |
2666 | tctl = E1000_READ_REG(&adapter->hw, TCTL); | 2683 | tctl = er32(TCTL); |
2667 | tctl |= E1000_TCTL_EN; | 2684 | tctl |= E1000_TCTL_EN; |
2668 | E1000_WRITE_REG(&adapter->hw, TCTL, tctl); | 2685 | ew32(TCTL, tctl); |
2669 | 2686 | ||
2670 | netif_carrier_on(netdev); | 2687 | netif_carrier_on(netdev); |
2671 | netif_wake_queue(netdev); | 2688 | netif_wake_queue(netdev); |
@@ -2673,10 +2690,9 @@ static void e1000_watchdog(unsigned long data) | |||
2673 | adapter->smartspeed = 0; | 2690 | adapter->smartspeed = 0; |
2674 | } else { | 2691 | } else { |
2675 | /* make sure the receive unit is started */ | 2692 | /* make sure the receive unit is started */ |
2676 | if (adapter->hw.rx_needs_kicking) { | 2693 | if (hw->rx_needs_kicking) { |
2677 | struct e1000_hw *hw = &adapter->hw; | 2694 | u32 rctl = er32(RCTL); |
2678 | u32 rctl = E1000_READ_REG(hw, RCTL); | 2695 | ew32(RCTL, rctl | E1000_RCTL_EN); |
2679 | E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN); | ||
2680 | } | 2696 | } |
2681 | } | 2697 | } |
2682 | } else { | 2698 | } else { |
@@ -2693,7 +2709,7 @@ static void e1000_watchdog(unsigned long data) | |||
2693 | * disable receives in the ISR and | 2709 | * disable receives in the ISR and |
2694 | * reset device here in the watchdog | 2710 | * reset device here in the watchdog |
2695 | */ | 2711 | */ |
2696 | if (adapter->hw.mac_type == e1000_80003es2lan) | 2712 | if (hw->mac_type == e1000_80003es2lan) |
2697 | /* reset device */ | 2713 | /* reset device */ |
2698 | schedule_work(&adapter->reset_task); | 2714 | schedule_work(&adapter->reset_task); |
2699 | } | 2715 | } |
@@ -2703,9 +2719,9 @@ static void e1000_watchdog(unsigned long data) | |||
2703 | 2719 | ||
2704 | e1000_update_stats(adapter); | 2720 | e1000_update_stats(adapter); |
2705 | 2721 | ||
2706 | adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; | 2722 | hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; |
2707 | adapter->tpt_old = adapter->stats.tpt; | 2723 | adapter->tpt_old = adapter->stats.tpt; |
2708 | adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old; | 2724 | hw->collision_delta = adapter->stats.colc - adapter->colc_old; |
2709 | adapter->colc_old = adapter->stats.colc; | 2725 | adapter->colc_old = adapter->stats.colc; |
2710 | 2726 | ||
2711 | adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; | 2727 | adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; |
@@ -2713,7 +2729,7 @@ static void e1000_watchdog(unsigned long data) | |||
2713 | adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; | 2729 | adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; |
2714 | adapter->gotcl_old = adapter->stats.gotcl; | 2730 | adapter->gotcl_old = adapter->stats.gotcl; |
2715 | 2731 | ||
2716 | e1000_update_adaptive(&adapter->hw); | 2732 | e1000_update_adaptive(hw); |
2717 | 2733 | ||
2718 | if (!netif_carrier_ok(netdev)) { | 2734 | if (!netif_carrier_ok(netdev)) { |
2719 | if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { | 2735 | if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { |
@@ -2727,15 +2743,15 @@ static void e1000_watchdog(unsigned long data) | |||
2727 | } | 2743 | } |
2728 | 2744 | ||
2729 | /* Cause software interrupt to ensure rx ring is cleaned */ | 2745 | /* Cause software interrupt to ensure rx ring is cleaned */ |
2730 | E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0); | 2746 | ew32(ICS, E1000_ICS_RXDMT0); |
2731 | 2747 | ||
2732 | /* Force detection of hung controller every watchdog period */ | 2748 | /* Force detection of hung controller every watchdog period */ |
2733 | adapter->detect_tx_hung = true; | 2749 | adapter->detect_tx_hung = true; |
2734 | 2750 | ||
2735 | /* With 82571 controllers, LAA may be overwritten due to controller | 2751 | /* With 82571 controllers, LAA may be overwritten due to controller |
2736 | * reset from the other port. Set the appropriate LAA in RAR[0] */ | 2752 | * reset from the other port. Set the appropriate LAA in RAR[0] */ |
2737 | if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present) | 2753 | if (hw->mac_type == e1000_82571 && hw->laa_is_present) |
2738 | e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); | 2754 | e1000_rar_set(hw, hw->mac_addr, 0); |
2739 | 2755 | ||
2740 | /* Reset the timer */ | 2756 | /* Reset the timer */ |
2741 | mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); | 2757 | mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); |
@@ -2870,7 +2886,7 @@ set_itr_now: | |||
2870 | min(adapter->itr + (new_itr >> 2), new_itr) : | 2886 | min(adapter->itr + (new_itr >> 2), new_itr) : |
2871 | new_itr; | 2887 | new_itr; |
2872 | adapter->itr = new_itr; | 2888 | adapter->itr = new_itr; |
2873 | E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256)); | 2889 | ew32(ITR, 1000000000 / (new_itr * 256)); |
2874 | } | 2890 | } |
2875 | 2891 | ||
2876 | return; | 2892 | return; |
@@ -2999,6 +3015,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
2999 | unsigned int max_per_txd, unsigned int nr_frags, | 3015 | unsigned int max_per_txd, unsigned int nr_frags, |
3000 | unsigned int mss) | 3016 | unsigned int mss) |
3001 | { | 3017 | { |
3018 | struct e1000_hw *hw = &adapter->hw; | ||
3002 | struct e1000_buffer *buffer_info; | 3019 | struct e1000_buffer *buffer_info; |
3003 | unsigned int len = skb->len; | 3020 | unsigned int len = skb->len; |
3004 | unsigned int offset = 0, size, count = 0, i; | 3021 | unsigned int offset = 0, size, count = 0, i; |
@@ -3029,7 +3046,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3029 | * The fix is to make sure that the first descriptor of a | 3046 | * The fix is to make sure that the first descriptor of a |
3030 | * packet is smaller than 2048 - 16 - 16 (or 2016) bytes | 3047 | * packet is smaller than 2048 - 16 - 16 (or 2016) bytes |
3031 | */ | 3048 | */ |
3032 | if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && | 3049 | if (unlikely((hw->bus_type == e1000_bus_type_pcix) && |
3033 | (size > 2015) && count == 0)) | 3050 | (size > 2015) && count == 0)) |
3034 | size = 2015; | 3051 | size = 2015; |
3035 | 3052 | ||
@@ -3105,6 +3122,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, | |||
3105 | struct e1000_tx_ring *tx_ring, int tx_flags, | 3122 | struct e1000_tx_ring *tx_ring, int tx_flags, |
3106 | int count) | 3123 | int count) |
3107 | { | 3124 | { |
3125 | struct e1000_hw *hw = &adapter->hw; | ||
3108 | struct e1000_tx_desc *tx_desc = NULL; | 3126 | struct e1000_tx_desc *tx_desc = NULL; |
3109 | struct e1000_buffer *buffer_info; | 3127 | struct e1000_buffer *buffer_info; |
3110 | u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; | 3128 | u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; |
@@ -3150,7 +3168,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, | |||
3150 | wmb(); | 3168 | wmb(); |
3151 | 3169 | ||
3152 | tx_ring->next_to_use = i; | 3170 | tx_ring->next_to_use = i; |
3153 | writel(i, adapter->hw.hw_addr + tx_ring->tdt); | 3171 | writel(i, hw->hw_addr + tx_ring->tdt); |
3154 | /* we need this if more than one processor can write to our tail | 3172 | /* we need this if more than one processor can write to our tail |
3155 | * at a time, it syncronizes IO on IA64/Altix systems */ | 3173 | * at a time, it syncronizes IO on IA64/Altix systems */ |
3156 | mmiowb(); | 3174 | mmiowb(); |
@@ -3201,8 +3219,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, | |||
3201 | struct e1000_hw *hw = &adapter->hw; | 3219 | struct e1000_hw *hw = &adapter->hw; |
3202 | u16 length, offset; | 3220 | u16 length, offset; |
3203 | if (vlan_tx_tag_present(skb)) { | 3221 | if (vlan_tx_tag_present(skb)) { |
3204 | if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && | 3222 | if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) && |
3205 | ( adapter->hw.mng_cookie.status & | 3223 | ( hw->mng_cookie.status & |
3206 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) | 3224 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) |
3207 | return 0; | 3225 | return 0; |
3208 | } | 3226 | } |
@@ -3263,6 +3281,7 @@ static int e1000_maybe_stop_tx(struct net_device *netdev, | |||
3263 | static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | 3281 | static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) |
3264 | { | 3282 | { |
3265 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3283 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3284 | struct e1000_hw *hw = &adapter->hw; | ||
3266 | struct e1000_tx_ring *tx_ring; | 3285 | struct e1000_tx_ring *tx_ring; |
3267 | unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; | 3286 | unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; |
3268 | unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; | 3287 | unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; |
@@ -3288,7 +3307,7 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3288 | 3307 | ||
3289 | /* 82571 and newer doesn't need the workaround that limited descriptor | 3308 | /* 82571 and newer doesn't need the workaround that limited descriptor |
3290 | * length to 4kB */ | 3309 | * length to 4kB */ |
3291 | if (adapter->hw.mac_type >= e1000_82571) | 3310 | if (hw->mac_type >= e1000_82571) |
3292 | max_per_txd = 8192; | 3311 | max_per_txd = 8192; |
3293 | 3312 | ||
3294 | mss = skb_shinfo(skb)->gso_size; | 3313 | mss = skb_shinfo(skb)->gso_size; |
@@ -3308,7 +3327,7 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3308 | * frags into skb->data */ | 3327 | * frags into skb->data */ |
3309 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | 3328 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
3310 | if (skb->data_len && hdr_len == len) { | 3329 | if (skb->data_len && hdr_len == len) { |
3311 | switch (adapter->hw.mac_type) { | 3330 | switch (hw->mac_type) { |
3312 | unsigned int pull_size; | 3331 | unsigned int pull_size; |
3313 | case e1000_82544: | 3332 | case e1000_82544: |
3314 | /* Make sure we have room to chop off 4 bytes, | 3333 | /* Make sure we have room to chop off 4 bytes, |
@@ -3357,7 +3376,7 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3357 | /* work-around for errata 10 and it applies to all controllers | 3376 | /* work-around for errata 10 and it applies to all controllers |
3358 | * in PCI-X mode, so add one more descriptor to the count | 3377 | * in PCI-X mode, so add one more descriptor to the count |
3359 | */ | 3378 | */ |
3360 | if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && | 3379 | if (unlikely((hw->bus_type == e1000_bus_type_pcix) && |
3361 | (len > 2015))) | 3380 | (len > 2015))) |
3362 | count++; | 3381 | count++; |
3363 | 3382 | ||
@@ -3369,8 +3388,8 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3369 | count += nr_frags; | 3388 | count += nr_frags; |
3370 | 3389 | ||
3371 | 3390 | ||
3372 | if (adapter->hw.tx_pkt_filtering && | 3391 | if (hw->tx_pkt_filtering && |
3373 | (adapter->hw.mac_type == e1000_82573)) | 3392 | (hw->mac_type == e1000_82573)) |
3374 | e1000_transfer_dhcp_info(adapter, skb); | 3393 | e1000_transfer_dhcp_info(adapter, skb); |
3375 | 3394 | ||
3376 | if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) | 3395 | if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) |
@@ -3384,7 +3403,7 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3384 | return NETDEV_TX_BUSY; | 3403 | return NETDEV_TX_BUSY; |
3385 | } | 3404 | } |
3386 | 3405 | ||
3387 | if (unlikely(adapter->hw.mac_type == e1000_82547)) { | 3406 | if (unlikely(hw->mac_type == e1000_82547)) { |
3388 | if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { | 3407 | if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { |
3389 | netif_stop_queue(netdev); | 3408 | netif_stop_queue(netdev); |
3390 | mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); | 3409 | mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); |
@@ -3481,6 +3500,7 @@ static struct net_device_stats *e1000_get_stats(struct net_device *netdev) | |||
3481 | static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | 3500 | static int e1000_change_mtu(struct net_device *netdev, int new_mtu) |
3482 | { | 3501 | { |
3483 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3502 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3503 | struct e1000_hw *hw = &adapter->hw; | ||
3484 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | 3504 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; |
3485 | u16 eeprom_data = 0; | 3505 | u16 eeprom_data = 0; |
3486 | 3506 | ||
@@ -3491,7 +3511,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3491 | } | 3511 | } |
3492 | 3512 | ||
3493 | /* Adapter-specific max frame size limits. */ | 3513 | /* Adapter-specific max frame size limits. */ |
3494 | switch (adapter->hw.mac_type) { | 3514 | switch (hw->mac_type) { |
3495 | case e1000_undefined ... e1000_82542_rev2_1: | 3515 | case e1000_undefined ... e1000_82542_rev2_1: |
3496 | case e1000_ich8lan: | 3516 | case e1000_ich8lan: |
3497 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { | 3517 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { |
@@ -3503,9 +3523,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3503 | /* Jumbo Frames not supported if: | 3523 | /* Jumbo Frames not supported if: |
3504 | * - this is not an 82573L device | 3524 | * - this is not an 82573L device |
3505 | * - ASPM is enabled in any way (0x1A bits 3:2) */ | 3525 | * - ASPM is enabled in any way (0x1A bits 3:2) */ |
3506 | e1000_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3, 1, | 3526 | e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1, |
3507 | &eeprom_data); | 3527 | &eeprom_data); |
3508 | if ((adapter->hw.device_id != E1000_DEV_ID_82573L) || | 3528 | if ((hw->device_id != E1000_DEV_ID_82573L) || |
3509 | (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) { | 3529 | (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) { |
3510 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { | 3530 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { |
3511 | DPRINTK(PROBE, ERR, | 3531 | DPRINTK(PROBE, ERR, |
@@ -3552,13 +3572,13 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3552 | adapter->rx_buffer_len = E1000_RXBUFFER_16384; | 3572 | adapter->rx_buffer_len = E1000_RXBUFFER_16384; |
3553 | 3573 | ||
3554 | /* adjust allocation if LPE protects us, and we aren't using SBP */ | 3574 | /* adjust allocation if LPE protects us, and we aren't using SBP */ |
3555 | if (!adapter->hw.tbi_compatibility_on && | 3575 | if (!hw->tbi_compatibility_on && |
3556 | ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || | 3576 | ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || |
3557 | (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) | 3577 | (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) |
3558 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; | 3578 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; |
3559 | 3579 | ||
3560 | netdev->mtu = new_mtu; | 3580 | netdev->mtu = new_mtu; |
3561 | adapter->hw.max_frame_size = max_frame; | 3581 | hw->max_frame_size = max_frame; |
3562 | 3582 | ||
3563 | if (netif_running(netdev)) | 3583 | if (netif_running(netdev)) |
3564 | e1000_reinit_locked(adapter); | 3584 | e1000_reinit_locked(adapter); |
@@ -3596,89 +3616,89 @@ void e1000_update_stats(struct e1000_adapter *adapter) | |||
3596 | * be written while holding adapter->stats_lock | 3616 | * be written while holding adapter->stats_lock |
3597 | */ | 3617 | */ |
3598 | 3618 | ||
3599 | adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS); | 3619 | adapter->stats.crcerrs += er32(CRCERRS); |
3600 | adapter->stats.gprc += E1000_READ_REG(hw, GPRC); | 3620 | adapter->stats.gprc += er32(GPRC); |
3601 | adapter->stats.gorcl += E1000_READ_REG(hw, GORCL); | 3621 | adapter->stats.gorcl += er32(GORCL); |
3602 | adapter->stats.gorch += E1000_READ_REG(hw, GORCH); | 3622 | adapter->stats.gorch += er32(GORCH); |
3603 | adapter->stats.bprc += E1000_READ_REG(hw, BPRC); | 3623 | adapter->stats.bprc += er32(BPRC); |
3604 | adapter->stats.mprc += E1000_READ_REG(hw, MPRC); | 3624 | adapter->stats.mprc += er32(MPRC); |
3605 | adapter->stats.roc += E1000_READ_REG(hw, ROC); | 3625 | adapter->stats.roc += er32(ROC); |
3606 | 3626 | ||
3607 | if (adapter->hw.mac_type != e1000_ich8lan) { | 3627 | if (hw->mac_type != e1000_ich8lan) { |
3608 | adapter->stats.prc64 += E1000_READ_REG(hw, PRC64); | 3628 | adapter->stats.prc64 += er32(PRC64); |
3609 | adapter->stats.prc127 += E1000_READ_REG(hw, PRC127); | 3629 | adapter->stats.prc127 += er32(PRC127); |
3610 | adapter->stats.prc255 += E1000_READ_REG(hw, PRC255); | 3630 | adapter->stats.prc255 += er32(PRC255); |
3611 | adapter->stats.prc511 += E1000_READ_REG(hw, PRC511); | 3631 | adapter->stats.prc511 += er32(PRC511); |
3612 | adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023); | 3632 | adapter->stats.prc1023 += er32(PRC1023); |
3613 | adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522); | 3633 | adapter->stats.prc1522 += er32(PRC1522); |
3614 | } | 3634 | } |
3615 | 3635 | ||
3616 | adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS); | 3636 | adapter->stats.symerrs += er32(SYMERRS); |
3617 | adapter->stats.mpc += E1000_READ_REG(hw, MPC); | 3637 | adapter->stats.mpc += er32(MPC); |
3618 | adapter->stats.scc += E1000_READ_REG(hw, SCC); | 3638 | adapter->stats.scc += er32(SCC); |
3619 | adapter->stats.ecol += E1000_READ_REG(hw, ECOL); | 3639 | adapter->stats.ecol += er32(ECOL); |
3620 | adapter->stats.mcc += E1000_READ_REG(hw, MCC); | 3640 | adapter->stats.mcc += er32(MCC); |
3621 | adapter->stats.latecol += E1000_READ_REG(hw, LATECOL); | 3641 | adapter->stats.latecol += er32(LATECOL); |
3622 | adapter->stats.dc += E1000_READ_REG(hw, DC); | 3642 | adapter->stats.dc += er32(DC); |
3623 | adapter->stats.sec += E1000_READ_REG(hw, SEC); | 3643 | adapter->stats.sec += er32(SEC); |
3624 | adapter->stats.rlec += E1000_READ_REG(hw, RLEC); | 3644 | adapter->stats.rlec += er32(RLEC); |
3625 | adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC); | 3645 | adapter->stats.xonrxc += er32(XONRXC); |
3626 | adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC); | 3646 | adapter->stats.xontxc += er32(XONTXC); |
3627 | adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC); | 3647 | adapter->stats.xoffrxc += er32(XOFFRXC); |
3628 | adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC); | 3648 | adapter->stats.xofftxc += er32(XOFFTXC); |
3629 | adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC); | 3649 | adapter->stats.fcruc += er32(FCRUC); |
3630 | adapter->stats.gptc += E1000_READ_REG(hw, GPTC); | 3650 | adapter->stats.gptc += er32(GPTC); |
3631 | adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL); | 3651 | adapter->stats.gotcl += er32(GOTCL); |
3632 | adapter->stats.gotch += E1000_READ_REG(hw, GOTCH); | 3652 | adapter->stats.gotch += er32(GOTCH); |
3633 | adapter->stats.rnbc += E1000_READ_REG(hw, RNBC); | 3653 | adapter->stats.rnbc += er32(RNBC); |
3634 | adapter->stats.ruc += E1000_READ_REG(hw, RUC); | 3654 | adapter->stats.ruc += er32(RUC); |
3635 | adapter->stats.rfc += E1000_READ_REG(hw, RFC); | 3655 | adapter->stats.rfc += er32(RFC); |
3636 | adapter->stats.rjc += E1000_READ_REG(hw, RJC); | 3656 | adapter->stats.rjc += er32(RJC); |
3637 | adapter->stats.torl += E1000_READ_REG(hw, TORL); | 3657 | adapter->stats.torl += er32(TORL); |
3638 | adapter->stats.torh += E1000_READ_REG(hw, TORH); | 3658 | adapter->stats.torh += er32(TORH); |
3639 | adapter->stats.totl += E1000_READ_REG(hw, TOTL); | 3659 | adapter->stats.totl += er32(TOTL); |
3640 | adapter->stats.toth += E1000_READ_REG(hw, TOTH); | 3660 | adapter->stats.toth += er32(TOTH); |
3641 | adapter->stats.tpr += E1000_READ_REG(hw, TPR); | 3661 | adapter->stats.tpr += er32(TPR); |
3642 | 3662 | ||
3643 | if (adapter->hw.mac_type != e1000_ich8lan) { | 3663 | if (hw->mac_type != e1000_ich8lan) { |
3644 | adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64); | 3664 | adapter->stats.ptc64 += er32(PTC64); |
3645 | adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127); | 3665 | adapter->stats.ptc127 += er32(PTC127); |
3646 | adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255); | 3666 | adapter->stats.ptc255 += er32(PTC255); |
3647 | adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511); | 3667 | adapter->stats.ptc511 += er32(PTC511); |
3648 | adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023); | 3668 | adapter->stats.ptc1023 += er32(PTC1023); |
3649 | adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522); | 3669 | adapter->stats.ptc1522 += er32(PTC1522); |
3650 | } | 3670 | } |
3651 | 3671 | ||
3652 | adapter->stats.mptc += E1000_READ_REG(hw, MPTC); | 3672 | adapter->stats.mptc += er32(MPTC); |
3653 | adapter->stats.bptc += E1000_READ_REG(hw, BPTC); | 3673 | adapter->stats.bptc += er32(BPTC); |
3654 | 3674 | ||
3655 | /* used for adaptive IFS */ | 3675 | /* used for adaptive IFS */ |
3656 | 3676 | ||
3657 | hw->tx_packet_delta = E1000_READ_REG(hw, TPT); | 3677 | hw->tx_packet_delta = er32(TPT); |
3658 | adapter->stats.tpt += hw->tx_packet_delta; | 3678 | adapter->stats.tpt += hw->tx_packet_delta; |
3659 | hw->collision_delta = E1000_READ_REG(hw, COLC); | 3679 | hw->collision_delta = er32(COLC); |
3660 | adapter->stats.colc += hw->collision_delta; | 3680 | adapter->stats.colc += hw->collision_delta; |
3661 | 3681 | ||
3662 | if (hw->mac_type >= e1000_82543) { | 3682 | if (hw->mac_type >= e1000_82543) { |
3663 | adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC); | 3683 | adapter->stats.algnerrc += er32(ALGNERRC); |
3664 | adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC); | 3684 | adapter->stats.rxerrc += er32(RXERRC); |
3665 | adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS); | 3685 | adapter->stats.tncrs += er32(TNCRS); |
3666 | adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR); | 3686 | adapter->stats.cexterr += er32(CEXTERR); |
3667 | adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); | 3687 | adapter->stats.tsctc += er32(TSCTC); |
3668 | adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); | 3688 | adapter->stats.tsctfc += er32(TSCTFC); |
3669 | } | 3689 | } |
3670 | if (hw->mac_type > e1000_82547_rev_2) { | 3690 | if (hw->mac_type > e1000_82547_rev_2) { |
3671 | adapter->stats.iac += E1000_READ_REG(hw, IAC); | 3691 | adapter->stats.iac += er32(IAC); |
3672 | adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); | 3692 | adapter->stats.icrxoc += er32(ICRXOC); |
3673 | 3693 | ||
3674 | if (adapter->hw.mac_type != e1000_ich8lan) { | 3694 | if (hw->mac_type != e1000_ich8lan) { |
3675 | adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); | 3695 | adapter->stats.icrxptc += er32(ICRXPTC); |
3676 | adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC); | 3696 | adapter->stats.icrxatc += er32(ICRXATC); |
3677 | adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC); | 3697 | adapter->stats.ictxptc += er32(ICTXPTC); |
3678 | adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC); | 3698 | adapter->stats.ictxatc += er32(ICTXATC); |
3679 | adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC); | 3699 | adapter->stats.ictxqec += er32(ICTXQEC); |
3680 | adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC); | 3700 | adapter->stats.ictxqmtc += er32(ICTXQMTC); |
3681 | adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC); | 3701 | adapter->stats.icrxdmtc += er32(ICRXDMTC); |
3682 | } | 3702 | } |
3683 | } | 3703 | } |
3684 | 3704 | ||
@@ -3706,7 +3726,7 @@ void e1000_update_stats(struct e1000_adapter *adapter) | |||
3706 | adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; | 3726 | adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; |
3707 | adapter->net_stats.tx_window_errors = adapter->stats.latecol; | 3727 | adapter->net_stats.tx_window_errors = adapter->stats.latecol; |
3708 | adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; | 3728 | adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; |
3709 | if (adapter->hw.bad_tx_carr_stats_fd && | 3729 | if (hw->bad_tx_carr_stats_fd && |
3710 | adapter->link_duplex == FULL_DUPLEX) { | 3730 | adapter->link_duplex == FULL_DUPLEX) { |
3711 | adapter->net_stats.tx_carrier_errors = 0; | 3731 | adapter->net_stats.tx_carrier_errors = 0; |
3712 | adapter->stats.tncrs = 0; | 3732 | adapter->stats.tncrs = 0; |
@@ -3729,10 +3749,10 @@ void e1000_update_stats(struct e1000_adapter *adapter) | |||
3729 | } | 3749 | } |
3730 | 3750 | ||
3731 | /* Management Stats */ | 3751 | /* Management Stats */ |
3732 | if (adapter->hw.has_smbus) { | 3752 | if (hw->has_smbus) { |
3733 | adapter->stats.mgptc += E1000_READ_REG(hw, MGTPTC); | 3753 | adapter->stats.mgptc += er32(MGTPTC); |
3734 | adapter->stats.mgprc += E1000_READ_REG(hw, MGTPRC); | 3754 | adapter->stats.mgprc += er32(MGTPRC); |
3735 | adapter->stats.mgpdc += E1000_READ_REG(hw, MGTPDC); | 3755 | adapter->stats.mgpdc += er32(MGTPDC); |
3736 | } | 3756 | } |
3737 | 3757 | ||
3738 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 3758 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
@@ -3752,7 +3772,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) | |||
3752 | #ifndef CONFIG_E1000_NAPI | 3772 | #ifndef CONFIG_E1000_NAPI |
3753 | int i; | 3773 | int i; |
3754 | #endif | 3774 | #endif |
3755 | u32 icr = E1000_READ_REG(hw, ICR); | 3775 | u32 icr = er32(ICR); |
3756 | 3776 | ||
3757 | /* in NAPI mode read ICR disables interrupts using IAM */ | 3777 | /* in NAPI mode read ICR disables interrupts using IAM */ |
3758 | 3778 | ||
@@ -3762,10 +3782,10 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) | |||
3762 | * link down event; disable receives here in the ISR and reset | 3782 | * link down event; disable receives here in the ISR and reset |
3763 | * adapter in watchdog */ | 3783 | * adapter in watchdog */ |
3764 | if (netif_carrier_ok(netdev) && | 3784 | if (netif_carrier_ok(netdev) && |
3765 | (adapter->hw.mac_type == e1000_80003es2lan)) { | 3785 | (hw->mac_type == e1000_80003es2lan)) { |
3766 | /* disable receives */ | 3786 | /* disable receives */ |
3767 | u32 rctl = E1000_READ_REG(hw, RCTL); | 3787 | u32 rctl = er32(RCTL); |
3768 | E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); | 3788 | ew32(RCTL, rctl & ~E1000_RCTL_EN); |
3769 | } | 3789 | } |
3770 | /* guard against interrupt when we're going down */ | 3790 | /* guard against interrupt when we're going down */ |
3771 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | 3791 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
@@ -3810,7 +3830,7 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
3810 | struct net_device *netdev = data; | 3830 | struct net_device *netdev = data; |
3811 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3831 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3812 | struct e1000_hw *hw = &adapter->hw; | 3832 | struct e1000_hw *hw = &adapter->hw; |
3813 | u32 rctl, icr = E1000_READ_REG(hw, ICR); | 3833 | u32 rctl, icr = er32(ICR); |
3814 | #ifndef CONFIG_E1000_NAPI | 3834 | #ifndef CONFIG_E1000_NAPI |
3815 | int i; | 3835 | int i; |
3816 | #endif | 3836 | #endif |
@@ -3836,10 +3856,10 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
3836 | * reset adapter in watchdog | 3856 | * reset adapter in watchdog |
3837 | */ | 3857 | */ |
3838 | if (netif_carrier_ok(netdev) && | 3858 | if (netif_carrier_ok(netdev) && |
3839 | (adapter->hw.mac_type == e1000_80003es2lan)) { | 3859 | (hw->mac_type == e1000_80003es2lan)) { |
3840 | /* disable receives */ | 3860 | /* disable receives */ |
3841 | rctl = E1000_READ_REG(hw, RCTL); | 3861 | rctl = er32(RCTL); |
3842 | E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); | 3862 | ew32(RCTL, rctl & ~E1000_RCTL_EN); |
3843 | } | 3863 | } |
3844 | /* guard against interrupt when we're going down */ | 3864 | /* guard against interrupt when we're going down */ |
3845 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | 3865 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
@@ -3849,8 +3869,8 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
3849 | #ifdef CONFIG_E1000_NAPI | 3869 | #ifdef CONFIG_E1000_NAPI |
3850 | if (unlikely(hw->mac_type < e1000_82571)) { | 3870 | if (unlikely(hw->mac_type < e1000_82571)) { |
3851 | /* disable interrupts, without the synchronize_irq bit */ | 3871 | /* disable interrupts, without the synchronize_irq bit */ |
3852 | E1000_WRITE_REG(hw, IMC, ~0); | 3872 | ew32(IMC, ~0); |
3853 | E1000_WRITE_FLUSH(hw); | 3873 | E1000_WRITE_FLUSH(); |
3854 | } | 3874 | } |
3855 | if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) { | 3875 | if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) { |
3856 | adapter->total_tx_bytes = 0; | 3876 | adapter->total_tx_bytes = 0; |
@@ -3874,7 +3894,7 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
3874 | * de-assertion state. | 3894 | * de-assertion state. |
3875 | */ | 3895 | */ |
3876 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) | 3896 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) |
3877 | E1000_WRITE_REG(hw, IMC, ~0); | 3897 | ew32(IMC, ~0); |
3878 | 3898 | ||
3879 | adapter->total_tx_bytes = 0; | 3899 | adapter->total_tx_bytes = 0; |
3880 | adapter->total_rx_bytes = 0; | 3900 | adapter->total_rx_bytes = 0; |
@@ -3947,6 +3967,7 @@ static int e1000_clean(struct napi_struct *napi, int budget) | |||
3947 | static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, | 3967 | static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, |
3948 | struct e1000_tx_ring *tx_ring) | 3968 | struct e1000_tx_ring *tx_ring) |
3949 | { | 3969 | { |
3970 | struct e1000_hw *hw = &adapter->hw; | ||
3950 | struct net_device *netdev = adapter->netdev; | 3971 | struct net_device *netdev = adapter->netdev; |
3951 | struct e1000_tx_desc *tx_desc, *eop_desc; | 3972 | struct e1000_tx_desc *tx_desc, *eop_desc; |
3952 | struct e1000_buffer *buffer_info; | 3973 | struct e1000_buffer *buffer_info; |
@@ -4014,8 +4035,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
4014 | if (tx_ring->buffer_info[eop].dma && | 4035 | if (tx_ring->buffer_info[eop].dma && |
4015 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + | 4036 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + |
4016 | (adapter->tx_timeout_factor * HZ)) | 4037 | (adapter->tx_timeout_factor * HZ)) |
4017 | && !(E1000_READ_REG(&adapter->hw, STATUS) & | 4038 | && !(er32(STATUS) & E1000_STATUS_TXOFF)) { |
4018 | E1000_STATUS_TXOFF)) { | ||
4019 | 4039 | ||
4020 | /* detected Tx unit hang */ | 4040 | /* detected Tx unit hang */ |
4021 | DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" | 4041 | DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" |
@@ -4031,8 +4051,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
4031 | " next_to_watch.status <%x>\n", | 4051 | " next_to_watch.status <%x>\n", |
4032 | (unsigned long)((tx_ring - adapter->tx_ring) / | 4052 | (unsigned long)((tx_ring - adapter->tx_ring) / |
4033 | sizeof(struct e1000_tx_ring)), | 4053 | sizeof(struct e1000_tx_ring)), |
4034 | readl(adapter->hw.hw_addr + tx_ring->tdh), | 4054 | readl(hw->hw_addr + tx_ring->tdh), |
4035 | readl(adapter->hw.hw_addr + tx_ring->tdt), | 4055 | readl(hw->hw_addr + tx_ring->tdt), |
4036 | tx_ring->next_to_use, | 4056 | tx_ring->next_to_use, |
4037 | tx_ring->next_to_clean, | 4057 | tx_ring->next_to_clean, |
4038 | tx_ring->buffer_info[eop].time_stamp, | 4058 | tx_ring->buffer_info[eop].time_stamp, |
@@ -4060,12 +4080,13 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
4060 | static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, | 4080 | static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, |
4061 | u32 csum, struct sk_buff *skb) | 4081 | u32 csum, struct sk_buff *skb) |
4062 | { | 4082 | { |
4083 | struct e1000_hw *hw = &adapter->hw; | ||
4063 | u16 status = (u16)status_err; | 4084 | u16 status = (u16)status_err; |
4064 | u8 errors = (u8)(status_err >> 24); | 4085 | u8 errors = (u8)(status_err >> 24); |
4065 | skb->ip_summed = CHECKSUM_NONE; | 4086 | skb->ip_summed = CHECKSUM_NONE; |
4066 | 4087 | ||
4067 | /* 82543 or newer only */ | 4088 | /* 82543 or newer only */ |
4068 | if (unlikely(adapter->hw.mac_type < e1000_82543)) return; | 4089 | if (unlikely(hw->mac_type < e1000_82543)) return; |
4069 | /* Ignore Checksum bit is set */ | 4090 | /* Ignore Checksum bit is set */ |
4070 | if (unlikely(status & E1000_RXD_STAT_IXSM)) return; | 4091 | if (unlikely(status & E1000_RXD_STAT_IXSM)) return; |
4071 | /* TCP/UDP checksum error bit is set */ | 4092 | /* TCP/UDP checksum error bit is set */ |
@@ -4075,7 +4096,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, | |||
4075 | return; | 4096 | return; |
4076 | } | 4097 | } |
4077 | /* TCP/UDP Checksum has not been calculated */ | 4098 | /* TCP/UDP Checksum has not been calculated */ |
4078 | if (adapter->hw.mac_type <= e1000_82547_rev_2) { | 4099 | if (hw->mac_type <= e1000_82547_rev_2) { |
4079 | if (!(status & E1000_RXD_STAT_TCPCS)) | 4100 | if (!(status & E1000_RXD_STAT_TCPCS)) |
4080 | return; | 4101 | return; |
4081 | } else { | 4102 | } else { |
@@ -4086,7 +4107,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, | |||
4086 | if (likely(status & E1000_RXD_STAT_TCPCS)) { | 4107 | if (likely(status & E1000_RXD_STAT_TCPCS)) { |
4087 | /* TCP checksum is good */ | 4108 | /* TCP checksum is good */ |
4088 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 4109 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
4089 | } else if (adapter->hw.mac_type > e1000_82547_rev_2) { | 4110 | } else if (hw->mac_type > e1000_82547_rev_2) { |
4090 | /* IP fragment with UDP payload */ | 4111 | /* IP fragment with UDP payload */ |
4091 | /* Hardware complements the payload checksum, so we undo it | 4112 | /* Hardware complements the payload checksum, so we undo it |
4092 | * and then put the value in host order for further stack use. | 4113 | * and then put the value in host order for further stack use. |
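As the comment in this hunk says, on MACs newer than 82547 rev 2 the hardware hands back the one's complement of the UDP payload sum, so the driver flips the bits back and converts byte order before the stack sees the value. A standalone sketch of just the complement step; the kernel helpers the driver actually uses are omitted:

        /* Sketch only: undo a hardware-complemented 16-bit checksum.  The
         * driver additionally converts from descriptor byte order to host
         * order and stores the result in skb->csum. */
        static inline u16 undo_complemented_csum(u16 hw_csum)
        {
                return (u16)~hw_csum;
        }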
@@ -4111,6 +4132,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
4111 | struct e1000_rx_ring *rx_ring) | 4132 | struct e1000_rx_ring *rx_ring) |
4112 | #endif | 4133 | #endif |
4113 | { | 4134 | { |
4135 | struct e1000_hw *hw = &adapter->hw; | ||
4114 | struct net_device *netdev = adapter->netdev; | 4136 | struct net_device *netdev = adapter->netdev; |
4115 | struct pci_dev *pdev = adapter->pdev; | 4137 | struct pci_dev *pdev = adapter->pdev; |
4116 | struct e1000_rx_desc *rx_desc, *next_rxd; | 4138 | struct e1000_rx_desc *rx_desc, *next_rxd; |
@@ -4168,11 +4190,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
4168 | 4190 | ||
4169 | if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { | 4191 | if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { |
4170 | last_byte = *(skb->data + length - 1); | 4192 | last_byte = *(skb->data + length - 1); |
4171 | if (TBI_ACCEPT(&adapter->hw, status, | 4193 | if (TBI_ACCEPT(hw, status, rx_desc->errors, length, |
4172 | rx_desc->errors, length, last_byte)) { | 4194 | last_byte)) { |
4173 | spin_lock_irqsave(&adapter->stats_lock, flags); | 4195 | spin_lock_irqsave(&adapter->stats_lock, flags); |
4174 | e1000_tbi_adjust_stats(&adapter->hw, | 4196 | e1000_tbi_adjust_stats(hw, &adapter->stats, |
4175 | &adapter->stats, | ||
4176 | length, skb->data); | 4197 | length, skb->data); |
4177 | spin_unlock_irqrestore(&adapter->stats_lock, | 4198 | spin_unlock_irqrestore(&adapter->stats_lock, |
4178 | flags); | 4199 | flags); |
@@ -4462,6 +4483,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
4462 | struct e1000_rx_ring *rx_ring, | 4483 | struct e1000_rx_ring *rx_ring, |
4463 | int cleaned_count) | 4484 | int cleaned_count) |
4464 | { | 4485 | { |
4486 | struct e1000_hw *hw = &adapter->hw; | ||
4465 | struct net_device *netdev = adapter->netdev; | 4487 | struct net_device *netdev = adapter->netdev; |
4466 | struct pci_dev *pdev = adapter->pdev; | 4488 | struct pci_dev *pdev = adapter->pdev; |
4467 | struct e1000_rx_desc *rx_desc; | 4489 | struct e1000_rx_desc *rx_desc; |
@@ -4559,7 +4581,7 @@ map_skb: | |||
4559 | * applicable for weak-ordered memory model archs, | 4581 | * applicable for weak-ordered memory model archs, |
4560 | * such as IA-64). */ | 4582 | * such as IA-64). */ |
4561 | wmb(); | 4583 | wmb(); |
4562 | writel(i, adapter->hw.hw_addr + rx_ring->rdt); | 4584 | writel(i, hw->hw_addr + rx_ring->rdt); |
4563 | } | 4585 | } |
4564 | } | 4586 | } |
4565 | 4587 | ||
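The tail update above is the usual produce-then-notify pattern: descriptor stores must be globally visible before the tail register tells the MAC to fetch them, which is what the wmb() enforces on weakly ordered machines. A generic sketch of that sequence with the new local-hw convention (the buffer_addr assignment is representative, not copied from this hunk):

        /* Sketch: publish the descriptor, then ring the RX doorbell. */
        rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);  /* fill descriptor   */
        wmb();                                  /* order stores before the tail write */
        writel(i, hw->hw_addr + rx_ring->rdt);  /* hands the new descriptors to the MAC */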
@@ -4572,6 +4594,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
4572 | struct e1000_rx_ring *rx_ring, | 4594 | struct e1000_rx_ring *rx_ring, |
4573 | int cleaned_count) | 4595 | int cleaned_count) |
4574 | { | 4596 | { |
4597 | struct e1000_hw *hw = &adapter->hw; | ||
4575 | struct net_device *netdev = adapter->netdev; | 4598 | struct net_device *netdev = adapter->netdev; |
4576 | struct pci_dev *pdev = adapter->pdev; | 4599 | struct pci_dev *pdev = adapter->pdev; |
4577 | union e1000_rx_desc_packet_split *rx_desc; | 4600 | union e1000_rx_desc_packet_split *rx_desc; |
@@ -4656,7 +4679,7 @@ no_buffers: | |||
4656 | * descriptors are 32 bytes...so we increment tail | 4679 | * descriptors are 32 bytes...so we increment tail |
4657 | * twice as much. | 4680 | * twice as much. |
4658 | */ | 4681 | */ |
4659 | writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt); | 4682 | writel(i<<1, hw->hw_addr + rx_ring->rdt); |
4660 | } | 4683 | } |
4661 | } | 4684 | } |
4662 | 4685 | ||
@@ -4667,46 +4690,47 @@ no_buffers: | |||
4667 | 4690 | ||
4668 | static void e1000_smartspeed(struct e1000_adapter *adapter) | 4691 | static void e1000_smartspeed(struct e1000_adapter *adapter) |
4669 | { | 4692 | { |
4693 | struct e1000_hw *hw = &adapter->hw; | ||
4670 | u16 phy_status; | 4694 | u16 phy_status; |
4671 | u16 phy_ctrl; | 4695 | u16 phy_ctrl; |
4672 | 4696 | ||
4673 | if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || | 4697 | if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || |
4674 | !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) | 4698 | !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) |
4675 | return; | 4699 | return; |
4676 | 4700 | ||
4677 | if (adapter->smartspeed == 0) { | 4701 | if (adapter->smartspeed == 0) { |
4678 | /* If Master/Slave config fault is asserted twice, | 4702 | /* If Master/Slave config fault is asserted twice, |
4679 | * we assume back-to-back */ | 4703 | * we assume back-to-back */ |
4680 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); | 4704 | e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); |
4681 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; | 4705 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; |
4682 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); | 4706 | e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); |
4683 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; | 4707 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; |
4684 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); | 4708 | e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); |
4685 | if (phy_ctrl & CR_1000T_MS_ENABLE) { | 4709 | if (phy_ctrl & CR_1000T_MS_ENABLE) { |
4686 | phy_ctrl &= ~CR_1000T_MS_ENABLE; | 4710 | phy_ctrl &= ~CR_1000T_MS_ENABLE; |
4687 | e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, | 4711 | e1000_write_phy_reg(hw, PHY_1000T_CTRL, |
4688 | phy_ctrl); | 4712 | phy_ctrl); |
4689 | adapter->smartspeed++; | 4713 | adapter->smartspeed++; |
4690 | if (!e1000_phy_setup_autoneg(&adapter->hw) && | 4714 | if (!e1000_phy_setup_autoneg(hw) && |
4691 | !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, | 4715 | !e1000_read_phy_reg(hw, PHY_CTRL, |
4692 | &phy_ctrl)) { | 4716 | &phy_ctrl)) { |
4693 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | | 4717 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | |
4694 | MII_CR_RESTART_AUTO_NEG); | 4718 | MII_CR_RESTART_AUTO_NEG); |
4695 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, | 4719 | e1000_write_phy_reg(hw, PHY_CTRL, |
4696 | phy_ctrl); | 4720 | phy_ctrl); |
4697 | } | 4721 | } |
4698 | } | 4722 | } |
4699 | return; | 4723 | return; |
4700 | } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { | 4724 | } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { |
4701 | /* If still no link, perhaps using 2/3 pair cable */ | 4725 | /* If still no link, perhaps using 2/3 pair cable */ |
4702 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); | 4726 | e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); |
4703 | phy_ctrl |= CR_1000T_MS_ENABLE; | 4727 | phy_ctrl |= CR_1000T_MS_ENABLE; |
4704 | e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl); | 4728 | e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); |
4705 | if (!e1000_phy_setup_autoneg(&adapter->hw) && | 4729 | if (!e1000_phy_setup_autoneg(hw) && |
4706 | !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) { | 4730 | !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { |
4707 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | | 4731 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | |
4708 | MII_CR_RESTART_AUTO_NEG); | 4732 | MII_CR_RESTART_AUTO_NEG); |
4709 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl); | 4733 | e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); |
4710 | } | 4734 | } |
4711 | } | 4735 | } |
4712 | /* Restart process after E1000_SMARTSPEED_MAX iterations */ | 4736 | /* Restart process after E1000_SMARTSPEED_MAX iterations */ |
@@ -4744,24 +4768,25 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, | |||
4744 | int cmd) | 4768 | int cmd) |
4745 | { | 4769 | { |
4746 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4770 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4771 | struct e1000_hw *hw = &adapter->hw; | ||
4747 | struct mii_ioctl_data *data = if_mii(ifr); | 4772 | struct mii_ioctl_data *data = if_mii(ifr); |
4748 | int retval; | 4773 | int retval; |
4749 | u16 mii_reg; | 4774 | u16 mii_reg; |
4750 | u16 spddplx; | 4775 | u16 spddplx; |
4751 | unsigned long flags; | 4776 | unsigned long flags; |
4752 | 4777 | ||
4753 | if (adapter->hw.media_type != e1000_media_type_copper) | 4778 | if (hw->media_type != e1000_media_type_copper) |
4754 | return -EOPNOTSUPP; | 4779 | return -EOPNOTSUPP; |
4755 | 4780 | ||
4756 | switch (cmd) { | 4781 | switch (cmd) { |
4757 | case SIOCGMIIPHY: | 4782 | case SIOCGMIIPHY: |
4758 | data->phy_id = adapter->hw.phy_addr; | 4783 | data->phy_id = hw->phy_addr; |
4759 | break; | 4784 | break; |
4760 | case SIOCGMIIREG: | 4785 | case SIOCGMIIREG: |
4761 | if (!capable(CAP_NET_ADMIN)) | 4786 | if (!capable(CAP_NET_ADMIN)) |
4762 | return -EPERM; | 4787 | return -EPERM; |
4763 | spin_lock_irqsave(&adapter->stats_lock, flags); | 4788 | spin_lock_irqsave(&adapter->stats_lock, flags); |
4764 | if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, | 4789 | if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, |
4765 | &data->val_out)) { | 4790 | &data->val_out)) { |
4766 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4791 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
4767 | return -EIO; | 4792 | return -EIO; |
@@ -4775,20 +4800,20 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, | |||
4775 | return -EFAULT; | 4800 | return -EFAULT; |
4776 | mii_reg = data->val_in; | 4801 | mii_reg = data->val_in; |
4777 | spin_lock_irqsave(&adapter->stats_lock, flags); | 4802 | spin_lock_irqsave(&adapter->stats_lock, flags); |
4778 | if (e1000_write_phy_reg(&adapter->hw, data->reg_num, | 4803 | if (e1000_write_phy_reg(hw, data->reg_num, |
4779 | mii_reg)) { | 4804 | mii_reg)) { |
4780 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4805 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
4781 | return -EIO; | 4806 | return -EIO; |
4782 | } | 4807 | } |
4783 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4808 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
4784 | if (adapter->hw.media_type == e1000_media_type_copper) { | 4809 | if (hw->media_type == e1000_media_type_copper) { |
4785 | switch (data->reg_num) { | 4810 | switch (data->reg_num) { |
4786 | case PHY_CTRL: | 4811 | case PHY_CTRL: |
4787 | if (mii_reg & MII_CR_POWER_DOWN) | 4812 | if (mii_reg & MII_CR_POWER_DOWN) |
4788 | break; | 4813 | break; |
4789 | if (mii_reg & MII_CR_AUTO_NEG_EN) { | 4814 | if (mii_reg & MII_CR_AUTO_NEG_EN) { |
4790 | adapter->hw.autoneg = 1; | 4815 | hw->autoneg = 1; |
4791 | adapter->hw.autoneg_advertised = 0x2F; | 4816 | hw->autoneg_advertised = 0x2F; |
4792 | } else { | 4817 | } else { |
4793 | if (mii_reg & 0x40) | 4818 | if (mii_reg & 0x40) |
4794 | spddplx = SPEED_1000; | 4819 | spddplx = SPEED_1000; |
@@ -4811,7 +4836,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, | |||
4811 | break; | 4836 | break; |
4812 | case M88E1000_PHY_SPEC_CTRL: | 4837 | case M88E1000_PHY_SPEC_CTRL: |
4813 | case M88E1000_EXT_PHY_SPEC_CTRL: | 4838 | case M88E1000_EXT_PHY_SPEC_CTRL: |
4814 | if (e1000_phy_reset(&adapter->hw)) | 4839 | if (e1000_phy_reset(hw)) |
4815 | return -EIO; | 4840 | return -EIO; |
4816 | break; | 4841 | break; |
4817 | } | 4842 | } |
@@ -4885,6 +4910,7 @@ static void e1000_vlan_rx_register(struct net_device *netdev, | |||
4885 | struct vlan_group *grp) | 4910 | struct vlan_group *grp) |
4886 | { | 4911 | { |
4887 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4912 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4913 | struct e1000_hw *hw = &adapter->hw; | ||
4888 | u32 ctrl, rctl; | 4914 | u32 ctrl, rctl; |
4889 | 4915 | ||
4890 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | 4916 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
@@ -4893,22 +4919,22 @@ static void e1000_vlan_rx_register(struct net_device *netdev, | |||
4893 | 4919 | ||
4894 | if (grp) { | 4920 | if (grp) { |
4895 | /* enable VLAN tag insert/strip */ | 4921 | /* enable VLAN tag insert/strip */ |
4896 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 4922 | ctrl = er32(CTRL); |
4897 | ctrl |= E1000_CTRL_VME; | 4923 | ctrl |= E1000_CTRL_VME; |
4898 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 4924 | ew32(CTRL, ctrl); |
4899 | 4925 | ||
4900 | if (adapter->hw.mac_type != e1000_ich8lan) { | 4926 | if (adapter->hw.mac_type != e1000_ich8lan) { |
4901 | /* enable VLAN receive filtering */ | 4927 | /* enable VLAN receive filtering */ |
4902 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 4928 | rctl = er32(RCTL); |
4903 | rctl &= ~E1000_RCTL_CFIEN; | 4929 | rctl &= ~E1000_RCTL_CFIEN; |
4904 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 4930 | ew32(RCTL, rctl); |
4905 | e1000_update_mng_vlan(adapter); | 4931 | e1000_update_mng_vlan(adapter); |
4906 | } | 4932 | } |
4907 | } else { | 4933 | } else { |
4908 | /* disable VLAN tag insert/strip */ | 4934 | /* disable VLAN tag insert/strip */ |
4909 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 4935 | ctrl = er32(CTRL); |
4910 | ctrl &= ~E1000_CTRL_VME; | 4936 | ctrl &= ~E1000_CTRL_VME; |
4911 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 4937 | ew32(CTRL, ctrl); |
4912 | 4938 | ||
4913 | if (adapter->hw.mac_type != e1000_ich8lan) { | 4939 | if (adapter->hw.mac_type != e1000_ich8lan) { |
4914 | if (adapter->mng_vlan_id != | 4940 | if (adapter->mng_vlan_id != |
@@ -4927,22 +4953,24 @@ static void e1000_vlan_rx_register(struct net_device *netdev, | |||
4927 | static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | 4953 | static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) |
4928 | { | 4954 | { |
4929 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4955 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4956 | struct e1000_hw *hw = &adapter->hw; | ||
4930 | u32 vfta, index; | 4957 | u32 vfta, index; |
4931 | 4958 | ||
4932 | if ((adapter->hw.mng_cookie.status & | 4959 | if ((hw->mng_cookie.status & |
4933 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | 4960 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && |
4934 | (vid == adapter->mng_vlan_id)) | 4961 | (vid == adapter->mng_vlan_id)) |
4935 | return; | 4962 | return; |
4936 | /* add VID to filter table */ | 4963 | /* add VID to filter table */ |
4937 | index = (vid >> 5) & 0x7F; | 4964 | index = (vid >> 5) & 0x7F; |
4938 | vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); | 4965 | vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); |
4939 | vfta |= (1 << (vid & 0x1F)); | 4966 | vfta |= (1 << (vid & 0x1F)); |
4940 | e1000_write_vfta(&adapter->hw, index, vfta); | 4967 | e1000_write_vfta(hw, index, vfta); |
4941 | } | 4968 | } |
4942 | 4969 | ||
4943 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | 4970 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) |
4944 | { | 4971 | { |
4945 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4972 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4973 | struct e1000_hw *hw = &adapter->hw; | ||
4946 | u32 vfta, index; | 4974 | u32 vfta, index; |
4947 | 4975 | ||
4948 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | 4976 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
@@ -4951,7 +4979,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
4951 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | 4979 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
4952 | e1000_irq_enable(adapter); | 4980 | e1000_irq_enable(adapter); |
4953 | 4981 | ||
4954 | if ((adapter->hw.mng_cookie.status & | 4982 | if ((hw->mng_cookie.status & |
4955 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | 4983 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && |
4956 | (vid == adapter->mng_vlan_id)) { | 4984 | (vid == adapter->mng_vlan_id)) { |
4957 | /* release control to f/w */ | 4985 | /* release control to f/w */ |
@@ -4961,9 +4989,9 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
4961 | 4989 | ||
4962 | /* remove VID from filter table */ | 4990 | /* remove VID from filter table */ |
4963 | index = (vid >> 5) & 0x7F; | 4991 | index = (vid >> 5) & 0x7F; |
4964 | vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); | 4992 | vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); |
4965 | vfta &= ~(1 << (vid & 0x1F)); | 4993 | vfta &= ~(1 << (vid & 0x1F)); |
4966 | e1000_write_vfta(&adapter->hw, index, vfta); | 4994 | e1000_write_vfta(hw, index, vfta); |
4967 | } | 4995 | } |
4968 | 4996 | ||
4969 | static void e1000_restore_vlan(struct e1000_adapter *adapter) | 4997 | static void e1000_restore_vlan(struct e1000_adapter *adapter) |
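Both the add and kill paths locate a VLAN ID in the 4096-bit VLAN Filter Table Array the same way: the upper bits of the VID select one of 128 32-bit VFTA registers and the low five bits select the bit within it. A worked example for VID 100 (index 3, bit 4):

        /* VID 100: (100 >> 5) & 0x7F == 3, 100 & 0x1F == 4. */
        u32 index = (vid >> 5) & 0x7F;                  /* which VFTA register       */
        u32 vfta  = E1000_READ_REG_ARRAY(hw, VFTA, index);
        vfta     |= 1 << (vid & 0x1F);                  /* set to add, clear to kill */
        e1000_write_vfta(hw, index, vfta);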
@@ -4982,10 +5010,12 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter) | |||
4982 | 5010 | ||
4983 | int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | 5011 | int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) |
4984 | { | 5012 | { |
4985 | adapter->hw.autoneg = 0; | 5013 | struct e1000_hw *hw = &adapter->hw; |
5014 | |||
5015 | hw->autoneg = 0; | ||
4986 | 5016 | ||
4987 | /* Fiber NICs only allow 1000 Mbps Full duplex */ | 5017 | /* Fiber NICs only allow 1000 Mbps Full duplex */ |
4988 | if ((adapter->hw.media_type == e1000_media_type_fiber) && | 5018 | if ((hw->media_type == e1000_media_type_fiber) && |
4989 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { | 5019 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { |
4990 | DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); | 5020 | DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); |
4991 | return -EINVAL; | 5021 | return -EINVAL; |
@@ -4993,20 +5023,20 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | |||
4993 | 5023 | ||
4994 | switch (spddplx) { | 5024 | switch (spddplx) { |
4995 | case SPEED_10 + DUPLEX_HALF: | 5025 | case SPEED_10 + DUPLEX_HALF: |
4996 | adapter->hw.forced_speed_duplex = e1000_10_half; | 5026 | hw->forced_speed_duplex = e1000_10_half; |
4997 | break; | 5027 | break; |
4998 | case SPEED_10 + DUPLEX_FULL: | 5028 | case SPEED_10 + DUPLEX_FULL: |
4999 | adapter->hw.forced_speed_duplex = e1000_10_full; | 5029 | hw->forced_speed_duplex = e1000_10_full; |
5000 | break; | 5030 | break; |
5001 | case SPEED_100 + DUPLEX_HALF: | 5031 | case SPEED_100 + DUPLEX_HALF: |
5002 | adapter->hw.forced_speed_duplex = e1000_100_half; | 5032 | hw->forced_speed_duplex = e1000_100_half; |
5003 | break; | 5033 | break; |
5004 | case SPEED_100 + DUPLEX_FULL: | 5034 | case SPEED_100 + DUPLEX_FULL: |
5005 | adapter->hw.forced_speed_duplex = e1000_100_full; | 5035 | hw->forced_speed_duplex = e1000_100_full; |
5006 | break; | 5036 | break; |
5007 | case SPEED_1000 + DUPLEX_FULL: | 5037 | case SPEED_1000 + DUPLEX_FULL: |
5008 | adapter->hw.autoneg = 1; | 5038 | hw->autoneg = 1; |
5009 | adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; | 5039 | hw->autoneg_advertised = ADVERTISE_1000_FULL; |
5010 | break; | 5040 | break; |
5011 | case SPEED_1000 + DUPLEX_HALF: /* not supported */ | 5041 | case SPEED_1000 + DUPLEX_HALF: /* not supported */ |
5012 | default: | 5042 | default: |
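e1000_set_spd_dplx() takes speed and duplex folded into a single value, which is why the case labels read SPEED_10 + DUPLEX_HALF and so on; with ethtool's constants (DUPLEX_HALF == 0, DUPLEX_FULL == 1, speeds 10/100/1000) every sum is unambiguous. An illustrative caller, not taken from the driver:

        /* Illustrative only: request 100 Mb/s full duplex.
         * SPEED_100 + DUPLEX_FULL == 100 + 1 == 101, matching the
         * "case SPEED_100 + DUPLEX_FULL" label above. */
        if (e1000_set_spd_dplx(adapter, SPEED_100 + DUPLEX_FULL))
                return -EINVAL;         /* unsupported combination */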
@@ -5020,6 +5050,7 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
5020 | { | 5050 | { |
5021 | struct net_device *netdev = pci_get_drvdata(pdev); | 5051 | struct net_device *netdev = pci_get_drvdata(pdev); |
5022 | struct e1000_adapter *adapter = netdev_priv(netdev); | 5052 | struct e1000_adapter *adapter = netdev_priv(netdev); |
5053 | struct e1000_hw *hw = &adapter->hw; | ||
5023 | u32 ctrl, ctrl_ext, rctl, status; | 5054 | u32 ctrl, ctrl_ext, rctl, status; |
5024 | u32 wufc = adapter->wol; | 5055 | u32 wufc = adapter->wol; |
5025 | #ifdef CONFIG_PM | 5056 | #ifdef CONFIG_PM |
@@ -5039,7 +5070,7 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
5039 | return retval; | 5070 | return retval; |
5040 | #endif | 5071 | #endif |
5041 | 5072 | ||
5042 | status = E1000_READ_REG(&adapter->hw, STATUS); | 5073 | status = er32(STATUS); |
5043 | if (status & E1000_STATUS_LU) | 5074 | if (status & E1000_STATUS_LU) |
5044 | wufc &= ~E1000_WUFC_LNKC; | 5075 | wufc &= ~E1000_WUFC_LNKC; |
5045 | 5076 | ||
@@ -5049,40 +5080,40 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
5049 | 5080 | ||
5050 | /* turn on all-multi mode if wake on multicast is enabled */ | 5081 | /* turn on all-multi mode if wake on multicast is enabled */ |
5051 | if (wufc & E1000_WUFC_MC) { | 5082 | if (wufc & E1000_WUFC_MC) { |
5052 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 5083 | rctl = er32(RCTL); |
5053 | rctl |= E1000_RCTL_MPE; | 5084 | rctl |= E1000_RCTL_MPE; |
5054 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 5085 | ew32(RCTL, rctl); |
5055 | } | 5086 | } |
5056 | 5087 | ||
5057 | if (adapter->hw.mac_type >= e1000_82540) { | 5088 | if (hw->mac_type >= e1000_82540) { |
5058 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 5089 | ctrl = er32(CTRL); |
5059 | /* advertise wake from D3Cold */ | 5090 | /* advertise wake from D3Cold */ |
5060 | #define E1000_CTRL_ADVD3WUC 0x00100000 | 5091 | #define E1000_CTRL_ADVD3WUC 0x00100000 |
5061 | /* phy power management enable */ | 5092 | /* phy power management enable */ |
5062 | #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 | 5093 | #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 |
5063 | ctrl |= E1000_CTRL_ADVD3WUC | | 5094 | ctrl |= E1000_CTRL_ADVD3WUC | |
5064 | E1000_CTRL_EN_PHY_PWR_MGMT; | 5095 | E1000_CTRL_EN_PHY_PWR_MGMT; |
5065 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 5096 | ew32(CTRL, ctrl); |
5066 | } | 5097 | } |
5067 | 5098 | ||
5068 | if (adapter->hw.media_type == e1000_media_type_fiber || | 5099 | if (hw->media_type == e1000_media_type_fiber || |
5069 | adapter->hw.media_type == e1000_media_type_internal_serdes) { | 5100 | hw->media_type == e1000_media_type_internal_serdes) { |
5070 | /* keep the laser running in D3 */ | 5101 | /* keep the laser running in D3 */ |
5071 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); | 5102 | ctrl_ext = er32(CTRL_EXT); |
5072 | ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; | 5103 | ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; |
5073 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext); | 5104 | ew32(CTRL_EXT, ctrl_ext); |
5074 | } | 5105 | } |
5075 | 5106 | ||
5076 | /* Allow time for pending master requests to run */ | 5107 | /* Allow time for pending master requests to run */ |
5077 | e1000_disable_pciex_master(&adapter->hw); | 5108 | e1000_disable_pciex_master(hw); |
5078 | 5109 | ||
5079 | E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); | 5110 | ew32(WUC, E1000_WUC_PME_EN); |
5080 | E1000_WRITE_REG(&adapter->hw, WUFC, wufc); | 5111 | ew32(WUFC, wufc); |
5081 | pci_enable_wake(pdev, PCI_D3hot, 1); | 5112 | pci_enable_wake(pdev, PCI_D3hot, 1); |
5082 | pci_enable_wake(pdev, PCI_D3cold, 1); | 5113 | pci_enable_wake(pdev, PCI_D3cold, 1); |
5083 | } else { | 5114 | } else { |
5084 | E1000_WRITE_REG(&adapter->hw, WUC, 0); | 5115 | ew32(WUC, 0); |
5085 | E1000_WRITE_REG(&adapter->hw, WUFC, 0); | 5116 | ew32(WUFC, 0); |
5086 | pci_enable_wake(pdev, PCI_D3hot, 0); | 5117 | pci_enable_wake(pdev, PCI_D3hot, 0); |
5087 | pci_enable_wake(pdev, PCI_D3cold, 0); | 5118 | pci_enable_wake(pdev, PCI_D3cold, 0); |
5088 | } | 5119 | } |
@@ -5095,8 +5126,8 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
5095 | pci_enable_wake(pdev, PCI_D3cold, 1); | 5126 | pci_enable_wake(pdev, PCI_D3cold, 1); |
5096 | } | 5127 | } |
5097 | 5128 | ||
5098 | if (adapter->hw.phy_type == e1000_phy_igp_3) | 5129 | if (hw->phy_type == e1000_phy_igp_3) |
5099 | e1000_phy_powerdown_workaround(&adapter->hw); | 5130 | e1000_phy_powerdown_workaround(hw); |
5100 | 5131 | ||
5101 | if (netif_running(netdev)) | 5132 | if (netif_running(netdev)) |
5102 | e1000_free_irq(adapter); | 5133 | e1000_free_irq(adapter); |
@@ -5117,6 +5148,7 @@ static int e1000_resume(struct pci_dev *pdev) | |||
5117 | { | 5148 | { |
5118 | struct net_device *netdev = pci_get_drvdata(pdev); | 5149 | struct net_device *netdev = pci_get_drvdata(pdev); |
5119 | struct e1000_adapter *adapter = netdev_priv(netdev); | 5150 | struct e1000_adapter *adapter = netdev_priv(netdev); |
5151 | struct e1000_hw *hw = &adapter->hw; | ||
5120 | u32 err; | 5152 | u32 err; |
5121 | 5153 | ||
5122 | pci_set_power_state(pdev, PCI_D0); | 5154 | pci_set_power_state(pdev, PCI_D0); |
@@ -5135,7 +5167,7 @@ static int e1000_resume(struct pci_dev *pdev) | |||
5135 | 5167 | ||
5136 | e1000_power_up_phy(adapter); | 5168 | e1000_power_up_phy(adapter); |
5137 | e1000_reset(adapter); | 5169 | e1000_reset(adapter); |
5138 | E1000_WRITE_REG(&adapter->hw, WUS, ~0); | 5170 | ew32(WUS, ~0); |
5139 | 5171 | ||
5140 | e1000_init_manageability(adapter); | 5172 | e1000_init_manageability(adapter); |
5141 | 5173 | ||
@@ -5148,8 +5180,8 @@ static int e1000_resume(struct pci_dev *pdev) | |||
5148 | * DRV_LOAD until the interface is up. For all other cases, | 5180 | * DRV_LOAD until the interface is up. For all other cases, |
5149 | * let the f/w know that the h/w is now under the control | 5181 | * let the f/w know that the h/w is now under the control |
5150 | * of the driver. */ | 5182 | * of the driver. */ |
5151 | if (adapter->hw.mac_type != e1000_82573 || | 5183 | if (hw->mac_type != e1000_82573 || |
5152 | !e1000_check_mng_mode(&adapter->hw)) | 5184 | !e1000_check_mng_mode(hw)) |
5153 | e1000_get_hw_control(adapter); | 5185 | e1000_get_hw_control(adapter); |
5154 | 5186 | ||
5155 | return 0; | 5187 | return 0; |
@@ -5215,6 +5247,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | |||
5215 | { | 5247 | { |
5216 | struct net_device *netdev = pci_get_drvdata(pdev); | 5248 | struct net_device *netdev = pci_get_drvdata(pdev); |
5217 | struct e1000_adapter *adapter = netdev->priv; | 5249 | struct e1000_adapter *adapter = netdev->priv; |
5250 | struct e1000_hw *hw = &adapter->hw; | ||
5218 | 5251 | ||
5219 | if (pci_enable_device(pdev)) { | 5252 | if (pci_enable_device(pdev)) { |
5220 | printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n"); | 5253 | printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n"); |
@@ -5226,7 +5259,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | |||
5226 | pci_enable_wake(pdev, PCI_D3cold, 0); | 5259 | pci_enable_wake(pdev, PCI_D3cold, 0); |
5227 | 5260 | ||
5228 | e1000_reset(adapter); | 5261 | e1000_reset(adapter); |
5229 | E1000_WRITE_REG(&adapter->hw, WUS, ~0); | 5262 | ew32(WUS, ~0); |
5230 | 5263 | ||
5231 | return PCI_ERS_RESULT_RECOVERED; | 5264 | return PCI_ERS_RESULT_RECOVERED; |
5232 | } | 5265 | } |
@@ -5243,6 +5276,7 @@ static void e1000_io_resume(struct pci_dev *pdev) | |||
5243 | { | 5276 | { |
5244 | struct net_device *netdev = pci_get_drvdata(pdev); | 5277 | struct net_device *netdev = pci_get_drvdata(pdev); |
5245 | struct e1000_adapter *adapter = netdev->priv; | 5278 | struct e1000_adapter *adapter = netdev->priv; |
5279 | struct e1000_hw *hw = &adapter->hw; | ||
5246 | 5280 | ||
5247 | e1000_init_manageability(adapter); | 5281 | e1000_init_manageability(adapter); |
5248 | 5282 | ||
@@ -5259,8 +5293,8 @@ static void e1000_io_resume(struct pci_dev *pdev) | |||
5259 | * DRV_LOAD until the interface is up. For all other cases, | 5293 | * DRV_LOAD until the interface is up. For all other cases, |
5260 | * let the f/w know that the h/w is now under the control | 5294 | * let the f/w know that the h/w is now under the control |
5261 | * of the driver. */ | 5295 | * of the driver. */ |
5262 | if (adapter->hw.mac_type != e1000_82573 || | 5296 | if (hw->mac_type != e1000_82573 || |
5263 | !e1000_check_mng_mode(&adapter->hw)) | 5297 | !e1000_check_mng_mode(hw)) |
5264 | e1000_get_hw_control(adapter); | 5298 | e1000_get_hw_control(adapter); |
5265 | 5299 | ||
5266 | } | 5300 | } |
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h index 365626d3177e..acb5134ab22e 100644 --- a/drivers/net/e1000/e1000_osdep.h +++ b/drivers/net/e1000/e1000_osdep.h | |||
@@ -55,13 +55,13 @@ | |||
55 | #define DEBUGOUT7 DEBUGOUT3 | 55 | #define DEBUGOUT7 DEBUGOUT3 |
56 | 56 | ||
57 | 57 | ||
58 | #define E1000_WRITE_REG(a, reg, value) ( \ | 58 | #define er32(reg)\ |
59 | writel((value), ((a)->hw_addr + \ | 59 | (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ |
60 | (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg)))) | 60 | ? E1000_##reg : E1000_82542_##reg))) |
61 | 61 | ||
62 | #define E1000_READ_REG(a, reg) ( \ | 62 | #define ew32(reg, value) \ |
63 | readl((a)->hw_addr + \ | 63 | (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \ |
64 | (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg))) | 64 | ? E1000_##reg : E1000_82542_##reg)))) |
65 | 65 | ||
66 | #define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ | 66 | #define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ |
67 | writel((value), ((a)->hw_addr + \ | 67 | writel((value), ((a)->hw_addr + \ |
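These two macros are the heart of the patch: er32() and ew32() no longer take the hw pointer as an argument, they simply expect a variable named hw to be in scope, which is why every converted function above gains a "struct e1000_hw *hw = &adapter->hw;" line. In sketch form:

        /* Before: the pointer is passed explicitly. */
        u32 status = E1000_READ_REG(&adapter->hw, STATUS);
        E1000_WRITE_REG(&adapter->hw, IMC, ~0);

        /* After: the macros pick up a local "hw" implicitly. */
        struct e1000_hw *hw = &adapter->hw;
        u32 status = er32(STATUS);
        ew32(IMC, ~0);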
@@ -96,7 +96,7 @@ | |||
96 | (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ | 96 | (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ |
97 | (offset))) | 97 | (offset))) |
98 | 98 | ||
99 | #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) | 99 | #define E1000_WRITE_FLUSH() er32(STATUS) |
100 | 100 | ||
101 | #define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ | 101 | #define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ |
102 | writel((value), ((a)->flash_address + reg))) | 102 | writel((value), ((a)->flash_address + reg))) |
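E1000_WRITE_FLUSH() likewise loses its argument and reads STATUS through the implicit hw pointer; reading any register back forces earlier posted MMIO writes out to the device. A short, hedged example of the usual write-then-flush idiom:

        /* Sketch: make sure the IMC write has reached the MAC before
         * assuming interrupts are really masked. */
        struct e1000_hw *hw = &adapter->hw;
        ew32(IMC, ~0);
        E1000_WRITE_FLUSH();    /* the STATUS read flushes posted writes */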