author     Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /drivers/net/e1000e
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master  (archived-private-master)
Diffstat (limited to 'drivers/net/e1000e')

-rw-r--r--  drivers/net/e1000e/82571.c    | 419
-rw-r--r--  drivers/net/e1000e/defines.h  |   8
-rw-r--r--  drivers/net/e1000e/e1000.h    |  74
-rw-r--r--  drivers/net/e1000e/es2lan.c   | 344
-rw-r--r--  drivers/net/e1000e/ethtool.c  |  84
-rw-r--r--  drivers/net/e1000e/hw.h       |  72
-rw-r--r--  drivers/net/e1000e/ich8lan.c  | 625
-rw-r--r--  drivers/net/e1000e/lib.c      | 535
-rw-r--r--  drivers/net/e1000e/netdev.c   | 679
-rw-r--r--  drivers/net/e1000e/param.c    |   2
-rw-r--r--  drivers/net/e1000e/phy.c      | 585

11 files changed, 1864 insertions, 1563 deletions
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index d1e0563a67df..90155552ea09 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
 /*******************************************************************************

 Intel PRO/1000 Linux driver
-Copyright(c) 1999 - 2008 Intel Corporation.
+Copyright(c) 1999 - 2009 Intel Corporation.

 This program is free software; you can redistribute it and/or modify it
 under the terms and conditions of the GNU General Public License,
@@ -43,10 +43,6 @@
 * 82583V Gigabit Network Connection
 */

-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-
 #include "e1000.h"

 #define ID_LED_RESERVED_F746 0xF746
@@ -69,15 +65,15 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
 static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
 static s32 e1000_setup_link_82571(struct e1000_hw *hw);
 static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
+static void e1000_clear_vfta_82571(struct e1000_hw *hw);
 static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
 static s32 e1000_led_on_82574(struct e1000_hw *hw);
 static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);

 /**
 * e1000_init_phy_params_82571 - Init PHY func ptrs.
 * @hw: pointer to the HW structure
-*
-* This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 {
@@ -93,6 +89,9 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 phy->reset_delay_us = 100;

+phy->ops.power_up = e1000_power_up_phy_copper;
+phy->ops.power_down = e1000_power_down_phy_copper_82571;
+
 switch (hw->mac.type) {
 case e1000_82571:
 case e1000_82572:
@@ -140,8 +139,6 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 /**
 * e1000_init_nvm_params_82571 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
-*
-* This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
 {
@@ -205,8 +202,6 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
 /**
 * e1000_init_mac_params_82571 - Init MAC func ptrs.
 * @hw: pointer to the HW structure
-*
-* This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 {
@@ -240,7 +235,10 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 /* Set rar entry count */
 mac->rar_entry_count = E1000_RAR_ENTRIES;
 /* Set if manageability features are enabled. */
-mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
+mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
+? true : false;
+/* Adaptive IFS supported */
+mac->adaptive_ifs = true;

 /* check for link */
 switch (hw->phy.media_type) {
@@ -269,8 +267,14 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 }

 switch (hw->mac.type) {
+case e1000_82573:
+func->set_lan_id = e1000_set_lan_id_single_port;
+func->check_mng_mode = e1000e_check_mng_mode_generic;
+func->led_on = e1000e_led_on_generic;
+break;
 case e1000_82574:
 case e1000_82583:
+func->set_lan_id = e1000_set_lan_id_single_port;
 func->check_mng_mode = e1000_check_mng_mode_82574;
 func->led_on = e1000_led_on_82574;
 break;
@@ -313,7 +317,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 * indicates that the bootagent or EFI code has
 * improperly left this bit enabled
 */
-hw_dbg(hw, "Please update your 82571 Bootagent\n");
+e_dbg("Please update your 82571 Bootagent\n");
 }
 ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
 }
@@ -332,7 +336,6 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
 struct e1000_hw *hw = &adapter->hw;
 static int global_quad_port_a; /* global port a indication */
 struct pci_dev *pdev = adapter->pdev;
-u16 eeprom_data = 0;
 int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
 s32 rc;

@@ -383,16 +386,15 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
 if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
 adapter->flags &= ~FLAG_HAS_WOL;
 break;
-
 case e1000_82573:
+case e1000_82574:
+case e1000_82583:
+/* Disable ASPM L0s due to hardware errata */
+e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L0S);
+
 if (pdev->device == E1000_DEV_ID_82573L) {
-if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1,
-&eeprom_data) < 0)
-break;
-if (!(eeprom_data & NVM_WORD1A_ASPM_MASK)) {
-adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
-adapter->max_hw_frame_size = DEFAULT_JUMBO;
-}
+adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
+adapter->max_hw_frame_size = DEFAULT_JUMBO;
 }
 break;
 default:
@@ -487,7 +489,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
 }

 if (i == sw_timeout) {
-hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
+e_dbg("Driver can't access device - SMBI bit is set.\n");
 hw->dev_spec.e82571.smb_counter++;
 }
 /* Get the FW semaphore. */
@@ -505,7 +507,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
 if (i == fw_timeout) {
 /* Release semaphores */
 e1000_put_hw_semaphore_82571(hw);
-hw_dbg(hw, "Driver can't access the NVM\n");
+e_dbg("Driver can't access the NVM\n");
 return -E1000_ERR_NVM;
 }

@@ -702,8 +704,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
 u16 words, u16 *data)
 {
 struct e1000_nvm_info *nvm = &hw->nvm;
-u32 i;
-u32 eewr = 0;
+u32 i, eewr = 0;
 s32 ret_val = 0;

 /*
@@ -712,7 +713,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
 */
 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
 (words == 0)) {
-hw_dbg(hw, "nvm parameter(s) out of bounds\n");
+e_dbg("nvm parameter(s) out of bounds\n");
 return -E1000_ERR_NVM;
 }

@@ -753,7 +754,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
 timeout--;
 }
 if (!timeout) {
-hw_dbg(hw, "MNG configuration cycle has not completed.\n");
+e_dbg("MNG configuration cycle has not completed.\n");
 return -E1000_ERR_RESET;
 }

@@ -763,7 +764,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
 /**
 * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
-* @active: TRUE to enable LPLU, FALSE to disable
+* @active: true to enable LPLU, false to disable
 *
 * Sets the LPLU D0 state according to the active flag. When activating LPLU
 * this function also disables smart speed and vice versa. LPLU will not be
@@ -834,15 +835,11 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
 * e1000_reset_hw_82571 - Reset hardware
 * @hw: pointer to the HW structure
 *
-* This resets the hardware into a known state. This is a
-* function pointer entry point called by the api module.
+* This resets the hardware into a known state.
 **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-u32 ctrl;
-u32 extcnf_ctrl;
-u32 ctrl_ext;
-u32 icr;
+u32 ctrl, extcnf_ctrl, ctrl_ext, icr;
 s32 ret_val;
 u16 i = 0;

@@ -852,9 +849,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 */
 ret_val = e1000e_disable_pcie_master(hw);
 if (ret_val)
-hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+e_dbg("PCI-E Master disable polling has failed.\n");

-hw_dbg(hw, "Masking off all interrupts\n");
+e_dbg("Masking off all interrupts\n");
 ew32(IMC, 0xffffffff);

 ew32(RCTL, 0);
@@ -893,7 +890,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)

 ctrl = er32(CTRL);

-hw_dbg(hw, "Issuing a global reset to MAC\n");
+e_dbg("Issuing a global reset to MAC\n");
 ew32(CTRL, ctrl | E1000_CTRL_RST);

 if (hw->nvm.type == e1000_nvm_flash_hw) {
@@ -929,9 +926,12 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 ew32(IMC, 0xffffffff);
 icr = er32(ICR);

-if (hw->mac.type == e1000_82571 &&
-hw->dev_spec.e82571.alt_mac_addr_is_present)
-e1000e_set_laa_state_82571(hw, true);
+/* Install any alternate MAC address into RAR0 */
+ret_val = e1000_check_alt_mac_addr_generic(hw);
+if (ret_val)
+return ret_val;
+
+e1000e_set_laa_state_82571(hw, true);

 /* Reinitialize the 82571 serdes link state machine */
 if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -951,21 +951,19 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 struct e1000_mac_info *mac = &hw->mac;
 u32 reg_data;
 s32 ret_val;
-u16 i;
-u16 rar_count = mac->rar_entry_count;
+u16 i, rar_count = mac->rar_entry_count;

 e1000_initialize_hw_bits_82571(hw);

 /* Initialize identification LED */
 ret_val = e1000e_id_led_init(hw);
-if (ret_val) {
-hw_dbg(hw, "Error initializing identification LED\n");
-return ret_val;
-}
+if (ret_val)
+e_dbg("Error initializing identification LED\n");
+/* This is not fatal and we should not stop init due to this */

 /* Disabling VLAN filtering */
-hw_dbg(hw, "Initializing the IEEE VLAN\n");
-e1000e_clear_vfta(hw);
+e_dbg("Initializing the IEEE VLAN\n");
+mac->ops.clear_vfta(hw);

 /* Setup the receive address. */
 /*
@@ -978,7 +976,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 e1000e_init_rx_addrs(hw, rar_count);

 /* Zero out the Multicast HASH table */
-hw_dbg(hw, "Zeroing the MTA\n");
+e_dbg("Zeroing the MTA\n");
 for (i = 0; i < mac->mta_reg_count; i++)
 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

@@ -1125,6 +1123,13 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
 reg |= (1 << 22);
 ew32(GCR, reg);

+/*
+* Workaround for hardware errata.
+* apply workaround for hardware errata documented in errata
+* docs Fixes issue where some error prone or unreliable PCIe
+* completions are occurring, particularly with ASPM enabled.
+* Without fix, issue can cause tx timeouts.
+*/
 reg = er32(GCR2);
 reg |= 1;
 ew32(GCR2, reg);
@@ -1137,13 +1142,13 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
 }

 /**
-* e1000e_clear_vfta - Clear VLAN filter table
+* e1000_clear_vfta_82571 - Clear VLAN filter table
 * @hw: pointer to the HW structure
 *
 * Clears the register array which contains the VLAN filter table by
 * setting all the values to 0.
 **/
-void e1000e_clear_vfta(struct e1000_hw *hw)
+static void e1000_clear_vfta_82571(struct e1000_hw *hw)
 {
 u32 offset;
 u32 vfta_value = 0;
@@ -1227,32 +1232,6 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
 }

 /**
-* e1000_update_mc_addr_list_82571 - Update Multicast addresses
-* @hw: pointer to the HW structure
-* @mc_addr_list: array of multicast addresses to program
-* @mc_addr_count: number of multicast addresses to program
-* @rar_used_count: the first RAR register free to program
-* @rar_count: total number of supported Receive Address Registers
-*
-* Updates the Receive Address Registers and Multicast Table Array.
-* The caller must have a packed mc_addr_list of multicast addresses.
-* The parameter rar_count will usually be hw->mac.rar_entry_count
-* unless there are workarounds that change this.
-**/
-static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
-u8 *mc_addr_list,
-u32 mc_addr_count,
-u32 rar_used_count,
-u32 rar_count)
-{
-if (e1000e_get_laa_state_82571(hw))
-rar_count--;
-
-e1000e_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count,
-rar_used_count, rar_count);
-}
-
-/**
 * e1000_setup_link_82571 - Setup flow control and link settings
 * @hw: pointer to the HW structure
 *
@@ -1294,7 +1273,6 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw)
 static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
 {
 u32 ctrl;
-u32 led_ctrl;
 s32 ret_val;

 ctrl = er32(CTRL);
@@ -1309,11 +1287,6 @@ static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
 break;
 case e1000_phy_igp_2:
 ret_val = e1000e_copper_link_setup_igp(hw);
-/* Setup activity LED */
-led_ctrl = er32(LEDCTL);
-led_ctrl &= IGP_ACTIVITY_LED_MASK;
-led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
-ew32(LEDCTL, led_ctrl);
 break;
 default:
 return -E1000_ERR_PHY;
@@ -1360,8 +1333,20 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
 * e1000_check_for_serdes_link_82571 - Check for link (Serdes)
 * @hw: pointer to the HW structure
 *
-* Checks for link up on the hardware. If link is not up and we have
-* a signal, then we need to force link up.
+* Reports the link state as up or down.
+*
+* If autonegotiation is supported by the link partner, the link state is
+* determined by the result of autonegotiation. This is the most likely case.
+* If autonegotiation is not supported by the link partner, and the link
+* has a valid signal, force the link up.
+*
+* The link state is represented internally here by 4 states:
+*
+* 1) down
+* 2) autoneg_progress
+* 3) autoneg_complete (the link successfully autonegotiated)
+* 4) forced_up (the link has been forced up, it did not autonegotiate)
+*
 **/
 static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 {
@@ -1387,7 +1372,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 */
 mac->serdes_link_state =
 e1000_serdes_link_autoneg_progress;
-hw_dbg(hw, "AN_UP -> AN_PROG\n");
+mac->serdes_has_link = false;
+e_dbg("AN_UP -> AN_PROG\n");
 }
 break;

@@ -1401,79 +1387,86 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 if (rxcw & E1000_RXCW_C) {
 /* Enable autoneg, and unforce link up */
 ew32(TXCW, mac->txcw);
-ew32(CTRL,
-(ctrl & ~E1000_CTRL_SLU));
+ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 mac->serdes_link_state =
 e1000_serdes_link_autoneg_progress;
-hw_dbg(hw, "FORCED_UP -> AN_PROG\n");
+mac->serdes_has_link = false;
+e_dbg("FORCED_UP -> AN_PROG\n");
 }
 break;

 case e1000_serdes_link_autoneg_progress:
-/*
-* If the LU bit is set in the STATUS register,
-* autoneg has completed sucessfully. If not,
-* try foring the link because the far end may be
-* available but not capable of autonegotiation.
-*/
-if (status & E1000_STATUS_LU) {
-mac->serdes_link_state =
-e1000_serdes_link_autoneg_complete;
-hw_dbg(hw, "AN_PROG -> AN_UP\n");
+if (rxcw & E1000_RXCW_C) {
+/*
+* We received /C/ ordered sets, meaning the
+* link partner has autonegotiated, and we can
+* trust the Link Up (LU) status bit.
+*/
+if (status & E1000_STATUS_LU) {
+mac->serdes_link_state =
+e1000_serdes_link_autoneg_complete;
+e_dbg("AN_PROG -> AN_UP\n");
+mac->serdes_has_link = true;
+} else {
+/* Autoneg completed, but failed. */
+mac->serdes_link_state =
+e1000_serdes_link_down;
+e_dbg("AN_PROG -> DOWN\n");
+}
 } else {
 /*
-* Disable autoneg, force link up and
-* full duplex, and change state to forced
+* The link partner did not autoneg.
+* Force link up and full duplex, and change
+* state to forced.
 */
-ew32(TXCW,
-(mac->txcw & ~E1000_TXCW_ANE));
+ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
 ew32(CTRL, ctrl);

 /* Configure Flow Control after link up. */
-ret_val =
-e1000e_config_fc_after_link_up(hw);
+ret_val = e1000e_config_fc_after_link_up(hw);
 if (ret_val) {
-hw_dbg(hw, "Error config flow control\n");
+e_dbg("Error config flow control\n");
 break;
 }
 mac->serdes_link_state =
 e1000_serdes_link_forced_up;
-hw_dbg(hw, "AN_PROG -> FORCED_UP\n");
+mac->serdes_has_link = true;
+e_dbg("AN_PROG -> FORCED_UP\n");
 }
-mac->serdes_has_link = true;
 break;

 case e1000_serdes_link_down:
 default:
-/* The link was down but the receiver has now gained
+/*
+* The link was down but the receiver has now gained
 * valid sync, so lets see if we can bring the link
-* up. */
+* up.
+*/
 ew32(TXCW, mac->txcw);
-ew32(CTRL,
-(ctrl & ~E1000_CTRL_SLU));
+ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 mac->serdes_link_state =
 e1000_serdes_link_autoneg_progress;
-hw_dbg(hw, "DOWN -> AN_PROG\n");
+e_dbg("DOWN -> AN_PROG\n");
 break;
 }
 } else {
 if (!(rxcw & E1000_RXCW_SYNCH)) {
 mac->serdes_has_link = false;
 mac->serdes_link_state = e1000_serdes_link_down;
-hw_dbg(hw, "ANYSTATE -> DOWN\n");
+e_dbg("ANYSTATE -> DOWN\n");
 } else {
 /*
-* We have sync, and can tolerate one
-* invalid (IV) codeword before declaring
-* link down, so reread to look again
+* We have sync, and can tolerate one invalid (IV)
+* codeword before declaring link down, so reread
+* to look again.
 */
 udelay(10);
 rxcw = er32(RXCW);
 if (rxcw & E1000_RXCW_IV) {
 mac->serdes_link_state = e1000_serdes_link_down;
 mac->serdes_has_link = false;
-hw_dbg(hw, "ANYSTATE -> DOWN\n");
+e_dbg("ANYSTATE -> DOWN\n");
 }
 }
 }
@@ -1495,7 +1488,7 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)

 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
 if (ret_val) {
-hw_dbg(hw, "NVM Read Error\n");
+e_dbg("NVM Read Error\n");
 return ret_val;
 }

@@ -1525,7 +1518,7 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
 bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
 {
 if (hw->mac.type != e1000_82571)
-return 0;
+return false;

 return hw->dev_spec.e82571.laa_is_present;
 }
@@ -1535,7 +1528,7 @@ bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
 * @hw: pointer to the HW structure
 * @state: enable/disable locally administered address
 *
-* Enable/Disable the current locally administers address state.
+* Enable/Disable the current locally administered address state.
 **/
 void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
 {
@@ -1609,6 +1602,51 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
 }

 /**
+* e1000_read_mac_addr_82571 - Read device MAC address
+* @hw: pointer to the HW structure
+**/
+static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+s32 ret_val = 0;
+
+/*
+* If there's an alternate MAC address place it in RAR0
+* so that it will override the Si installed default perm
+* address.
+*/
+ret_val = e1000_check_alt_mac_addr_generic(hw);
+if (ret_val)
+goto out;
+
+ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+return ret_val;
+}
+
+/**
+* e1000_power_down_phy_copper_82571 - Remove link during PHY power down
+* @hw: pointer to the HW structure
+*
+* In the case of a PHY power down to save power, or to turn off link during a
+* driver unload, or wake on lan is not enabled, remove the link.
+**/
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
+{
+struct e1000_phy_info *phy = &hw->phy;
+struct e1000_mac_info *mac = &hw->mac;
+
+if (!(phy->ops.check_reset_block))
+return;
+
+/* If the management interface is not enabled, then power down */
+if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
+e1000_power_down_phy_copper(hw);
+
+return;
+}
+
+/**
 * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
 * @hw: pointer to the HW structure
 *
@@ -1616,44 +1654,42 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
 **/
 static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
 {
-u32 temp;
-
 e1000e_clear_hw_cntrs_base(hw);

-temp = er32(PRC64);
-temp = er32(PRC127);
-temp = er32(PRC255);
-temp = er32(PRC511);
-temp = er32(PRC1023);
-temp = er32(PRC1522);
-temp = er32(PTC64);
-temp = er32(PTC127);
-temp = er32(PTC255);
-temp = er32(PTC511);
-temp = er32(PTC1023);
-temp = er32(PTC1522);
+er32(PRC64);
+er32(PRC127);
+er32(PRC255);
+er32(PRC511);
+er32(PRC1023);
+er32(PRC1522);
+er32(PTC64);
+er32(PTC127);
+er32(PTC255);
+er32(PTC511);
+er32(PTC1023);
+er32(PTC1522);

-temp = er32(ALGNERRC);
-temp = er32(RXERRC);
-temp = er32(TNCRS);
-temp = er32(CEXTERR);
-temp = er32(TSCTC);
-temp = er32(TSCTFC);
+er32(ALGNERRC);
+er32(RXERRC);
+er32(TNCRS);
+er32(CEXTERR);
+er32(TSCTC);
+er32(TSCTFC);

-temp = er32(MGTPRC);
-temp = er32(MGTPDC);
-temp = er32(MGTPTC);
+er32(MGTPRC);
+er32(MGTPDC);
+er32(MGTPTC);

-temp = er32(IAC);
-temp = er32(ICRXOC);
+er32(IAC);
+er32(ICRXOC);

-temp = er32(ICRXPTC);
-temp = er32(ICRXATC);
-temp = er32(ICTXPTC);
-temp = er32(ICTXATC);
-temp = er32(ICTXQEC);
-temp = er32(ICTXQMTC);
-temp = er32(ICRXDMTC);
+er32(ICRXPTC);
+er32(ICRXATC);
+er32(ICTXPTC);
+er32(ICTXATC);
+er32(ICTXQEC);
+er32(ICTXQMTC);
+er32(ICRXDMTC);
 }

 static struct e1000_mac_operations e82571_mac_ops = {
@@ -1663,76 +1699,83 @@ static struct e1000_mac_operations e82571_mac_ops = {
 .cleanup_led = e1000e_cleanup_led_generic,
 .clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
 .get_bus_info = e1000e_get_bus_info_pcie,
+.set_lan_id = e1000_set_lan_id_multi_port_pcie,
 /* .get_link_up_info: media type dependent */
 /* .led_on: mac type dependent */
 .led_off = e1000e_led_off_generic,
-.update_mc_addr_list = e1000_update_mc_addr_list_82571,
+.update_mc_addr_list = e1000e_update_mc_addr_list_generic,
+.write_vfta = e1000_write_vfta_generic,
+.clear_vfta = e1000_clear_vfta_82571,
 .reset_hw = e1000_reset_hw_82571,
 .init_hw = e1000_init_hw_82571,
 .setup_link = e1000_setup_link_82571,
 /* .setup_physical_interface: media type dependent */
 .setup_led = e1000e_setup_led_generic,
+.read_mac_addr = e1000_read_mac_addr_82571,
 };

 static struct e1000_phy_operations e82_phy_ops_igp = {
-.acquire_phy = e1000_get_hw_semaphore_82571,
+.acquire = e1000_get_hw_semaphore_82571,
+.check_polarity = e1000_check_polarity_igp,
 .check_reset_block = e1000e_check_reset_block_generic,
-.commit_phy = NULL,
+.commit = NULL,
 .force_speed_duplex = e1000e_phy_force_speed_duplex_igp,
 .get_cfg_done = e1000_get_cfg_done_82571,
 .get_cable_length = e1000e_get_cable_length_igp_2,
-.get_phy_info = e1000e_get_phy_info_igp,
-.read_phy_reg = e1000e_read_phy_reg_igp,
-.release_phy = e1000_put_hw_semaphore_82571,
-.reset_phy = e1000e_phy_hw_reset_generic,
+.get_info = e1000e_get_phy_info_igp,
+.read_reg = e1000e_read_phy_reg_igp,
+.release = e1000_put_hw_semaphore_82571,
+.reset = e1000e_phy_hw_reset_generic,
 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
-.write_phy_reg = e1000e_write_phy_reg_igp,
+.write_reg = e1000e_write_phy_reg_igp,
 .cfg_on_link_up = NULL,
 };

 static struct e1000_phy_operations e82_phy_ops_m88 = {
-.acquire_phy = e1000_get_hw_semaphore_82571,
+.acquire = e1000_get_hw_semaphore_82571,
+.check_polarity = e1000_check_polarity_m88,
 .check_reset_block = e1000e_check_reset_block_generic,
-.commit_phy = e1000e_phy_sw_reset,
+.commit = e1000e_phy_sw_reset,
 .force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
 .get_cfg_done = e1000e_get_cfg_done,
 .get_cable_length = e1000e_get_cable_length_m88,
-.get_phy_info = e1000e_get_phy_info_m88,
-.read_phy_reg = e1000e_read_phy_reg_m88,
-.release_phy = e1000_put_hw_semaphore_82571,
-.reset_phy = e1000e_phy_hw_reset_generic,
+.get_info = e1000e_get_phy_info_m88,
+.read_reg = e1000e_read_phy_reg_m88,
+.release = e1000_put_hw_semaphore_82571,
+.reset = e1000e_phy_hw_reset_generic,
 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
-.write_phy_reg = e1000e_write_phy_reg_m88,
+.write_reg = e1000e_write_phy_reg_m88,
 .cfg_on_link_up = NULL,
 };

 static struct e1000_phy_operations e82_phy_ops_bm = {
-.acquire_phy = e1000_get_hw_semaphore_82571,
+.acquire = e1000_get_hw_semaphore_82571,
+.check_polarity = e1000_check_polarity_m88,
 .check_reset_block = e1000e_check_reset_block_generic,
-.commit_phy = e1000e_phy_sw_reset,
+.commit = e1000e_phy_sw_reset,
 .force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
 .get_cfg_done = e1000e_get_cfg_done,
 .get_cable_length = e1000e_get_cable_length_m88,
-.get_phy_info = e1000e_get_phy_info_m88,
-.read_phy_reg = e1000e_read_phy_reg_bm2,
-.release_phy = e1000_put_hw_semaphore_82571,
-.reset_phy = e1000e_phy_hw_reset_generic,
+.get_info = e1000e_get_phy_info_m88,
+.read_reg = e1000e_read_phy_reg_bm2,
+.release = e1000_put_hw_semaphore_82571,
+.reset = e1000e_phy_hw_reset_generic,
 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
-.write_phy_reg = e1000e_write_phy_reg_bm2,
+.write_reg = e1000e_write_phy_reg_bm2,
 .cfg_on_link_up = NULL,
 };

 static struct e1000_nvm_operations e82571_nvm_ops = {
-.acquire_nvm = e1000_acquire_nvm_82571,
-.read_nvm = e1000e_read_nvm_eerd,
-.release_nvm = e1000_release_nvm_82571,
-.update_nvm = e1000_update_nvm_checksum_82571,
+.acquire = e1000_acquire_nvm_82571,
+.read = e1000e_read_nvm_eerd,
+.release = e1000_release_nvm_82571,
+.update = e1000_update_nvm_checksum_82571,
 .valid_led_default = e1000_valid_led_default_82571,
-.validate_nvm = e1000_validate_nvm_checksum_82571,
-.write_nvm = e1000_write_nvm_82571,
+.validate = e1000_validate_nvm_checksum_82571,
+.write = e1000_write_nvm_82571,
 };

 struct e1000_info e1000_82571_info = {
@@ -1747,6 +1790,7 @@ struct e1000_info e1000_82571_info = {
 | FLAG_RESET_OVERWRITES_LAA /* errata */
 | FLAG_TARC_SPEED_MODE_BIT /* errata */
 | FLAG_APME_CHECK_PORT_B,
+.flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
 .pba = 38,
 .max_hw_frame_size = DEFAULT_JUMBO,
 .get_variants = e1000_get_variants_82571,
@@ -1764,6 +1808,7 @@ struct e1000_info e1000_82572_info = {
 | FLAG_RX_CSUM_ENABLED
 | FLAG_HAS_CTRLEXT_ON_LOAD
 | FLAG_TARC_SPEED_MODE_BIT, /* errata */
+.flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
 .pba = 38,
 .max_hw_frame_size = DEFAULT_JUMBO,
 .get_variants = e1000_get_variants_82571,
@@ -1775,13 +1820,11 @@ struct e1000_info e1000_82572_info = {
 struct e1000_info e1000_82573_info = {
 .mac = e1000_82573,
 .flags = FLAG_HAS_HW_VLAN_FILTER
-| FLAG_HAS_JUMBO_FRAMES
 | FLAG_HAS_WOL
 | FLAG_APME_IN_CTRL3
 | FLAG_RX_CSUM_ENABLED
 | FLAG_HAS_SMART_POWER_DOWN
 | FLAG_HAS_AMT
-| FLAG_HAS_ERT
 | FLAG_HAS_SWSM_ON_LOAD,
 .pba = 20,
 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 1190167a8b3d..e301e26d6897 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
 /*******************************************************************************

 Intel PRO/1000 Linux driver
-Copyright(c) 1999 - 2008 Intel Corporation.
+Copyright(c) 1999 - 2009 Intel Corporation.

 This program is free software; you can redistribute it and/or modify it
 under the terms and conditions of the GNU General Public License,
@@ -74,7 +74,7 @@
 #define E1000_WUS_BC E1000_WUFC_BC

 /* Extended Device Control */
-#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
 #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
 #define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
 #define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
@@ -320,6 +320,8 @@
 #define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */

 /* Header split receive */
+#define E1000_RFCTL_NFSW_DIS 0x00000040
+#define E1000_RFCTL_NFSR_DIS 0x00000080
 #define E1000_RFCTL_ACK_DIS 0x00001000
 #define E1000_RFCTL_EXTEN 0x00008000
 #define E1000_RFCTL_IPV6_EX_DIS 0x00010000
@@ -460,6 +462,8 @@
 */
 #define E1000_RAR_ENTRIES 15
 #define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2

 /* Error Codes */
 #define E1000_ERR_NVM 1
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 3e187b0e4203..ee32b9b27a9f 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
 /*******************************************************************************

 Intel PRO/1000 Linux driver
-Copyright(c) 1999 - 2008 Intel Corporation.
+Copyright(c) 1999 - 2009 Intel Corporation.

 This program is free software; you can redistribute it and/or modify it
 under the terms and conditions of the GNU General Public License,
@@ -36,6 +36,8 @@
 #include <linux/workqueue.h>
 #include <linux/io.h>
 #include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>

 #include "hw.h"

@@ -47,9 +49,9 @@ struct e1000_info;

 #ifdef DEBUG
 #define e_dbg(format, arg...) \
-e_printk(KERN_DEBUG , adapter, format, ## arg)
+e_printk(KERN_DEBUG , hw->adapter, format, ## arg)
 #else
-#define e_dbg(format, arg...) do { (void)(adapter); } while (0)
+#define e_dbg(format, arg...) do { (void)(hw); } while (0)
 #endif

 #define e_err(format, arg...) \
@@ -193,12 +195,15 @@ struct e1000_buffer {
 unsigned long time_stamp;
 u16 length;
 u16 next_to_watch;
+u16 mapped_as_page;
 };
 /* Rx */
-/* arrays of page information for packet split */
-struct e1000_ps_page *ps_pages;
+struct {
+/* arrays of page information for packet split */
+struct e1000_ps_page *ps_pages;
+struct page *page;
+};
 };
-struct page *page;
 };

 struct e1000_ring {
@@ -275,7 +280,6 @@ struct e1000_adapter {

 struct napi_struct napi;

-unsigned long tx_queue_len;
 unsigned int restart_queue;
 u32 txd_cmd;

@@ -331,7 +335,6 @@ struct e1000_adapter {
 /* OS defined structs */
 struct net_device *netdev;
 struct pci_dev *pdev;
-struct net_device_stats net_stats;

 /* structs defined in e1000_hw.h */
 struct e1000_hw hw;
@@ -366,12 +369,13 @@ struct e1000_adapter {
 struct work_struct downshift_task;
 struct work_struct update_phy_task;
 struct work_struct led_blink_task;
+struct work_struct print_hang_task;
 };

 struct e1000_info {
 enum e1000_mac_type mac;
 unsigned int flags;
 unsigned int flags2;
 u32 pba;
 u32 max_hw_frame_size;
 s32 (*get_variants)(struct e1000_adapter *);
@@ -417,6 +421,8 @@ struct e1000_info {
 /* CRC Stripping defines */
 #define FLAG2_CRC_STRIPPING (1 << 0)
 #define FLAG2_HAS_PHY_WAKEUP (1 << 1)
+#define FLAG2_IS_DISCARDING (1 << 2)
+#define FLAG2_DISABLE_ASPM_L1 (1 << 3)

 #define E1000_RX_DESC_PS(R, i) \
 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -454,9 +460,10 @@ extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
 extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
 extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
 extern void e1000e_update_stats(struct e1000_adapter *adapter);
-extern bool e1000_has_link(struct e1000_adapter *adapter);
+extern bool e1000e_has_link(struct e1000_adapter *adapter);
 extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);

 extern unsigned int copybreak;

@@ -488,6 +495,7 @@ extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
 extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
 extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
 extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
+extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);

 extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
 extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
@@ -497,6 +505,8 @@ extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
 extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
 extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
 extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
 extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
 extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
 extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
@@ -507,13 +517,11 @@ extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
 extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
 extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
 extern s32 e1000e_setup_link(struct e1000_hw *hw);
-extern void e1000e_clear_vfta(struct e1000_hw *hw);
+extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
 extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
 extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
 u8 *mc_addr_list,
-u32 mc_addr_count,
-u32 rar_used_count,
-u32 rar_count);
+u32 mc_addr_count);
 extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
 extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
 extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
@@ -523,7 +531,8 @@ extern void e1000e_config_collision_dist(struct e1000_hw *hw);
 extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
 extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
 extern s32 e1000e_blink_led(struct e1000_hw *hw);
-extern void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
 extern void e1000e_reset_adaptive(struct e1000_hw *hw);
 extern void e1000e_update_adaptive(struct e1000_hw *hw);

@@ -566,6 +575,8 @@ extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
 extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
 u32 usec_interval, bool *success);
 extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
+extern void e1000_power_up_phy_copper(struct e1000_hw *hw);
+extern void e1000_power_down_phy_copper(struct e1000_hw *hw);
 extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000e_check_downshift(struct e1000_hw *hw);
@@ -575,7 +586,6 @@ extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
 extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
 u16 data);
-extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
 extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
 extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
 extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
@@ -583,9 +593,15 @@ extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
 extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
584 | extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw); | 594 | extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw); |
585 | 595 | ||
596 | extern s32 e1000_check_polarity_m88(struct e1000_hw *hw); | ||
597 | extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw); | ||
598 | extern s32 e1000_check_polarity_ife(struct e1000_hw *hw); | ||
599 | extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); | ||
600 | extern s32 e1000_check_polarity_igp(struct e1000_hw *hw); | ||
601 | |||
586 | static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) | 602 | static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) |
587 | { | 603 | { |
588 | return hw->phy.ops.reset_phy(hw); | 604 | return hw->phy.ops.reset(hw); |
589 | } | 605 | } |
590 | 606 | ||
591 | static inline s32 e1000_check_reset_block(struct e1000_hw *hw) | 607 | static inline s32 e1000_check_reset_block(struct e1000_hw *hw) |
@@ -595,12 +611,12 @@ static inline s32 e1000_check_reset_block(struct e1000_hw *hw) | |||
595 | 611 | ||
596 | static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) | 612 | static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) |
597 | { | 613 | { |
598 | return hw->phy.ops.read_phy_reg(hw, offset, data); | 614 | return hw->phy.ops.read_reg(hw, offset, data); |
599 | } | 615 | } |
600 | 616 | ||
601 | static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) | 617 | static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) |
602 | { | 618 | { |
603 | return hw->phy.ops.write_phy_reg(hw, offset, data); | 619 | return hw->phy.ops.write_reg(hw, offset, data); |
604 | } | 620 | } |
605 | 621 | ||
606 | static inline s32 e1000_get_cable_length(struct e1000_hw *hw) | 622 | static inline s32 e1000_get_cable_length(struct e1000_hw *hw) |
@@ -616,31 +632,39 @@ extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 | |||
616 | extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); | 632 | extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); |
617 | extern void e1000e_release_nvm(struct e1000_hw *hw); | 633 | extern void e1000e_release_nvm(struct e1000_hw *hw); |
618 | extern void e1000e_reload_nvm(struct e1000_hw *hw); | 634 | extern void e1000e_reload_nvm(struct e1000_hw *hw); |
619 | extern s32 e1000e_read_mac_addr(struct e1000_hw *hw); | 635 | extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); |
636 | |||
637 | static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw) | ||
638 | { | ||
639 | if (hw->mac.ops.read_mac_addr) | ||
640 | return hw->mac.ops.read_mac_addr(hw); | ||
641 | |||
642 | return e1000_read_mac_addr_generic(hw); | ||
643 | } | ||
620 | 644 | ||
621 | static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) | 645 | static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) |
622 | { | 646 | { |
623 | return hw->nvm.ops.validate_nvm(hw); | 647 | return hw->nvm.ops.validate(hw); |
624 | } | 648 | } |
625 | 649 | ||
626 | static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw) | 650 | static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw) |
627 | { | 651 | { |
628 | return hw->nvm.ops.update_nvm(hw); | 652 | return hw->nvm.ops.update(hw); |
629 | } | 653 | } |
630 | 654 | ||
631 | static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | 655 | static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) |
632 | { | 656 | { |
633 | return hw->nvm.ops.read_nvm(hw, offset, words, data); | 657 | return hw->nvm.ops.read(hw, offset, words, data); |
634 | } | 658 | } |
635 | 659 | ||
636 | static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | 660 | static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) |
637 | { | 661 | { |
638 | return hw->nvm.ops.write_nvm(hw, offset, words, data); | 662 | return hw->nvm.ops.write(hw, offset, words, data); |
639 | } | 663 | } |
640 | 664 | ||
641 | static inline s32 e1000_get_phy_info(struct e1000_hw *hw) | 665 | static inline s32 e1000_get_phy_info(struct e1000_hw *hw) |
642 | { | 666 | { |
643 | return hw->phy.ops.get_phy_info(hw); | 667 | return hw->phy.ops.get_info(hw); |
644 | } | 668 | } |
645 | 669 | ||
646 | static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw) | 670 | static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw) |
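Editorial note on the e1000.h hunks above: the header moves from flat e1000e_* calls to ops-table indirection (phy.ops.read_reg/write_reg, nvm.ops.validate/update/read/write) and adds an inline e1000e_read_mac_addr() that prefers a per-MAC callback before falling back to the generic reader. A minimal standalone sketch of that dispatch pattern follows; the demo_* types are simplified stand-ins, not the driver's real e1000_hw layout.

	/*
	 * Sketch of the read_mac_addr dispatch introduced above: try the
	 * MAC-specific hook first, fall back to the generic routine.
	 */
	#include <stddef.h>

	typedef int s32;

	struct demo_hw;

	struct demo_mac_ops {
		s32 (*read_mac_addr)(struct demo_hw *);	/* optional override */
	};

	struct demo_hw {
		struct demo_mac_ops ops;
	};

	static s32 demo_read_mac_addr_generic(struct demo_hw *hw)
	{
		/* stands in for the generic EEPROM-based address read */
		(void)hw;
		return 0;
	}

	static inline s32 demo_read_mac_addr(struct demo_hw *hw)
	{
		if (hw->ops.read_mac_addr)
			return hw->ops.read_mac_addr(hw);

		return demo_read_mac_addr_generic(hw);
	}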
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c index ae5d73689353..27d21589a69a 100644 --- a/drivers/net/e1000e/es2lan.c +++ b/drivers/net/e1000e/es2lan.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -31,11 +31,6 @@ | |||
31 | * 80003ES2LAN Gigabit Ethernet Controller (Serdes) | 31 | * 80003ES2LAN Gigabit Ethernet Controller (Serdes) |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/netdevice.h> | ||
35 | #include <linux/ethtool.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include <linux/pci.h> | ||
38 | |||
39 | #include "e1000.h" | 34 | #include "e1000.h" |
40 | 35 | ||
41 | #define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 | 36 | #define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 |
@@ -51,6 +46,9 @@ | |||
51 | #define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 | 46 | #define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 |
52 | #define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000 | 47 | #define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000 |
53 | 48 | ||
49 | #define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C | ||
50 | #define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004 | ||
51 | |||
54 | #define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ | 52 | #define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ |
55 | #define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 | 53 | #define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 |
56 | 54 | ||
@@ -104,6 +102,8 @@ | |||
104 | */ | 102 | */ |
105 | static const u16 e1000_gg82563_cable_length_table[] = | 103 | static const u16 e1000_gg82563_cable_length_table[] = |
106 | { 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; | 104 | { 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; |
105 | #define GG82563_CABLE_LENGTH_TABLE_SIZE \ | ||
106 | ARRAY_SIZE(e1000_gg82563_cable_length_table) | ||
107 | 107 | ||
108 | static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw); | 108 | static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw); |
109 | static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); | 109 | static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); |
@@ -117,12 +117,11 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | |||
117 | u16 *data); | 117 | u16 *data); |
118 | static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | 118 | static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, |
119 | u16 data); | 119 | u16 data); |
120 | static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw); | ||
120 | 121 | ||
121 | /** | 122 | /** |
122 | * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. | 123 | * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. |
123 | * @hw: pointer to the HW structure | 124 | * @hw: pointer to the HW structure |
124 | * | ||
125 | * This is a function pointer entry point called by the api module. | ||
126 | **/ | 125 | **/ |
127 | static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) | 126 | static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) |
128 | { | 127 | { |
@@ -132,6 +131,9 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) | |||
132 | if (hw->phy.media_type != e1000_media_type_copper) { | 131 | if (hw->phy.media_type != e1000_media_type_copper) { |
133 | phy->type = e1000_phy_none; | 132 | phy->type = e1000_phy_none; |
134 | return 0; | 133 | return 0; |
134 | } else { | ||
135 | phy->ops.power_up = e1000_power_up_phy_copper; | ||
136 | phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; | ||
135 | } | 137 | } |
136 | 138 | ||
137 | phy->addr = 1; | 139 | phy->addr = 1; |
@@ -152,8 +154,6 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) | |||
152 | /** | 154 | /** |
153 | * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. | 155 | * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. |
154 | * @hw: pointer to the HW structure | 156 | * @hw: pointer to the HW structure |
155 | * | ||
156 | * This is a function pointer entry point called by the api module. | ||
157 | **/ | 157 | **/ |
158 | static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) | 158 | static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) |
159 | { | 159 | { |
@@ -200,8 +200,6 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) | |||
200 | /** | 200 | /** |
201 | * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. | 201 | * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. |
202 | * @hw: pointer to the HW structure | 202 | * @hw: pointer to the HW structure |
203 | * | ||
204 | * This is a function pointer entry point called by the api module. | ||
205 | **/ | 203 | **/ |
206 | static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) | 204 | static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) |
207 | { | 205 | { |
@@ -224,7 +222,10 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) | |||
224 | /* Set rar entry count */ | 222 | /* Set rar entry count */ |
225 | mac->rar_entry_count = E1000_RAR_ENTRIES; | 223 | mac->rar_entry_count = E1000_RAR_ENTRIES; |
226 | /* Set if manageability features are enabled. */ | 224 | /* Set if manageability features are enabled. */ |
227 | mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0; | 225 | mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) |
226 | ? true : false; | ||
227 | /* Adaptive IFS not supported */ | ||
228 | mac->adaptive_ifs = false; | ||
228 | 229 | ||
229 | /* check for link */ | 230 | /* check for link */ |
230 | switch (hw->phy.media_type) { | 231 | switch (hw->phy.media_type) { |
@@ -245,6 +246,9 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) | |||
245 | break; | 246 | break; |
246 | } | 247 | } |
247 | 248 | ||
249 | /* set lan id for port to determine which phy lock to use */ | ||
250 | hw->mac.ops.set_lan_id(hw); | ||
251 | |||
248 | return 0; | 252 | return 0; |
249 | } | 253 | } |
250 | 254 | ||
@@ -272,8 +276,7 @@ static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter) | |||
272 | * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY | 276 | * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY |
273 | * @hw: pointer to the HW structure | 277 | * @hw: pointer to the HW structure |
274 | * | 278 | * |
275 | * A wrapper to acquire access rights to the correct PHY. This is a | 279 | * A wrapper to acquire access rights to the correct PHY. |
276 | * function pointer entry point called by the api module. | ||
277 | **/ | 280 | **/ |
278 | static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) | 281 | static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) |
279 | { | 282 | { |
@@ -287,8 +290,7 @@ static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) | |||
287 | * e1000_release_phy_80003es2lan - Release rights to access PHY | 290 | * e1000_release_phy_80003es2lan - Release rights to access PHY |
288 | * @hw: pointer to the HW structure | 291 | * @hw: pointer to the HW structure |
289 | * | 292 | * |
290 | * A wrapper to release access rights to the correct PHY. This is a | 293 | * A wrapper to release access rights to the correct PHY. |
291 | * function pointer entry point called by the api module. | ||
292 | **/ | 294 | **/ |
293 | static void e1000_release_phy_80003es2lan(struct e1000_hw *hw) | 295 | static void e1000_release_phy_80003es2lan(struct e1000_hw *hw) |
294 | { | 296 | { |
@@ -333,8 +335,7 @@ static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw) | |||
333 | * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM | 335 | * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM |
334 | * @hw: pointer to the HW structure | 336 | * @hw: pointer to the HW structure |
335 | * | 337 | * |
336 | * Acquire the semaphore to access the EEPROM. This is a function | 338 | * Acquire the semaphore to access the EEPROM. |
337 | * pointer entry point called by the api module. | ||
338 | **/ | 339 | **/ |
339 | static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) | 340 | static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) |
340 | { | 341 | { |
@@ -356,8 +357,7 @@ static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) | |||
356 | * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM | 357 | * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM |
357 | * @hw: pointer to the HW structure | 358 | * @hw: pointer to the HW structure |
358 | * | 359 | * |
359 | * Release the semaphore used to access the EEPROM. This is a | 360 | * Release the semaphore used to access the EEPROM. |
360 | * function pointer entry point called by the api module. | ||
361 | **/ | 361 | **/ |
362 | static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) | 362 | static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) |
363 | { | 363 | { |
@@ -399,8 +399,7 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) | |||
399 | } | 399 | } |
400 | 400 | ||
401 | if (i == timeout) { | 401 | if (i == timeout) { |
402 | hw_dbg(hw, | 402 | e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); |
403 | "Driver can't access resource, SW_FW_SYNC timeout.\n"); | ||
404 | return -E1000_ERR_SWFW_SYNC; | 403 | return -E1000_ERR_SWFW_SYNC; |
405 | } | 404 | } |
406 | 405 | ||
@@ -440,8 +439,7 @@ static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) | |||
440 | * @offset: offset of the register to read | 439 | * @offset: offset of the register to read |
441 | * @data: pointer to the data returned from the operation | 440 | * @data: pointer to the data returned from the operation |
442 | * | 441 | * |
443 | * Read the GG82563 PHY register. This is a function pointer entry | 442 | * Read the GG82563 PHY register. |
444 | * point called by the api module. | ||
445 | **/ | 443 | **/ |
446 | static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, | 444 | static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, |
447 | u32 offset, u16 *data) | 445 | u32 offset, u16 *data) |
@@ -472,28 +470,36 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, | |||
472 | return ret_val; | 470 | return ret_val; |
473 | } | 471 | } |
474 | 472 | ||
475 | /* | 473 | if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) { |
476 | * The "ready" bit in the MDIC register may be incorrectly set | 474 | /* |
477 | * before the device has completed the "Page Select" MDI | 475 | * The "ready" bit in the MDIC register may be incorrectly set |
478 | * transaction. So we wait 200us after each MDI command... | 476 | * before the device has completed the "Page Select" MDI |
479 | */ | 477 | * transaction. So we wait 200us after each MDI command... |
480 | udelay(200); | 478 | */ |
479 | udelay(200); | ||
481 | 480 | ||
482 | /* ...and verify the command was successful. */ | 481 | /* ...and verify the command was successful. */ |
483 | ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); | 482 | ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); |
484 | 483 | ||
485 | if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { | 484 | if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { |
486 | ret_val = -E1000_ERR_PHY; | 485 | ret_val = -E1000_ERR_PHY; |
487 | e1000_release_phy_80003es2lan(hw); | 486 | e1000_release_phy_80003es2lan(hw); |
488 | return ret_val; | 487 | return ret_val; |
489 | } | 488 | } |
490 | 489 | ||
491 | udelay(200); | 490 | udelay(200); |
492 | 491 | ||
493 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 492 | ret_val = e1000e_read_phy_reg_mdic(hw, |
494 | data); | 493 | MAX_PHY_REG_ADDRESS & offset, |
494 | data); | ||
495 | |||
496 | udelay(200); | ||
497 | } else { | ||
498 | ret_val = e1000e_read_phy_reg_mdic(hw, | ||
499 | MAX_PHY_REG_ADDRESS & offset, | ||
500 | data); | ||
501 | } | ||
495 | 502 | ||
496 | udelay(200); | ||
497 | e1000_release_phy_80003es2lan(hw); | 503 | e1000_release_phy_80003es2lan(hw); |
498 | 504 | ||
499 | return ret_val; | 505 | return ret_val; |
@@ -505,8 +511,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, | |||
505 | * @offset: offset of the register to read | 511 | * @offset: offset of the register to read |
506 | * @data: value to write to the register | 512 | * @data: value to write to the register |
507 | * | 513 | * |
508 | * Write to the GG82563 PHY register. This is a function pointer entry | 514 | * Write to the GG82563 PHY register. |
509 | * point called by the api module. | ||
510 | **/ | 515 | **/ |
511 | static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, | 516 | static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, |
512 | u32 offset, u16 data) | 517 | u32 offset, u16 data) |
@@ -537,28 +542,35 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, | |||
537 | return ret_val; | 542 | return ret_val; |
538 | } | 543 | } |
539 | 544 | ||
545 | if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) { | ||
546 | /* | ||
547 | * The "ready" bit in the MDIC register may be incorrectly set | ||
548 | * before the device has completed the "Page Select" MDI | ||
549 | * transaction. So we wait 200us after each MDI command... | ||
550 | */ | ||
551 | udelay(200); | ||
540 | 552 | ||
541 | /* | 553 | /* ...and verify the command was successful. */ |
542 | * The "ready" bit in the MDIC register may be incorrectly set | 554 | ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); |
543 | * before the device has completed the "Page Select" MDI | ||
544 | * transaction. So we wait 200us after each MDI command... | ||
545 | */ | ||
546 | udelay(200); | ||
547 | 555 | ||
548 | /* ...and verify the command was successful. */ | 556 | if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { |
549 | ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); | 557 | e1000_release_phy_80003es2lan(hw); |
558 | return -E1000_ERR_PHY; | ||
559 | } | ||
550 | 560 | ||
551 | if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { | 561 | udelay(200); |
552 | e1000_release_phy_80003es2lan(hw); | ||
553 | return -E1000_ERR_PHY; | ||
554 | } | ||
555 | 562 | ||
556 | udelay(200); | 563 | ret_val = e1000e_write_phy_reg_mdic(hw, |
564 | MAX_PHY_REG_ADDRESS & offset, | ||
565 | data); | ||
557 | 566 | ||
558 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 567 | udelay(200); |
559 | data); | 568 | } else { |
569 | ret_val = e1000e_write_phy_reg_mdic(hw, | ||
570 | MAX_PHY_REG_ADDRESS & offset, | ||
571 | data); | ||
572 | } | ||
560 | 573 | ||
561 | udelay(200); | ||
562 | e1000_release_phy_80003es2lan(hw); | 574 | e1000_release_phy_80003es2lan(hw); |
563 | 575 | ||
564 | return ret_val; | 576 | return ret_val; |
@@ -571,8 +583,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, | |||
571 | * @words: number of words to write | 583 | * @words: number of words to write |
572 | * @data: buffer of data to write to the NVM | 584 | * @data: buffer of data to write to the NVM |
573 | * | 585 | * |
574 | * Write "words" of data to the ESB2 NVM. This is a function | 586 | * Write "words" of data to the ESB2 NVM. |
575 | * pointer entry point called by the api module. | ||
576 | **/ | 587 | **/ |
577 | static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, | 588 | static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, |
578 | u16 words, u16 *data) | 589 | u16 words, u16 *data) |
@@ -602,7 +613,7 @@ static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) | |||
602 | timeout--; | 613 | timeout--; |
603 | } | 614 | } |
604 | if (!timeout) { | 615 | if (!timeout) { |
605 | hw_dbg(hw, "MNG configuration cycle has not completed.\n"); | 616 | e_dbg("MNG configuration cycle has not completed.\n"); |
606 | return -E1000_ERR_RESET; | 617 | return -E1000_ERR_RESET; |
607 | } | 618 | } |
608 | 619 | ||
@@ -635,7 +646,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) | |||
635 | if (ret_val) | 646 | if (ret_val) |
636 | return ret_val; | 647 | return ret_val; |
637 | 648 | ||
638 | hw_dbg(hw, "GG82563 PSCR: %X\n", phy_data); | 649 | e_dbg("GG82563 PSCR: %X\n", phy_data); |
639 | 650 | ||
640 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); | 651 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); |
641 | if (ret_val) | 652 | if (ret_val) |
@@ -653,7 +664,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) | |||
653 | udelay(1); | 664 | udelay(1); |
654 | 665 | ||
655 | if (hw->phy.autoneg_wait_to_complete) { | 666 | if (hw->phy.autoneg_wait_to_complete) { |
656 | hw_dbg(hw, "Waiting for forced speed/duplex link " | 667 | e_dbg("Waiting for forced speed/duplex link " |
657 | "on GG82563 phy.\n"); | 668 | "on GG82563 phy.\n"); |
658 | 669 | ||
659 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | 670 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, |
@@ -712,21 +723,27 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) | |||
712 | static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) | 723 | static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) |
713 | { | 724 | { |
714 | struct e1000_phy_info *phy = &hw->phy; | 725 | struct e1000_phy_info *phy = &hw->phy; |
715 | s32 ret_val; | 726 | s32 ret_val = 0; |
716 | u16 phy_data; | 727 | u16 phy_data, index; |
717 | u16 index; | ||
718 | 728 | ||
719 | ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); | 729 | ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); |
720 | if (ret_val) | 730 | if (ret_val) |
721 | return ret_val; | 731 | goto out; |
722 | 732 | ||
723 | index = phy_data & GG82563_DSPD_CABLE_LENGTH; | 733 | index = phy_data & GG82563_DSPD_CABLE_LENGTH; |
734 | |||
735 | if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) { | ||
736 | ret_val = -E1000_ERR_PHY; | ||
737 | goto out; | ||
738 | } | ||
739 | |||
724 | phy->min_cable_length = e1000_gg82563_cable_length_table[index]; | 740 | phy->min_cable_length = e1000_gg82563_cable_length_table[index]; |
725 | phy->max_cable_length = e1000_gg82563_cable_length_table[index+5]; | 741 | phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; |
726 | 742 | ||
727 | phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; | 743 | phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; |
728 | 744 | ||
729 | return 0; | 745 | out: |
746 | return ret_val; | ||
730 | } | 747 | } |
731 | 748 | ||
732 | /** | 749 | /** |
@@ -736,7 +753,6 @@ static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) | |||
736 | * @duplex: pointer to duplex buffer | 753 | * @duplex: pointer to duplex buffer |
737 | * | 754 | * |
738 | * Retrieve the current speed and duplex configuration. | 755 | * Retrieve the current speed and duplex configuration. |
739 | * This is a function pointer entry point called by the api module. | ||
740 | **/ | 756 | **/ |
741 | static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, | 757 | static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, |
742 | u16 *duplex) | 758 | u16 *duplex) |
@@ -762,12 +778,10 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, | |||
762 | * @hw: pointer to the HW structure | 778 | * @hw: pointer to the HW structure |
763 | * | 779 | * |
764 | * Perform a global reset to the ESB2 controller. | 780 | * Perform a global reset to the ESB2 controller. |
765 | * This is a function pointer entry point called by the api module. | ||
766 | **/ | 781 | **/ |
767 | static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | 782 | static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) |
768 | { | 783 | { |
769 | u32 ctrl; | 784 | u32 ctrl, icr; |
770 | u32 icr; | ||
771 | s32 ret_val; | 785 | s32 ret_val; |
772 | 786 | ||
773 | /* | 787 | /* |
@@ -776,9 +790,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | |||
776 | */ | 790 | */ |
777 | ret_val = e1000e_disable_pcie_master(hw); | 791 | ret_val = e1000e_disable_pcie_master(hw); |
778 | if (ret_val) | 792 | if (ret_val) |
779 | hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); | 793 | e_dbg("PCI-E Master disable polling has failed.\n"); |
780 | 794 | ||
781 | hw_dbg(hw, "Masking off all interrupts\n"); | 795 | e_dbg("Masking off all interrupts\n"); |
782 | ew32(IMC, 0xffffffff); | 796 | ew32(IMC, 0xffffffff); |
783 | 797 | ||
784 | ew32(RCTL, 0); | 798 | ew32(RCTL, 0); |
@@ -790,7 +804,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | |||
790 | ctrl = er32(CTRL); | 804 | ctrl = er32(CTRL); |
791 | 805 | ||
792 | ret_val = e1000_acquire_phy_80003es2lan(hw); | 806 | ret_val = e1000_acquire_phy_80003es2lan(hw); |
793 | hw_dbg(hw, "Issuing a global reset to MAC\n"); | 807 | e_dbg("Issuing a global reset to MAC\n"); |
794 | ew32(CTRL, ctrl | E1000_CTRL_RST); | 808 | ew32(CTRL, ctrl | E1000_CTRL_RST); |
795 | e1000_release_phy_80003es2lan(hw); | 809 | e1000_release_phy_80003es2lan(hw); |
796 | 810 | ||
@@ -803,7 +817,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | |||
803 | ew32(IMC, 0xffffffff); | 817 | ew32(IMC, 0xffffffff); |
804 | icr = er32(ICR); | 818 | icr = er32(ICR); |
805 | 819 | ||
806 | return 0; | 820 | ret_val = e1000_check_alt_mac_addr_generic(hw); |
821 | |||
822 | return ret_val; | ||
807 | } | 823 | } |
808 | 824 | ||
809 | /** | 825 | /** |
@@ -811,7 +827,6 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | |||
811 | * @hw: pointer to the HW structure | 827 | * @hw: pointer to the HW structure |
812 | * | 828 | * |
813 | * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. | 829 | * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. |
814 | * This is a function pointer entry point called by the api module. | ||
815 | **/ | 830 | **/ |
816 | static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) | 831 | static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) |
817 | { | 832 | { |
@@ -824,20 +839,19 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) | |||
824 | 839 | ||
825 | /* Initialize identification LED */ | 840 | /* Initialize identification LED */ |
826 | ret_val = e1000e_id_led_init(hw); | 841 | ret_val = e1000e_id_led_init(hw); |
827 | if (ret_val) { | 842 | if (ret_val) |
828 | hw_dbg(hw, "Error initializing identification LED\n"); | 843 | e_dbg("Error initializing identification LED\n"); |
829 | return ret_val; | 844 | /* This is not fatal and we should not stop init due to this */ |
830 | } | ||
831 | 845 | ||
832 | /* Disabling VLAN filtering */ | 846 | /* Disabling VLAN filtering */ |
833 | hw_dbg(hw, "Initializing the IEEE VLAN\n"); | 847 | e_dbg("Initializing the IEEE VLAN\n"); |
834 | e1000e_clear_vfta(hw); | 848 | mac->ops.clear_vfta(hw); |
835 | 849 | ||
836 | /* Setup the receive address. */ | 850 | /* Setup the receive address. */ |
837 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); | 851 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); |
838 | 852 | ||
839 | /* Zero out the Multicast HASH table */ | 853 | /* Zero out the Multicast HASH table */ |
840 | hw_dbg(hw, "Zeroing the MTA\n"); | 854 | e_dbg("Zeroing the MTA\n"); |
841 | for (i = 0; i < mac->mta_reg_count; i++) | 855 | for (i = 0; i < mac->mta_reg_count; i++) |
842 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); | 856 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); |
843 | 857 | ||
@@ -877,6 +891,19 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) | |||
877 | reg_data &= ~0x00100000; | 891 | reg_data &= ~0x00100000; |
878 | E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); | 892 | E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); |
879 | 893 | ||
894 | /* default to true to enable the MDIC W/A */ | ||
895 | hw->dev_spec.e80003es2lan.mdic_wa_enable = true; | ||
896 | |||
897 | ret_val = e1000_read_kmrn_reg_80003es2lan(hw, | ||
898 | E1000_KMRNCTRLSTA_OFFSET >> | ||
899 | E1000_KMRNCTRLSTA_OFFSET_SHIFT, | ||
900 | &i); | ||
901 | if (!ret_val) { | ||
902 | if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == | ||
903 | E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) | ||
904 | hw->dev_spec.e80003es2lan.mdic_wa_enable = false; | ||
905 | } | ||
906 | |||
880 | /* | 907 | /* |
881 | * Clear all of the statistics registers (clear on read). It is | 908 | * Clear all of the statistics registers (clear on read). It is |
882 | * important that we do this after we have tried to establish link | 909 | * important that we do this after we have tried to establish link |
@@ -994,7 +1021,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) | |||
994 | /* SW Reset the PHY so all changes take effect */ | 1021 | /* SW Reset the PHY so all changes take effect */ |
995 | ret_val = e1000e_commit_phy(hw); | 1022 | ret_val = e1000e_commit_phy(hw); |
996 | if (ret_val) { | 1023 | if (ret_val) { |
997 | hw_dbg(hw, "Error Resetting the PHY\n"); | 1024 | e_dbg("Error Resetting the PHY\n"); |
998 | return ret_val; | 1025 | return ret_val; |
999 | } | 1026 | } |
1000 | 1027 | ||
@@ -1318,6 +1345,46 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | |||
1318 | } | 1345 | } |
1319 | 1346 | ||
1320 | /** | 1347 | /** |
1348 | * e1000_read_mac_addr_80003es2lan - Read device MAC address | ||
1349 | * @hw: pointer to the HW structure | ||
1350 | **/ | ||
1351 | static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) | ||
1352 | { | ||
1353 | s32 ret_val = 0; | ||
1354 | |||
1355 | /* | ||
1356 | * If there's an alternate MAC address place it in RAR0 | ||
1357 | * so that it will override the Si installed default perm | ||
1358 | * address. | ||
1359 | */ | ||
1360 | ret_val = e1000_check_alt_mac_addr_generic(hw); | ||
1361 | if (ret_val) | ||
1362 | goto out; | ||
1363 | |||
1364 | ret_val = e1000_read_mac_addr_generic(hw); | ||
1365 | |||
1366 | out: | ||
1367 | return ret_val; | ||
1368 | } | ||
1369 | |||
1370 | /** | ||
1371 | * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down | ||
1372 | * @hw: pointer to the HW structure | ||
1373 | * | ||
1374 | * In the case of a PHY power down to save power, or to turn off link during a | ||
1375 | * driver unload, or wake on lan is not enabled, remove the link. | ||
1376 | **/ | ||
1377 | static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw) | ||
1378 | { | ||
1379 | /* If the management interface is not enabled, then power down */ | ||
1380 | if (!(hw->mac.ops.check_mng_mode(hw) || | ||
1381 | hw->phy.ops.check_reset_block(hw))) | ||
1382 | e1000_power_down_phy_copper(hw); | ||
1383 | |||
1384 | return; | ||
1385 | } | ||
1386 | |||
1387 | /** | ||
1321 | * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters | 1388 | * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters |
1322 | * @hw: pointer to the HW structure | 1389 | * @hw: pointer to the HW structure |
1323 | * | 1390 | * |
@@ -1325,57 +1392,59 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | |||
1325 | **/ | 1392 | **/ |
1326 | static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) | 1393 | static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) |
1327 | { | 1394 | { |
1328 | u32 temp; | ||
1329 | |||
1330 | e1000e_clear_hw_cntrs_base(hw); | 1395 | e1000e_clear_hw_cntrs_base(hw); |
1331 | 1396 | ||
1332 | temp = er32(PRC64); | 1397 | er32(PRC64); |
1333 | temp = er32(PRC127); | 1398 | er32(PRC127); |
1334 | temp = er32(PRC255); | 1399 | er32(PRC255); |
1335 | temp = er32(PRC511); | 1400 | er32(PRC511); |
1336 | temp = er32(PRC1023); | 1401 | er32(PRC1023); |
1337 | temp = er32(PRC1522); | 1402 | er32(PRC1522); |
1338 | temp = er32(PTC64); | 1403 | er32(PTC64); |
1339 | temp = er32(PTC127); | 1404 | er32(PTC127); |
1340 | temp = er32(PTC255); | 1405 | er32(PTC255); |
1341 | temp = er32(PTC511); | 1406 | er32(PTC511); |
1342 | temp = er32(PTC1023); | 1407 | er32(PTC1023); |
1343 | temp = er32(PTC1522); | 1408 | er32(PTC1522); |
1344 | 1409 | ||
1345 | temp = er32(ALGNERRC); | 1410 | er32(ALGNERRC); |
1346 | temp = er32(RXERRC); | 1411 | er32(RXERRC); |
1347 | temp = er32(TNCRS); | 1412 | er32(TNCRS); |
1348 | temp = er32(CEXTERR); | 1413 | er32(CEXTERR); |
1349 | temp = er32(TSCTC); | 1414 | er32(TSCTC); |
1350 | temp = er32(TSCTFC); | 1415 | er32(TSCTFC); |
1351 | 1416 | ||
1352 | temp = er32(MGTPRC); | 1417 | er32(MGTPRC); |
1353 | temp = er32(MGTPDC); | 1418 | er32(MGTPDC); |
1354 | temp = er32(MGTPTC); | 1419 | er32(MGTPTC); |
1355 | 1420 | ||
1356 | temp = er32(IAC); | 1421 | er32(IAC); |
1357 | temp = er32(ICRXOC); | 1422 | er32(ICRXOC); |
1358 | 1423 | ||
1359 | temp = er32(ICRXPTC); | 1424 | er32(ICRXPTC); |
1360 | temp = er32(ICRXATC); | 1425 | er32(ICRXATC); |
1361 | temp = er32(ICTXPTC); | 1426 | er32(ICTXPTC); |
1362 | temp = er32(ICTXATC); | 1427 | er32(ICTXATC); |
1363 | temp = er32(ICTXQEC); | 1428 | er32(ICTXQEC); |
1364 | temp = er32(ICTXQMTC); | 1429 | er32(ICTXQMTC); |
1365 | temp = er32(ICRXDMTC); | 1430 | er32(ICRXDMTC); |
1366 | } | 1431 | } |
1367 | 1432 | ||
1368 | static struct e1000_mac_operations es2_mac_ops = { | 1433 | static struct e1000_mac_operations es2_mac_ops = { |
1434 | .read_mac_addr = e1000_read_mac_addr_80003es2lan, | ||
1369 | .id_led_init = e1000e_id_led_init, | 1435 | .id_led_init = e1000e_id_led_init, |
1370 | .check_mng_mode = e1000e_check_mng_mode_generic, | 1436 | .check_mng_mode = e1000e_check_mng_mode_generic, |
1371 | /* check_for_link dependent on media type */ | 1437 | /* check_for_link dependent on media type */ |
1372 | .cleanup_led = e1000e_cleanup_led_generic, | 1438 | .cleanup_led = e1000e_cleanup_led_generic, |
1373 | .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, | 1439 | .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, |
1374 | .get_bus_info = e1000e_get_bus_info_pcie, | 1440 | .get_bus_info = e1000e_get_bus_info_pcie, |
1441 | .set_lan_id = e1000_set_lan_id_multi_port_pcie, | ||
1375 | .get_link_up_info = e1000_get_link_up_info_80003es2lan, | 1442 | .get_link_up_info = e1000_get_link_up_info_80003es2lan, |
1376 | .led_on = e1000e_led_on_generic, | 1443 | .led_on = e1000e_led_on_generic, |
1377 | .led_off = e1000e_led_off_generic, | 1444 | .led_off = e1000e_led_off_generic, |
1378 | .update_mc_addr_list = e1000e_update_mc_addr_list_generic, | 1445 | .update_mc_addr_list = e1000e_update_mc_addr_list_generic, |
1446 | .write_vfta = e1000_write_vfta_generic, | ||
1447 | .clear_vfta = e1000_clear_vfta_generic, | ||
1379 | .reset_hw = e1000_reset_hw_80003es2lan, | 1448 | .reset_hw = e1000_reset_hw_80003es2lan, |
1380 | .init_hw = e1000_init_hw_80003es2lan, | 1449 | .init_hw = e1000_init_hw_80003es2lan, |
1381 | .setup_link = e1000e_setup_link, | 1450 | .setup_link = e1000e_setup_link, |
@@ -1384,30 +1453,31 @@ static struct e1000_mac_operations es2_mac_ops = { | |||
1384 | }; | 1453 | }; |
1385 | 1454 | ||
1386 | static struct e1000_phy_operations es2_phy_ops = { | 1455 | static struct e1000_phy_operations es2_phy_ops = { |
1387 | .acquire_phy = e1000_acquire_phy_80003es2lan, | 1456 | .acquire = e1000_acquire_phy_80003es2lan, |
1457 | .check_polarity = e1000_check_polarity_m88, | ||
1388 | .check_reset_block = e1000e_check_reset_block_generic, | 1458 | .check_reset_block = e1000e_check_reset_block_generic, |
1389 | .commit_phy = e1000e_phy_sw_reset, | 1459 | .commit = e1000e_phy_sw_reset, |
1390 | .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, | 1460 | .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, |
1391 | .get_cfg_done = e1000_get_cfg_done_80003es2lan, | 1461 | .get_cfg_done = e1000_get_cfg_done_80003es2lan, |
1392 | .get_cable_length = e1000_get_cable_length_80003es2lan, | 1462 | .get_cable_length = e1000_get_cable_length_80003es2lan, |
1393 | .get_phy_info = e1000e_get_phy_info_m88, | 1463 | .get_info = e1000e_get_phy_info_m88, |
1394 | .read_phy_reg = e1000_read_phy_reg_gg82563_80003es2lan, | 1464 | .read_reg = e1000_read_phy_reg_gg82563_80003es2lan, |
1395 | .release_phy = e1000_release_phy_80003es2lan, | 1465 | .release = e1000_release_phy_80003es2lan, |
1396 | .reset_phy = e1000e_phy_hw_reset_generic, | 1466 | .reset = e1000e_phy_hw_reset_generic, |
1397 | .set_d0_lplu_state = NULL, | 1467 | .set_d0_lplu_state = NULL, |
1398 | .set_d3_lplu_state = e1000e_set_d3_lplu_state, | 1468 | .set_d3_lplu_state = e1000e_set_d3_lplu_state, |
1399 | .write_phy_reg = e1000_write_phy_reg_gg82563_80003es2lan, | 1469 | .write_reg = e1000_write_phy_reg_gg82563_80003es2lan, |
1400 | .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, | 1470 | .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, |
1401 | }; | 1471 | }; |
1402 | 1472 | ||
1403 | static struct e1000_nvm_operations es2_nvm_ops = { | 1473 | static struct e1000_nvm_operations es2_nvm_ops = { |
1404 | .acquire_nvm = e1000_acquire_nvm_80003es2lan, | 1474 | .acquire = e1000_acquire_nvm_80003es2lan, |
1405 | .read_nvm = e1000e_read_nvm_eerd, | 1475 | .read = e1000e_read_nvm_eerd, |
1406 | .release_nvm = e1000_release_nvm_80003es2lan, | 1476 | .release = e1000_release_nvm_80003es2lan, |
1407 | .update_nvm = e1000e_update_nvm_checksum_generic, | 1477 | .update = e1000e_update_nvm_checksum_generic, |
1408 | .valid_led_default = e1000e_valid_led_default, | 1478 | .valid_led_default = e1000e_valid_led_default, |
1409 | .validate_nvm = e1000e_validate_nvm_checksum_generic, | 1479 | .validate = e1000e_validate_nvm_checksum_generic, |
1410 | .write_nvm = e1000_write_nvm_80003es2lan, | 1480 | .write = e1000_write_nvm_80003es2lan, |
1411 | }; | 1481 | }; |
1412 | 1482 | ||
1413 | struct e1000_info e1000_es2_info = { | 1483 | struct e1000_info e1000_es2_info = { |
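Editorial note on the es2lan.c hunks above: the 200us "Page Select" verification is now gated by a per-device mdic_wa_enable flag; init_hw defaults it to true and clears it when the Kumeran opmode reports inband MDIO, and the PHY read/write paths only perform the extra MDIC round-trip when the flag is set. A compact sketch of that gating follows; the demo_* helpers are placeholders for the real KMRN/MDIC accessors and most of the register traffic is elided.

	/* Sketch of the MDIC page-select workaround gating added above. */
	#include <stdbool.h>
	#include <stdint.h>

	#define DEMO_OPMODE_MASK        0x000C
	#define DEMO_OPMODE_INBAND_MDIO 0x0004

	struct demo_hw {
		bool mdic_wa_enable;	/* mirrors dev_spec.e80003es2lan.mdic_wa_enable */
	};

	/* placeholder: read the Kumeran control/status opmode word */
	static uint16_t demo_read_kmrn_opmode(struct demo_hw *hw)
	{
		(void)hw;
		return DEMO_OPMODE_INBAND_MDIO;
	}

	static void demo_init_hw(struct demo_hw *hw)
	{
		uint16_t opmode = demo_read_kmrn_opmode(hw);

		/* default to the workaround; disable it only for inband MDIO */
		hw->mdic_wa_enable = true;
		if ((opmode & DEMO_OPMODE_MASK) == DEMO_OPMODE_INBAND_MDIO)
			hw->mdic_wa_enable = false;
	}

	static int demo_read_phy_reg(struct demo_hw *hw, uint32_t offset,
				     uint16_t *data)
	{
		if (hw->mdic_wa_enable) {
			/* workaround path: wait 200us, re-read the page-select
			 * register to verify it stuck, then issue the real read
			 * (register traffic elided in this sketch) */
		}
		/* plain read path when the workaround is not needed */
		(void)offset;
		*data = 0;
		return 0;
	}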
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index e82638ecae88..983493f2330c 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -31,18 +31,27 @@ | |||
31 | #include <linux/netdevice.h> | 31 | #include <linux/netdevice.h> |
32 | #include <linux/ethtool.h> | 32 | #include <linux/ethtool.h> |
33 | #include <linux/pci.h> | 33 | #include <linux/pci.h> |
34 | #include <linux/slab.h> | ||
34 | #include <linux/delay.h> | 35 | #include <linux/delay.h> |
35 | 36 | ||
36 | #include "e1000.h" | 37 | #include "e1000.h" |
37 | 38 | ||
39 | enum {NETDEV_STATS, E1000_STATS}; | ||
40 | |||
38 | struct e1000_stats { | 41 | struct e1000_stats { |
39 | char stat_string[ETH_GSTRING_LEN]; | 42 | char stat_string[ETH_GSTRING_LEN]; |
43 | int type; | ||
40 | int sizeof_stat; | 44 | int sizeof_stat; |
41 | int stat_offset; | 45 | int stat_offset; |
42 | }; | 46 | }; |
43 | 47 | ||
44 | #define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \ | 48 | #define E1000_STAT(m) E1000_STATS, \ |
45 | offsetof(struct e1000_adapter, m) | 49 | sizeof(((struct e1000_adapter *)0)->m), \ |
50 | offsetof(struct e1000_adapter, m) | ||
51 | #define E1000_NETDEV_STAT(m) NETDEV_STATS, \ | ||
52 | sizeof(((struct net_device *)0)->m), \ | ||
53 | offsetof(struct net_device, m) | ||
54 | |||
46 | static const struct e1000_stats e1000_gstrings_stats[] = { | 55 | static const struct e1000_stats e1000_gstrings_stats[] = { |
47 | { "rx_packets", E1000_STAT(stats.gprc) }, | 56 | { "rx_packets", E1000_STAT(stats.gprc) }, |
48 | { "tx_packets", E1000_STAT(stats.gptc) }, | 57 | { "tx_packets", E1000_STAT(stats.gptc) }, |
@@ -52,21 +61,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = { | |||
52 | { "tx_broadcast", E1000_STAT(stats.bptc) }, | 61 | { "tx_broadcast", E1000_STAT(stats.bptc) }, |
53 | { "rx_multicast", E1000_STAT(stats.mprc) }, | 62 | { "rx_multicast", E1000_STAT(stats.mprc) }, |
54 | { "tx_multicast", E1000_STAT(stats.mptc) }, | 63 | { "tx_multicast", E1000_STAT(stats.mptc) }, |
55 | { "rx_errors", E1000_STAT(net_stats.rx_errors) }, | 64 | { "rx_errors", E1000_NETDEV_STAT(stats.rx_errors) }, |
56 | { "tx_errors", E1000_STAT(net_stats.tx_errors) }, | 65 | { "tx_errors", E1000_NETDEV_STAT(stats.tx_errors) }, |
57 | { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, | 66 | { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) }, |
58 | { "multicast", E1000_STAT(stats.mprc) }, | 67 | { "multicast", E1000_STAT(stats.mprc) }, |
59 | { "collisions", E1000_STAT(stats.colc) }, | 68 | { "collisions", E1000_STAT(stats.colc) }, |
60 | { "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) }, | 69 | { "rx_length_errors", E1000_NETDEV_STAT(stats.rx_length_errors) }, |
61 | { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, | 70 | { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) }, |
62 | { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, | 71 | { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, |
63 | { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, | 72 | { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) }, |
64 | { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, | 73 | { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, |
65 | { "rx_missed_errors", E1000_STAT(stats.mpc) }, | 74 | { "rx_missed_errors", E1000_STAT(stats.mpc) }, |
66 | { "tx_aborted_errors", E1000_STAT(stats.ecol) }, | 75 | { "tx_aborted_errors", E1000_STAT(stats.ecol) }, |
67 | { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, | 76 | { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, |
68 | { "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) }, | 77 | { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) }, |
69 | { "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) }, | 78 | { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) }, |
70 | { "tx_window_errors", E1000_STAT(stats.latecol) }, | 79 | { "tx_window_errors", E1000_STAT(stats.latecol) }, |
71 | { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, | 80 | { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, |
72 | { "tx_deferred_ok", E1000_STAT(stats.dc) }, | 81 | { "tx_deferred_ok", E1000_STAT(stats.dc) }, |
@@ -182,8 +191,19 @@ static int e1000_get_settings(struct net_device *netdev, | |||
182 | static u32 e1000_get_link(struct net_device *netdev) | 191 | static u32 e1000_get_link(struct net_device *netdev) |
183 | { | 192 | { |
184 | struct e1000_adapter *adapter = netdev_priv(netdev); | 193 | struct e1000_adapter *adapter = netdev_priv(netdev); |
194 | struct e1000_mac_info *mac = &adapter->hw.mac; | ||
195 | |||
196 | /* | ||
197 | * If the link is not reported up to netdev, interrupts are disabled, | ||
198 | * and so the physical link state may have changed since we last | ||
199 | * looked. Set get_link_status to make sure that the true link | ||
200 | * state is interrogated, rather than pulling a cached and possibly | ||
201 | * stale link state from the driver. | ||
202 | */ | ||
203 | if (!netif_carrier_ok(netdev)) | ||
204 | mac->get_link_status = 1; | ||
185 | 205 | ||
186 | return e1000_has_link(adapter); | 206 | return e1000e_has_link(adapter); |
187 | } | 207 | } |
188 | 208 | ||
189 | static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | 209 | static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) |
@@ -516,7 +536,8 @@ static int e1000_get_eeprom(struct net_device *netdev, | |||
516 | 536 | ||
517 | if (ret_val) { | 537 | if (ret_val) { |
518 | /* a read error occurred, throw away the result */ | 538 | /* a read error occurred, throw away the result */ |
519 | memset(eeprom_buff, 0xff, sizeof(eeprom_buff)); | 539 | memset(eeprom_buff, 0xff, sizeof(u16) * |
540 | (last_word - first_word + 1)); | ||
520 | } else { | 541 | } else { |
521 | /* Device's eeprom is always little-endian, word addressable */ | 542 | /* Device's eeprom is always little-endian, word addressable */ |
522 | for (i = 0; i < last_word - first_word + 1; i++) | 543 | for (i = 0; i < last_word - first_word + 1; i++) |
@@ -596,7 +617,9 @@ static int e1000_set_eeprom(struct net_device *netdev, | |||
596 | * and flush shadow RAM for applicable controllers | 617 | * and flush shadow RAM for applicable controllers |
597 | */ | 618 | */ |
598 | if ((first_word <= NVM_CHECKSUM_REG) || | 619 | if ((first_word <= NVM_CHECKSUM_REG) || |
599 | (hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82573)) | 620 | (hw->mac.type == e1000_82583) || |
621 | (hw->mac.type == e1000_82574) || | ||
622 | (hw->mac.type == e1000_82573)) | ||
600 | ret_val = e1000e_update_nvm_checksum(hw); | 623 | ret_val = e1000e_update_nvm_checksum(hw); |
601 | 624 | ||
602 | out: | 625 | out: |
@@ -929,10 +952,10 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
929 | e1000e_set_interrupt_capability(adapter); | 952 | e1000e_set_interrupt_capability(adapter); |
930 | } | 953 | } |
931 | /* Hook up test interrupt handler just for this test */ | 954 | /* Hook up test interrupt handler just for this test */ |
932 | if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, | 955 | if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, |
933 | netdev)) { | 956 | netdev)) { |
934 | shared_int = 0; | 957 | shared_int = 0; |
935 | } else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, | 958 | } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, |
936 | netdev->name, netdev)) { | 959 | netdev->name, netdev)) { |
937 | *data = 1; | 960 | *data = 1; |
938 | ret_val = -1; | 961 | ret_val = -1; |
@@ -1239,6 +1262,10 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1239 | 1262 | ||
1240 | hw->mac.autoneg = 0; | 1263 | hw->mac.autoneg = 0; |
1241 | 1264 | ||
1265 | /* Workaround: K1 must be disabled for stable 1Gbps operation */ | ||
1266 | if (hw->mac.type == e1000_pchlan) | ||
1267 | e1000_configure_k1_ich8lan(hw, false); | ||
1268 | |||
1242 | if (hw->phy.type == e1000_phy_m88) { | 1269 | if (hw->phy.type == e1000_phy_m88) { |
1243 | /* Auto-MDI/MDIX Off */ | 1270 | /* Auto-MDI/MDIX Off */ |
1244 | e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); | 1271 | e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); |
@@ -1769,12 +1796,11 @@ static int e1000_set_wol(struct net_device *netdev, | |||
1769 | { | 1796 | { |
1770 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1797 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1771 | 1798 | ||
1772 | if (wol->wolopts & WAKE_MAGICSECURE) | ||
1773 | return -EOPNOTSUPP; | ||
1774 | |||
1775 | if (!(adapter->flags & FLAG_HAS_WOL) || | 1799 | if (!(adapter->flags & FLAG_HAS_WOL) || |
1776 | !device_can_wakeup(&adapter->pdev->dev)) | 1800 | !device_can_wakeup(&adapter->pdev->dev) || |
1777 | return wol->wolopts ? -EOPNOTSUPP : 0; | 1801 | (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | |
1802 | WAKE_MAGIC | WAKE_PHY | WAKE_ARP))) | ||
1803 | return -EOPNOTSUPP; | ||
1778 | 1804 | ||
1779 | /* these settings will always override what we currently have */ | 1805 | /* these settings will always override what we currently have */ |
1780 | adapter->wol = 0; | 1806 | adapter->wol = 0; |
@@ -1832,6 +1858,7 @@ static int e1000_phys_id(struct net_device *netdev, u32 data) | |||
1832 | 1858 | ||
1833 | if ((hw->phy.type == e1000_phy_ife) || | 1859 | if ((hw->phy.type == e1000_phy_ife) || |
1834 | (hw->mac.type == e1000_pchlan) || | 1860 | (hw->mac.type == e1000_pchlan) || |
1861 | (hw->mac.type == e1000_82583) || | ||
1835 | (hw->mac.type == e1000_82574)) { | 1862 | (hw->mac.type == e1000_82574)) { |
1836 | INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task); | 1863 | INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task); |
1837 | if (!adapter->blink_timer.function) { | 1864 | if (!adapter->blink_timer.function) { |
@@ -1912,10 +1939,21 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, | |||
1912 | { | 1939 | { |
1913 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1940 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1914 | int i; | 1941 | int i; |
1942 | char *p = NULL; | ||
1915 | 1943 | ||
1916 | e1000e_update_stats(adapter); | 1944 | e1000e_update_stats(adapter); |
1917 | for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { | 1945 | for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { |
1918 | char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; | 1946 | switch (e1000_gstrings_stats[i].type) { |
1947 | case NETDEV_STATS: | ||
1948 | p = (char *) netdev + | ||
1949 | e1000_gstrings_stats[i].stat_offset; | ||
1950 | break; | ||
1951 | case E1000_STATS: | ||
1952 | p = (char *) adapter + | ||
1953 | e1000_gstrings_stats[i].stat_offset; | ||
1954 | break; | ||
1955 | } | ||
1956 | |||
1919 | data[i] = (e1000_gstrings_stats[i].sizeof_stat == | 1957 | data[i] = (e1000_gstrings_stats[i].sizeof_stat == |
1920 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; | 1958 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; |
1921 | } | 1959 | } |
@@ -1975,6 +2013,8 @@ static const struct ethtool_ops e1000_ethtool_ops = { | |||
1975 | .get_sset_count = e1000e_get_sset_count, | 2013 | .get_sset_count = e1000e_get_sset_count, |
1976 | .get_coalesce = e1000_get_coalesce, | 2014 | .get_coalesce = e1000_get_coalesce, |
1977 | .set_coalesce = e1000_set_coalesce, | 2015 | .set_coalesce = e1000_set_coalesce, |
2016 | .get_flags = ethtool_op_get_flags, | ||
2017 | .set_flags = ethtool_op_set_flags, | ||
1978 | }; | 2018 | }; |
1979 | 2019 | ||
1980 | void e1000e_set_ethtool_ops(struct net_device *netdev) | 2020 | void e1000e_set_ethtool_ops(struct net_device *netdev) |
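Editorial note on the ethtool.c hunks above: each stats-table entry gains a type field (NETDEV_STATS vs E1000_STATS) so that e1000_get_ethtool_stats() can resolve the stored offset against either the net_device or the adapter. A small sketch of that two-base-pointer lookup follows, with a simplified table standing in for e1000_gstrings_stats.

	/*
	 * Sketch of the typed stats lookup added above: each entry records
	 * which base structure its offset is relative to.
	 */
	#include <stddef.h>
	#include <stdint.h>

	enum { DEMO_NETDEV_STATS, DEMO_DRIVER_STATS };

	struct demo_netdev  { uint64_t rx_errors; };
	struct demo_adapter { uint64_t gprc; };

	struct demo_stat {
		const char *name;
		int type;	/* which base pointer the offset applies to */
		size_t offset;
	};

	static const struct demo_stat demo_stats[] = {
		{ "rx_packets", DEMO_DRIVER_STATS, offsetof(struct demo_adapter, gprc) },
		{ "rx_errors",  DEMO_NETDEV_STATS, offsetof(struct demo_netdev, rx_errors) },
	};

	static void demo_get_stats(struct demo_netdev *nd, struct demo_adapter *ad,
				   uint64_t *out)
	{
		size_t i;

		for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
			/* pick the base pointer according to the entry's type */
			char *base = (demo_stats[i].type == DEMO_NETDEV_STATS)
					? (char *)nd : (char *)ad;
			out[i] = *(uint64_t *)(base + demo_stats[i].offset);
		}
	}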
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index aaea41ef794d..8bdcd5f24eff 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -219,7 +219,7 @@ enum e1e_registers { | |||
219 | E1000_HICR = 0x08F00, /* Host Interface Control */ | 219 | E1000_HICR = 0x08F00, /* Host Interface Control */ |
220 | }; | 220 | }; |
221 | 221 | ||
222 | /* RSS registers */ | 222 | #define E1000_MAX_PHY_ADDR 4 |
223 | 223 | ||
224 | /* IGP01E1000 Specific Registers */ | 224 | /* IGP01E1000 Specific Registers */ |
225 | #define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ | 225 | #define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ |
@@ -302,6 +302,8 @@ enum e1e_registers { | |||
302 | #define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 | 302 | #define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 |
303 | #define E1000_KMRNCTRLSTA_REN 0x00200000 | 303 | #define E1000_KMRNCTRLSTA_REN 0x00200000 |
304 | #define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ | 304 | #define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ |
305 | #define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ | ||
306 | #define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ | ||
305 | #define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ | 307 | #define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ |
306 | #define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 | 308 | #define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 |
307 | #define E1000_KMRNCTRLSTA_K1_ENABLE 0x140E | 309 | #define E1000_KMRNCTRLSTA_K1_ENABLE 0x140E |
@@ -356,6 +358,7 @@ enum e1e_registers { | |||
356 | #define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA | 358 | #define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA |
357 | #define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB | 359 | #define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB |
358 | 360 | ||
361 | #define E1000_DEV_ID_ICH8_82567V_3 0x1501 | ||
359 | #define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 | 362 | #define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 |
360 | #define E1000_DEV_ID_ICH8_IGP_AMT 0x104A | 363 | #define E1000_DEV_ID_ICH8_IGP_AMT 0x104A |
361 | #define E1000_DEV_ID_ICH8_IGP_C 0x104B | 364 | #define E1000_DEV_ID_ICH8_IGP_C 0x104B |
@@ -386,6 +389,9 @@ enum e1e_registers { | |||
386 | 389 | ||
387 | #define E1000_FUNC_1 1 | 390 | #define E1000_FUNC_1 1 |
388 | 391 | ||
392 | #define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 | ||
393 | #define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 | ||
394 | |||
389 | enum e1000_mac_type { | 395 | enum e1000_mac_type { |
390 | e1000_82571, | 396 | e1000_82571, |
391 | e1000_82572, | 397 | e1000_82572, |
@@ -741,48 +747,54 @@ struct e1000_mac_operations { | |||
741 | s32 (*check_for_link)(struct e1000_hw *); | 747 | s32 (*check_for_link)(struct e1000_hw *); |
742 | s32 (*cleanup_led)(struct e1000_hw *); | 748 | s32 (*cleanup_led)(struct e1000_hw *); |
743 | void (*clear_hw_cntrs)(struct e1000_hw *); | 749 | void (*clear_hw_cntrs)(struct e1000_hw *); |
750 | void (*clear_vfta)(struct e1000_hw *); | ||
744 | s32 (*get_bus_info)(struct e1000_hw *); | 751 | s32 (*get_bus_info)(struct e1000_hw *); |
752 | void (*set_lan_id)(struct e1000_hw *); | ||
745 | s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); | 753 | s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); |
746 | s32 (*led_on)(struct e1000_hw *); | 754 | s32 (*led_on)(struct e1000_hw *); |
747 | s32 (*led_off)(struct e1000_hw *); | 755 | s32 (*led_off)(struct e1000_hw *); |
748 | void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32); | 756 | void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); |
749 | s32 (*reset_hw)(struct e1000_hw *); | 757 | s32 (*reset_hw)(struct e1000_hw *); |
750 | s32 (*init_hw)(struct e1000_hw *); | 758 | s32 (*init_hw)(struct e1000_hw *); |
751 | s32 (*setup_link)(struct e1000_hw *); | 759 | s32 (*setup_link)(struct e1000_hw *); |
752 | s32 (*setup_physical_interface)(struct e1000_hw *); | 760 | s32 (*setup_physical_interface)(struct e1000_hw *); |
753 | s32 (*setup_led)(struct e1000_hw *); | 761 | s32 (*setup_led)(struct e1000_hw *); |
762 | void (*write_vfta)(struct e1000_hw *, u32, u32); | ||
763 | s32 (*read_mac_addr)(struct e1000_hw *); | ||
754 | }; | 764 | }; |
755 | 765 | ||
756 | /* Function pointers for the PHY. */ | 766 | /* Function pointers for the PHY. */ |
757 | struct e1000_phy_operations { | 767 | struct e1000_phy_operations { |
758 | s32 (*acquire_phy)(struct e1000_hw *); | 768 | s32 (*acquire)(struct e1000_hw *); |
769 | s32 (*cfg_on_link_up)(struct e1000_hw *); | ||
759 | s32 (*check_polarity)(struct e1000_hw *); | 770 | s32 (*check_polarity)(struct e1000_hw *); |
760 | s32 (*check_reset_block)(struct e1000_hw *); | 771 | s32 (*check_reset_block)(struct e1000_hw *); |
761 | s32 (*commit_phy)(struct e1000_hw *); | 772 | s32 (*commit)(struct e1000_hw *); |
762 | s32 (*force_speed_duplex)(struct e1000_hw *); | 773 | s32 (*force_speed_duplex)(struct e1000_hw *); |
763 | s32 (*get_cfg_done)(struct e1000_hw *hw); | 774 | s32 (*get_cfg_done)(struct e1000_hw *hw); |
764 | s32 (*get_cable_length)(struct e1000_hw *); | 775 | s32 (*get_cable_length)(struct e1000_hw *); |
765 | s32 (*get_phy_info)(struct e1000_hw *); | 776 | s32 (*get_info)(struct e1000_hw *); |
766 | s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *); | 777 | s32 (*read_reg)(struct e1000_hw *, u32, u16 *); |
767 | s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *); | 778 | s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); |
768 | void (*release_phy)(struct e1000_hw *); | 779 | void (*release)(struct e1000_hw *); |
769 | s32 (*reset_phy)(struct e1000_hw *); | 780 | s32 (*reset)(struct e1000_hw *); |
770 | s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); | 781 | s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); |
771 | s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); | 782 | s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); |
772 | s32 (*write_phy_reg)(struct e1000_hw *, u32, u16); | 783 | s32 (*write_reg)(struct e1000_hw *, u32, u16); |
773 | s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16); | 784 | s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); |
774 | s32 (*cfg_on_link_up)(struct e1000_hw *); | 785 | void (*power_up)(struct e1000_hw *); |
786 | void (*power_down)(struct e1000_hw *); | ||
775 | }; | 787 | }; |
776 | 788 | ||
777 | /* Function pointers for the NVM. */ | 789 | /* Function pointers for the NVM. */ |
778 | struct e1000_nvm_operations { | 790 | struct e1000_nvm_operations { |
779 | s32 (*acquire_nvm)(struct e1000_hw *); | 791 | s32 (*acquire)(struct e1000_hw *); |
780 | s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *); | 792 | s32 (*read)(struct e1000_hw *, u16, u16, u16 *); |
781 | void (*release_nvm)(struct e1000_hw *); | 793 | void (*release)(struct e1000_hw *); |
782 | s32 (*update_nvm)(struct e1000_hw *); | 794 | s32 (*update)(struct e1000_hw *); |
783 | s32 (*valid_led_default)(struct e1000_hw *, u16 *); | 795 | s32 (*valid_led_default)(struct e1000_hw *, u16 *); |
784 | s32 (*validate_nvm)(struct e1000_hw *); | 796 | s32 (*validate)(struct e1000_hw *); |
785 | s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *); | 797 | s32 (*write)(struct e1000_hw *, u16, u16, u16 *); |
786 | }; | 798 | }; |
787 | 799 | ||
788 | struct e1000_mac_info { | 800 | struct e1000_mac_info { |
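Most of the hunk above is a mechanical rename of the function-pointer tables (the redundant _phy/_nvm suffixes are dropped) plus a few new hooks such as power_up/power_down and read_mac_addr. A hedged sketch of what a call site looks like after the rename; the wrapper function and its error handling are illustrative, only the ops member names come from the hunk.

/* Sketch of calling through the renamed ops tables; the wrapper itself
 * is illustrative and not part of the driver. */
#include "e1000.h"

static s32 example_ops_rename(struct e1000_hw *hw, u16 *word)
{
	s32 ret_val;

	/* PHY register read: was hw->phy.ops.read_phy_reg(...) */
	ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, word);
	if (ret_val)
		return ret_val;

	/* NVM word read: was hw->nvm.ops.read_nvm(...) */
	return hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, word);
}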
@@ -807,10 +819,15 @@ struct e1000_mac_info { | |||
807 | u16 ifs_ratio; | 819 | u16 ifs_ratio; |
808 | u16 ifs_step_size; | 820 | u16 ifs_step_size; |
809 | u16 mta_reg_count; | 821 | u16 mta_reg_count; |
822 | |||
823 | /* Maximum size of the MTA register table in all supported adapters */ | ||
824 | #define MAX_MTA_REG 128 | ||
825 | u32 mta_shadow[MAX_MTA_REG]; | ||
810 | u16 rar_entry_count; | 826 | u16 rar_entry_count; |
811 | 827 | ||
812 | u8 forced_speed_duplex; | 828 | u8 forced_speed_duplex; |
813 | 829 | ||
830 | bool adaptive_ifs; | ||
814 | bool arc_subsystem_valid; | 831 | bool arc_subsystem_valid; |
815 | bool autoneg; | 832 | bool autoneg; |
816 | bool autoneg_failed; | 833 | bool autoneg_failed; |
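The new mta_shadow[] array above lets a multicast update be computed entirely in memory and then flushed to the hardware in one pass. A rough usage sketch, assuming the usual e1000 hash layout (low 5 bits select the bit, the remaining bits select the register); hash_mc_addr() and E1000_WRITE_REG_ARRAY() stand in for the driver's hash computation and register-array write helper and are assumptions, not copied from this diff.

/* Rough sketch of filling and flushing a shadow MTA; hash_mc_addr() and
 * E1000_WRITE_REG_ARRAY() are placeholders for driver internals. */
#include "e1000.h"

static u32 hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); /* placeholder */

static void example_update_mta_shadow(struct e1000_hw *hw,
				      u8 *mc_addr_list, u32 mc_addr_count)
{
	u32 i, hash_value, hash_reg, hash_bit;

	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* Set one bit in the shadow table per multicast address. */
	for (i = 0; i < mc_addr_count; i++) {
		hash_value = hash_mc_addr(hw, mc_addr_list + (i * ETH_ALEN));
		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
		hash_bit = hash_value & 0x1F;
		hw->mac.mta_shadow[hash_reg] |= (1u << hash_bit);
	}

	/* Flush the shadow copy to the hardware MTA registers. */
	for (i = 0; i < hw->mac.mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
}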
@@ -889,10 +906,13 @@ struct e1000_fc_info { | |||
889 | 906 | ||
890 | struct e1000_dev_spec_82571 { | 907 | struct e1000_dev_spec_82571 { |
891 | bool laa_is_present; | 908 | bool laa_is_present; |
892 | bool alt_mac_addr_is_present; | ||
893 | u32 smb_counter; | 909 | u32 smb_counter; |
894 | }; | 910 | }; |
895 | 911 | ||
912 | struct e1000_dev_spec_80003es2lan { | ||
913 | bool mdic_wa_enable; | ||
914 | }; | ||
915 | |||
896 | struct e1000_shadow_ram { | 916 | struct e1000_shadow_ram { |
897 | u16 value; | 917 | u16 value; |
898 | bool modified; | 918 | bool modified; |
@@ -921,19 +941,9 @@ struct e1000_hw { | |||
921 | 941 | ||
922 | union { | 942 | union { |
923 | struct e1000_dev_spec_82571 e82571; | 943 | struct e1000_dev_spec_82571 e82571; |
944 | struct e1000_dev_spec_80003es2lan e80003es2lan; | ||
924 | struct e1000_dev_spec_ich8lan ich8lan; | 945 | struct e1000_dev_spec_ich8lan ich8lan; |
925 | } dev_spec; | 946 | } dev_spec; |
926 | }; | 947 | }; |
927 | 948 | ||
928 | #ifdef DEBUG | ||
929 | #define hw_dbg(hw, format, arg...) \ | ||
930 | printk(KERN_DEBUG "%s: " format, e1000e_get_hw_dev_name(hw), ##arg) | ||
931 | #else | ||
932 | static inline int __attribute__ ((format (printf, 2, 3))) | ||
933 | hw_dbg(struct e1000_hw *hw, const char *format, ...) | ||
934 | { | ||
935 | return 0; | ||
936 | } | ||
937 | #endif | ||
938 | |||
939 | #endif | 949 | #endif |
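With the per-header hw_dbg() definition removed above, the remaining hunks in this diff switch every debug print to the driver-wide e_dbg() macro. Its actual definition lives in e1000.h and is not part of this diff; purely as an assumption, it is a wrapper of roughly this shape, notably no longer taking the hw pointer as an argument.

/* Assumed shape only; the real e_dbg() is defined in e1000.h and may
 * differ. The point is that the hw argument disappears from call sites. */
#ifdef DEBUG
#define e_dbg_sketch(format, arg...) \
	printk(KERN_DEBUG "e1000e: " format, ## arg)
#else
#define e_dbg_sketch(format, arg...) do { } while (0)
#endif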
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index eff3f4783655..8b5e157e9c87 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -54,11 +54,6 @@ | |||
54 | * 82578DC Gigabit Network Connection | 54 | * 82578DC Gigabit Network Connection |
55 | */ | 55 | */ |
56 | 56 | ||
57 | #include <linux/netdevice.h> | ||
58 | #include <linux/ethtool.h> | ||
59 | #include <linux/delay.h> | ||
60 | #include <linux/pci.h> | ||
61 | |||
62 | #include "e1000.h" | 57 | #include "e1000.h" |
63 | 58 | ||
64 | #define ICH_FLASH_GFPREG 0x0000 | 59 | #define ICH_FLASH_GFPREG 0x0000 |
@@ -143,6 +138,10 @@ | |||
143 | #define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ | 138 | #define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ |
144 | #define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ | 139 | #define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ |
145 | 140 | ||
141 | /* KMRN Mode Control */ | ||
142 | #define HV_KMRN_MODE_CTRL PHY_REG(769, 16) | ||
143 | #define HV_KMRN_MDIO_SLOW 0x0400 | ||
144 | |||
146 | /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ | 145 | /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ |
147 | /* Offset 04h HSFSTS */ | 146 | /* Offset 04h HSFSTS */ |
148 | union ich8_hws_flash_status { | 147 | union ich8_hws_flash_status { |
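The new HV_KMRN_MODE_CTRL define above is addressed through the PHY_REG(page, reg) helper. As an assumption (the macro is defined elsewhere in the driver headers, not in this hunk), it packs the page number above a 5-bit register address, roughly as sketched here.

/* Assumed packing behind PHY_REG(); names carry a _SKETCH suffix to make
 * clear they are not the driver's own definitions. */
#define MAX_PHY_REG_ADDRESS_SKETCH	0x1F	/* 5 bits of register address */
#define PHY_PAGE_SHIFT_SKETCH		5
#define PHY_REG_SKETCH(page, reg) \
	(((page) << PHY_PAGE_SHIFT_SKETCH) | ((reg) & MAX_PHY_REG_ADDRESS_SKETCH))

/* e.g. PHY_REG_SKETCH(769, 16) selects register 16 on PHY page 769,
 * matching the HV_KMRN_MODE_CTRL definition above. */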
@@ -200,7 +199,6 @@ union ich8_flash_protected_range { | |||
200 | static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); | 199 | static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); |
201 | static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); | 200 | static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); |
202 | static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); | 201 | static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); |
203 | static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw); | ||
204 | static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); | 202 | static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); |
205 | static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, | 203 | static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, |
206 | u32 offset, u8 byte); | 204 | u32 offset, u8 byte); |
@@ -222,9 +220,10 @@ static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); | |||
222 | static s32 e1000_led_on_pchlan(struct e1000_hw *hw); | 220 | static s32 e1000_led_on_pchlan(struct e1000_hw *hw); |
223 | static s32 e1000_led_off_pchlan(struct e1000_hw *hw); | 221 | static s32 e1000_led_off_pchlan(struct e1000_hw *hw); |
224 | static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); | 222 | static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); |
223 | static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); | ||
225 | static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); | 224 | static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); |
226 | static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); | 225 | static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); |
227 | static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); | 226 | static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); |
228 | 227 | ||
229 | static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) | 228 | static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) |
230 | { | 229 | { |
@@ -265,28 +264,54 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) | |||
265 | phy->addr = 1; | 264 | phy->addr = 1; |
266 | phy->reset_delay_us = 100; | 265 | phy->reset_delay_us = 100; |
267 | 266 | ||
268 | phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan; | 267 | phy->ops.read_reg = e1000_read_phy_reg_hv; |
269 | phy->ops.read_phy_reg = e1000_read_phy_reg_hv; | 268 | phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; |
270 | phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked; | ||
271 | phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; | 269 | phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; |
272 | phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; | 270 | phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; |
273 | phy->ops.write_phy_reg = e1000_write_phy_reg_hv; | 271 | phy->ops.write_reg = e1000_write_phy_reg_hv; |
274 | phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked; | 272 | phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; |
273 | phy->ops.power_up = e1000_power_up_phy_copper; | ||
274 | phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; | ||
275 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | 275 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; |
276 | 276 | ||
277 | phy->id = e1000_phy_unknown; | 277 | phy->id = e1000_phy_unknown; |
278 | e1000e_get_phy_id(hw); | 278 | ret_val = e1000e_get_phy_id(hw); |
279 | if (ret_val) | ||
280 | goto out; | ||
281 | if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) { | ||
282 | /* | ||
283 | * In case the PHY needs to be in mdio slow mode (eg. 82577), | ||
284 | * set slow mode and try to get the PHY id again. | ||
285 | */ | ||
286 | ret_val = e1000_set_mdio_slow_mode_hv(hw); | ||
287 | if (ret_val) | ||
288 | goto out; | ||
289 | ret_val = e1000e_get_phy_id(hw); | ||
290 | if (ret_val) | ||
291 | goto out; | ||
292 | } | ||
279 | phy->type = e1000e_get_phy_type_from_id(phy->id); | 293 | phy->type = e1000e_get_phy_type_from_id(phy->id); |
280 | 294 | ||
281 | if (phy->type == e1000_phy_82577) { | 295 | switch (phy->type) { |
296 | case e1000_phy_82577: | ||
282 | phy->ops.check_polarity = e1000_check_polarity_82577; | 297 | phy->ops.check_polarity = e1000_check_polarity_82577; |
283 | phy->ops.force_speed_duplex = | 298 | phy->ops.force_speed_duplex = |
284 | e1000_phy_force_speed_duplex_82577; | 299 | e1000_phy_force_speed_duplex_82577; |
285 | phy->ops.get_cable_length = e1000_get_cable_length_82577; | 300 | phy->ops.get_cable_length = e1000_get_cable_length_82577; |
286 | phy->ops.get_phy_info = e1000_get_phy_info_82577; | 301 | phy->ops.get_info = e1000_get_phy_info_82577; |
287 | phy->ops.commit_phy = e1000e_phy_sw_reset; | 302 | phy->ops.commit = e1000e_phy_sw_reset; |
303 | case e1000_phy_82578: | ||
304 | phy->ops.check_polarity = e1000_check_polarity_m88; | ||
305 | phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; | ||
306 | phy->ops.get_cable_length = e1000e_get_cable_length_m88; | ||
307 | phy->ops.get_info = e1000e_get_phy_info_m88; | ||
308 | break; | ||
309 | default: | ||
310 | ret_val = -E1000_ERR_PHY; | ||
311 | break; | ||
288 | } | 312 | } |
289 | 313 | ||
314 | out: | ||
290 | return ret_val; | 315 | return ret_val; |
291 | } | 316 | } |
292 | 317 | ||
@@ -305,17 +330,22 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) | |||
305 | phy->addr = 1; | 330 | phy->addr = 1; |
306 | phy->reset_delay_us = 100; | 331 | phy->reset_delay_us = 100; |
307 | 332 | ||
333 | phy->ops.power_up = e1000_power_up_phy_copper; | ||
334 | phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; | ||
335 | |||
308 | /* | 336 | /* |
309 | * We may need to do this twice - once for IGP and if that fails, | 337 | * We may need to do this twice - once for IGP and if that fails, |
310 | * we'll set BM func pointers and try again | 338 | * we'll set BM func pointers and try again |
311 | */ | 339 | */ |
312 | ret_val = e1000e_determine_phy_address(hw); | 340 | ret_val = e1000e_determine_phy_address(hw); |
313 | if (ret_val) { | 341 | if (ret_val) { |
314 | hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; | 342 | phy->ops.write_reg = e1000e_write_phy_reg_bm; |
315 | hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; | 343 | phy->ops.read_reg = e1000e_read_phy_reg_bm; |
316 | ret_val = e1000e_determine_phy_address(hw); | 344 | ret_val = e1000e_determine_phy_address(hw); |
317 | if (ret_val) | 345 | if (ret_val) { |
346 | e_dbg("Cannot determine PHY addr. Erroring out\n"); | ||
318 | return ret_val; | 347 | return ret_val; |
348 | } | ||
319 | } | 349 | } |
320 | 350 | ||
321 | phy->id = 0; | 351 | phy->id = 0; |
@@ -332,29 +362,36 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) | |||
332 | case IGP03E1000_E_PHY_ID: | 362 | case IGP03E1000_E_PHY_ID: |
333 | phy->type = e1000_phy_igp_3; | 363 | phy->type = e1000_phy_igp_3; |
334 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | 364 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; |
335 | phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked; | 365 | phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked; |
336 | phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked; | 366 | phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked; |
367 | phy->ops.get_info = e1000e_get_phy_info_igp; | ||
368 | phy->ops.check_polarity = e1000_check_polarity_igp; | ||
369 | phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp; | ||
337 | break; | 370 | break; |
338 | case IFE_E_PHY_ID: | 371 | case IFE_E_PHY_ID: |
339 | case IFE_PLUS_E_PHY_ID: | 372 | case IFE_PLUS_E_PHY_ID: |
340 | case IFE_C_E_PHY_ID: | 373 | case IFE_C_E_PHY_ID: |
341 | phy->type = e1000_phy_ife; | 374 | phy->type = e1000_phy_ife; |
342 | phy->autoneg_mask = E1000_ALL_NOT_GIG; | 375 | phy->autoneg_mask = E1000_ALL_NOT_GIG; |
376 | phy->ops.get_info = e1000_get_phy_info_ife; | ||
377 | phy->ops.check_polarity = e1000_check_polarity_ife; | ||
378 | phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; | ||
343 | break; | 379 | break; |
344 | case BME1000_E_PHY_ID: | 380 | case BME1000_E_PHY_ID: |
345 | phy->type = e1000_phy_bm; | 381 | phy->type = e1000_phy_bm; |
346 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | 382 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; |
347 | hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; | 383 | phy->ops.read_reg = e1000e_read_phy_reg_bm; |
348 | hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; | 384 | phy->ops.write_reg = e1000e_write_phy_reg_bm; |
349 | hw->phy.ops.commit_phy = e1000e_phy_sw_reset; | 385 | phy->ops.commit = e1000e_phy_sw_reset; |
386 | phy->ops.get_info = e1000e_get_phy_info_m88; | ||
387 | phy->ops.check_polarity = e1000_check_polarity_m88; | ||
388 | phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; | ||
350 | break; | 389 | break; |
351 | default: | 390 | default: |
352 | return -E1000_ERR_PHY; | 391 | return -E1000_ERR_PHY; |
353 | break; | 392 | break; |
354 | } | 393 | } |
355 | 394 | ||
356 | phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan; | ||
357 | |||
358 | return 0; | 395 | return 0; |
359 | } | 396 | } |
360 | 397 | ||
@@ -374,7 +411,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) | |||
374 | 411 | ||
375 | /* Can't read flash registers if the register set isn't mapped. */ | 412 | /* Can't read flash registers if the register set isn't mapped. */ |
376 | if (!hw->flash_address) { | 413 | if (!hw->flash_address) { |
377 | hw_dbg(hw, "ERROR: Flash registers not mapped\n"); | 414 | e_dbg("ERROR: Flash registers not mapped\n"); |
378 | return -E1000_ERR_CONFIG; | 415 | return -E1000_ERR_CONFIG; |
379 | } | 416 | } |
380 | 417 | ||
@@ -407,7 +444,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) | |||
407 | 444 | ||
408 | /* Clear shadow ram */ | 445 | /* Clear shadow ram */ |
409 | for (i = 0; i < nvm->word_size; i++) { | 446 | for (i = 0; i < nvm->word_size; i++) { |
410 | dev_spec->shadow_ram[i].modified = 0; | 447 | dev_spec->shadow_ram[i].modified = false; |
411 | dev_spec->shadow_ram[i].value = 0xFFFF; | 448 | dev_spec->shadow_ram[i].value = 0xFFFF; |
412 | } | 449 | } |
413 | 450 | ||
@@ -436,7 +473,9 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) | |||
436 | if (mac->type == e1000_ich8lan) | 473 | if (mac->type == e1000_ich8lan) |
437 | mac->rar_entry_count--; | 474 | mac->rar_entry_count--; |
438 | /* Set if manageability features are enabled. */ | 475 | /* Set if manageability features are enabled. */ |
439 | mac->arc_subsystem_valid = 1; | 476 | mac->arc_subsystem_valid = true; |
477 | /* Adaptive IFS supported */ | ||
478 | mac->adaptive_ifs = true; | ||
440 | 479 | ||
441 | /* LED operations */ | 480 | /* LED operations */ |
442 | switch (mac->type) { | 481 | switch (mac->type) { |
@@ -470,7 +509,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) | |||
470 | 509 | ||
471 | /* Enable PCS Lock-loss workaround for ICH8 */ | 510 | /* Enable PCS Lock-loss workaround for ICH8 */ |
472 | if (mac->type == e1000_ich8lan) | 511 | if (mac->type == e1000_ich8lan) |
473 | e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, 1); | 512 | e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); |
474 | 513 | ||
475 | return 0; | 514 | return 0; |
476 | } | 515 | } |
@@ -556,7 +595,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
556 | */ | 595 | */ |
557 | ret_val = e1000e_config_fc_after_link_up(hw); | 596 | ret_val = e1000e_config_fc_after_link_up(hw); |
558 | if (ret_val) | 597 | if (ret_val) |
559 | hw_dbg(hw, "Error configuring flow control\n"); | 598 | e_dbg("Error configuring flow control\n"); |
560 | 599 | ||
561 | out: | 600 | out: |
562 | return ret_val; | 601 | return ret_val; |
@@ -636,8 +675,6 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | |||
636 | u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; | 675 | u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; |
637 | s32 ret_val = 0; | 676 | s32 ret_val = 0; |
638 | 677 | ||
639 | might_sleep(); | ||
640 | |||
641 | mutex_lock(&swflag_mutex); | 678 | mutex_lock(&swflag_mutex); |
642 | 679 | ||
643 | while (timeout) { | 680 | while (timeout) { |
@@ -650,7 +687,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | |||
650 | } | 687 | } |
651 | 688 | ||
652 | if (!timeout) { | 689 | if (!timeout) { |
653 | hw_dbg(hw, "SW/FW/HW has locked the resource for too long.\n"); | 690 | e_dbg("SW/FW/HW has locked the resource for too long.\n"); |
654 | ret_val = -E1000_ERR_CONFIG; | 691 | ret_val = -E1000_ERR_CONFIG; |
655 | goto out; | 692 | goto out; |
656 | } | 693 | } |
@@ -670,7 +707,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | |||
670 | } | 707 | } |
671 | 708 | ||
672 | if (!timeout) { | 709 | if (!timeout) { |
673 | hw_dbg(hw, "Failed to acquire the semaphore.\n"); | 710 | e_dbg("Failed to acquire the semaphore.\n"); |
674 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; | 711 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; |
675 | ew32(EXTCNF_CTRL, extcnf_ctrl); | 712 | ew32(EXTCNF_CTRL, extcnf_ctrl); |
676 | ret_val = -E1000_ERR_CONFIG; | 713 | ret_val = -E1000_ERR_CONFIG; |
@@ -714,7 +751,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) | |||
714 | **/ | 751 | **/ |
715 | static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) | 752 | static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) |
716 | { | 753 | { |
717 | u32 fwsm = er32(FWSM); | 754 | u32 fwsm; |
755 | |||
756 | fwsm = er32(FWSM); | ||
718 | 757 | ||
719 | return (fwsm & E1000_FWSM_MODE_MASK) == | 758 | return (fwsm & E1000_FWSM_MODE_MASK) == |
720 | (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); | 759 | (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); |
@@ -738,77 +777,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) | |||
738 | } | 777 | } |
739 | 778 | ||
740 | /** | 779 | /** |
741 | * e1000_phy_force_speed_duplex_ich8lan - Force PHY speed & duplex | ||
742 | * @hw: pointer to the HW structure | ||
743 | * | ||
744 | * Forces the speed and duplex settings of the PHY. | ||
745 | * This is a function pointer entry point only called by | ||
746 | * PHY setup routines. | ||
747 | **/ | ||
748 | static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw) | ||
749 | { | ||
750 | struct e1000_phy_info *phy = &hw->phy; | ||
751 | s32 ret_val; | ||
752 | u16 data; | ||
753 | bool link; | ||
754 | |||
755 | if (phy->type != e1000_phy_ife) { | ||
756 | ret_val = e1000e_phy_force_speed_duplex_igp(hw); | ||
757 | return ret_val; | ||
758 | } | ||
759 | |||
760 | ret_val = e1e_rphy(hw, PHY_CONTROL, &data); | ||
761 | if (ret_val) | ||
762 | return ret_val; | ||
763 | |||
764 | e1000e_phy_force_speed_duplex_setup(hw, &data); | ||
765 | |||
766 | ret_val = e1e_wphy(hw, PHY_CONTROL, data); | ||
767 | if (ret_val) | ||
768 | return ret_val; | ||
769 | |||
770 | /* Disable MDI-X support for 10/100 */ | ||
771 | ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); | ||
772 | if (ret_val) | ||
773 | return ret_val; | ||
774 | |||
775 | data &= ~IFE_PMC_AUTO_MDIX; | ||
776 | data &= ~IFE_PMC_FORCE_MDIX; | ||
777 | |||
778 | ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); | ||
779 | if (ret_val) | ||
780 | return ret_val; | ||
781 | |||
782 | hw_dbg(hw, "IFE PMC: %X\n", data); | ||
783 | |||
784 | udelay(1); | ||
785 | |||
786 | if (phy->autoneg_wait_to_complete) { | ||
787 | hw_dbg(hw, "Waiting for forced speed/duplex link on IFE phy.\n"); | ||
788 | |||
789 | ret_val = e1000e_phy_has_link_generic(hw, | ||
790 | PHY_FORCE_LIMIT, | ||
791 | 100000, | ||
792 | &link); | ||
793 | if (ret_val) | ||
794 | return ret_val; | ||
795 | |||
796 | if (!link) | ||
797 | hw_dbg(hw, "Link taking longer than expected.\n"); | ||
798 | |||
799 | /* Try once more */ | ||
800 | ret_val = e1000e_phy_has_link_generic(hw, | ||
801 | PHY_FORCE_LIMIT, | ||
802 | 100000, | ||
803 | &link); | ||
804 | if (ret_val) | ||
805 | return ret_val; | ||
806 | } | ||
807 | |||
808 | return 0; | ||
809 | } | ||
810 | |||
811 | /** | ||
812 | * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration | 780 | * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration |
813 | * @hw: pointer to the HW structure | 781 | * @hw: pointer to the HW structure |
814 | * | 782 | * |
@@ -822,7 +790,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) | |||
822 | s32 ret_val; | 790 | s32 ret_val; |
823 | u16 word_addr, reg_data, reg_addr, phy_page = 0; | 791 | u16 word_addr, reg_data, reg_addr, phy_page = 0; |
824 | 792 | ||
825 | ret_val = hw->phy.ops.acquire_phy(hw); | 793 | ret_val = hw->phy.ops.acquire(hw); |
826 | if (ret_val) | 794 | if (ret_val) |
827 | return ret_val; | 795 | return ret_val; |
828 | 796 | ||
@@ -918,7 +886,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) | |||
918 | reg_addr &= PHY_REG_MASK; | 886 | reg_addr &= PHY_REG_MASK; |
919 | reg_addr |= phy_page; | 887 | reg_addr |= phy_page; |
920 | 888 | ||
921 | ret_val = phy->ops.write_phy_reg_locked(hw, | 889 | ret_val = phy->ops.write_reg_locked(hw, |
922 | (u32)reg_addr, | 890 | (u32)reg_addr, |
923 | reg_data); | 891 | reg_data); |
924 | if (ret_val) | 892 | if (ret_val) |
@@ -927,7 +895,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) | |||
927 | } | 895 | } |
928 | 896 | ||
929 | out: | 897 | out: |
930 | hw->phy.ops.release_phy(hw); | 898 | hw->phy.ops.release(hw); |
931 | return ret_val; | 899 | return ret_val; |
932 | } | 900 | } |
933 | 901 | ||
@@ -951,15 +919,14 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) | |||
951 | goto out; | 919 | goto out; |
952 | 920 | ||
953 | /* Wrap the whole flow with the sw flag */ | 921 | /* Wrap the whole flow with the sw flag */ |
954 | ret_val = hw->phy.ops.acquire_phy(hw); | 922 | ret_val = hw->phy.ops.acquire(hw); |
955 | if (ret_val) | 923 | if (ret_val) |
956 | goto out; | 924 | goto out; |
957 | 925 | ||
958 | /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ | 926 | /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ |
959 | if (link) { | 927 | if (link) { |
960 | if (hw->phy.type == e1000_phy_82578) { | 928 | if (hw->phy.type == e1000_phy_82578) { |
961 | ret_val = hw->phy.ops.read_phy_reg_locked(hw, | 929 | ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, |
962 | BM_CS_STATUS, | ||
963 | &status_reg); | 930 | &status_reg); |
964 | if (ret_val) | 931 | if (ret_val) |
965 | goto release; | 932 | goto release; |
@@ -975,8 +942,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) | |||
975 | } | 942 | } |
976 | 943 | ||
977 | if (hw->phy.type == e1000_phy_82577) { | 944 | if (hw->phy.type == e1000_phy_82577) { |
978 | ret_val = hw->phy.ops.read_phy_reg_locked(hw, | 945 | ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, |
979 | HV_M_STATUS, | ||
980 | &status_reg); | 946 | &status_reg); |
981 | if (ret_val) | 947 | if (ret_val) |
982 | goto release; | 948 | goto release; |
@@ -992,14 +958,14 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) | |||
992 | } | 958 | } |
993 | 959 | ||
994 | /* Link stall fix for link up */ | 960 | /* Link stall fix for link up */ |
995 | ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19), | 961 | ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), |
996 | 0x0100); | 962 | 0x0100); |
997 | if (ret_val) | 963 | if (ret_val) |
998 | goto release; | 964 | goto release; |
999 | 965 | ||
1000 | } else { | 966 | } else { |
1001 | /* Link stall fix for link down */ | 967 | /* Link stall fix for link down */ |
1002 | ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19), | 968 | ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), |
1003 | 0x4100); | 969 | 0x4100); |
1004 | if (ret_val) | 970 | if (ret_val) |
1005 | goto release; | 971 | goto release; |
@@ -1008,7 +974,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) | |||
1008 | ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); | 974 | ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); |
1009 | 975 | ||
1010 | release: | 976 | release: |
1011 | hw->phy.ops.release_phy(hw); | 977 | hw->phy.ops.release(hw); |
1012 | out: | 978 | out: |
1013 | return ret_val; | 979 | return ret_val; |
1014 | } | 980 | } |
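The K1 workaround above shows the locked-access convention these hunks converge on: take the SW flag once with phy.ops.acquire(), use only the *_locked register accessors inside the critical section, and release on every exit path. A condensed sketch of that read-modify-write pattern; the wrapper function and its parameters are illustrative.

/* Condensed sketch of the acquire / *_locked / release convention used
 * by the workaround above; reg and set_bits are illustrative parameters. */
#include "e1000.h"

static s32 example_locked_rmw(struct e1000_hw *hw, u32 reg, u16 set_bits)
{
	s32 ret_val;
	u16 data;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.read_reg_locked(hw, reg, &data);
	if (ret_val)
		goto release;

	ret_val = hw->phy.ops.write_reg_locked(hw, reg, data | set_bits);

release:
	hw->phy.ops.release(hw);
	return ret_val;
}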
@@ -1023,7 +989,7 @@ out: | |||
1023 | * | 989 | * |
1024 | * Success returns 0, Failure returns -E1000_ERR_PHY (-2) | 990 | * Success returns 0, Failure returns -E1000_ERR_PHY (-2) |
1025 | **/ | 991 | **/ |
1026 | static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) | 992 | s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) |
1027 | { | 993 | { |
1028 | s32 ret_val = 0; | 994 | s32 ret_val = 0; |
1029 | u32 ctrl_reg = 0; | 995 | u32 ctrl_reg = 0; |
@@ -1084,7 +1050,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) | |||
1084 | if (hw->mac.type != e1000_pchlan) | 1050 | if (hw->mac.type != e1000_pchlan) |
1085 | return ret_val; | 1051 | return ret_val; |
1086 | 1052 | ||
1087 | ret_val = hw->phy.ops.acquire_phy(hw); | 1053 | ret_val = hw->phy.ops.acquire(hw); |
1088 | if (ret_val) | 1054 | if (ret_val) |
1089 | return ret_val; | 1055 | return ret_val; |
1090 | 1056 | ||
@@ -1098,7 +1064,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) | |||
1098 | 1064 | ||
1099 | mac_reg = er32(PHY_CTRL); | 1065 | mac_reg = er32(PHY_CTRL); |
1100 | 1066 | ||
1101 | ret_val = hw->phy.ops.read_phy_reg_locked(hw, HV_OEM_BITS, &oem_reg); | 1067 | ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); |
1102 | if (ret_val) | 1068 | if (ret_val) |
1103 | goto out; | 1069 | goto out; |
1104 | 1070 | ||
@@ -1120,26 +1086,54 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) | |||
1120 | /* Restart auto-neg to activate the bits */ | 1086 | /* Restart auto-neg to activate the bits */ |
1121 | if (!e1000_check_reset_block(hw)) | 1087 | if (!e1000_check_reset_block(hw)) |
1122 | oem_reg |= HV_OEM_BITS_RESTART_AN; | 1088 | oem_reg |= HV_OEM_BITS_RESTART_AN; |
1123 | ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg); | 1089 | ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); |
1124 | 1090 | ||
1125 | out: | 1091 | out: |
1126 | hw->phy.ops.release_phy(hw); | 1092 | hw->phy.ops.release(hw); |
1127 | 1093 | ||
1128 | return ret_val; | 1094 | return ret_val; |
1129 | } | 1095 | } |
1130 | 1096 | ||
1131 | 1097 | ||
1132 | /** | 1098 | /** |
1099 | * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode | ||
1100 | * @hw: pointer to the HW structure | ||
1101 | **/ | ||
1102 | static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) | ||
1103 | { | ||
1104 | s32 ret_val; | ||
1105 | u16 data; | ||
1106 | |||
1107 | ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data); | ||
1108 | if (ret_val) | ||
1109 | return ret_val; | ||
1110 | |||
1111 | data |= HV_KMRN_MDIO_SLOW; | ||
1112 | |||
1113 | ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data); | ||
1114 | |||
1115 | return ret_val; | ||
1116 | } | ||
1117 | |||
1118 | /** | ||
1133 | * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be | 1119 | * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be |
1134 | * done after every PHY reset. | 1120 | * done after every PHY reset. |
1135 | **/ | 1121 | **/ |
1136 | static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) | 1122 | static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) |
1137 | { | 1123 | { |
1138 | s32 ret_val = 0; | 1124 | s32 ret_val = 0; |
1125 | u16 phy_data; | ||
1139 | 1126 | ||
1140 | if (hw->mac.type != e1000_pchlan) | 1127 | if (hw->mac.type != e1000_pchlan) |
1141 | return ret_val; | 1128 | return ret_val; |
1142 | 1129 | ||
1130 | /* Set MDIO slow mode before any other MDIO access */ | ||
1131 | if (hw->phy.type == e1000_phy_82577) { | ||
1132 | ret_val = e1000_set_mdio_slow_mode_hv(hw); | ||
1133 | if (ret_val) | ||
1134 | goto out; | ||
1135 | } | ||
1136 | |||
1143 | if (((hw->phy.type == e1000_phy_82577) && | 1137 | if (((hw->phy.type == e1000_phy_82577) && |
1144 | ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || | 1138 | ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || |
1145 | ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { | 1139 | ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { |
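The new e1000_set_mdio_slow_mode_hv() helper above uses the driver's e1e_rphy()/e1e_wphy() shorthands. Their definitions live in e1000.h and are not part of this diff; as an assumption, after the ops rename they dispatch along these lines.

/* Assumed dispatch behind the e1e_rphy()/e1e_wphy() shorthands; the real
 * macros are in e1000.h and may differ in detail. */
#define e1e_rphy_sketch(hw, reg, data) \
	((hw)->phy.ops.read_reg((hw), (reg), (data)))
#define e1e_wphy_sketch(hw, reg, data) \
	((hw)->phy.ops.write_reg((hw), (reg), (data)))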
@@ -1166,22 +1160,38 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) | |||
1166 | } | 1160 | } |
1167 | 1161 | ||
1168 | /* Select page 0 */ | 1162 | /* Select page 0 */ |
1169 | ret_val = hw->phy.ops.acquire_phy(hw); | 1163 | ret_val = hw->phy.ops.acquire(hw); |
1170 | if (ret_val) | 1164 | if (ret_val) |
1171 | return ret_val; | 1165 | return ret_val; |
1172 | 1166 | ||
1173 | hw->phy.addr = 1; | 1167 | hw->phy.addr = 1; |
1174 | ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); | 1168 | ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); |
1169 | hw->phy.ops.release(hw); | ||
1175 | if (ret_val) | 1170 | if (ret_val) |
1176 | goto out; | 1171 | goto out; |
1177 | hw->phy.ops.release_phy(hw); | ||
1178 | 1172 | ||
1179 | /* | 1173 | /* |
1180 | * Configure the K1 Si workaround during phy reset assuming there is | 1174 | * Configure the K1 Si workaround during phy reset assuming there is |
1181 | * link so that it disables K1 if link is in 1Gbps. | 1175 | * link so that it disables K1 if link is in 1Gbps. |
1182 | */ | 1176 | */ |
1183 | ret_val = e1000_k1_gig_workaround_hv(hw, true); | 1177 | ret_val = e1000_k1_gig_workaround_hv(hw, true); |
1178 | if (ret_val) | ||
1179 | goto out; | ||
1184 | 1180 | ||
1181 | /* Workaround for link disconnects on a busy hub in half duplex */ | ||
1182 | ret_val = hw->phy.ops.acquire(hw); | ||
1183 | if (ret_val) | ||
1184 | goto out; | ||
1185 | ret_val = hw->phy.ops.read_reg_locked(hw, | ||
1186 | PHY_REG(BM_PORT_CTRL_PAGE, 17), | ||
1187 | &phy_data); | ||
1188 | if (ret_val) | ||
1189 | goto release; | ||
1190 | ret_val = hw->phy.ops.write_reg_locked(hw, | ||
1191 | PHY_REG(BM_PORT_CTRL_PAGE, 17), | ||
1192 | phy_data & 0x00FF); | ||
1193 | release: | ||
1194 | hw->phy.ops.release(hw); | ||
1185 | out: | 1195 | out: |
1186 | return ret_val; | 1196 | return ret_val; |
1187 | } | 1197 | } |
@@ -1210,7 +1220,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) | |||
1210 | * leave the PHY in a bad state possibly resulting in no link. | 1220 | * leave the PHY in a bad state possibly resulting in no link. |
1211 | */ | 1221 | */ |
1212 | if (loop == 0) | 1222 | if (loop == 0) |
1213 | hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n"); | 1223 | e_dbg("LAN_INIT_DONE not set, increase timeout\n"); |
1214 | 1224 | ||
1215 | /* Clear the Init Done bit for the next init event */ | 1225 | /* Clear the Init Done bit for the next init event */ |
1216 | data = er32(STATUS); | 1226 | data = er32(STATUS); |
@@ -1238,6 +1248,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) | |||
1238 | /* Allow time for h/w to get to a quiescent state after reset */ | 1248 | /* Allow time for h/w to get to a quiescent state after reset */ |
1239 | mdelay(10); | 1249 | mdelay(10); |
1240 | 1250 | ||
1251 | /* Perform any necessary post-reset workarounds */ | ||
1241 | if (hw->mac.type == e1000_pchlan) { | 1252 | if (hw->mac.type == e1000_pchlan) { |
1242 | ret_val = e1000_hv_phy_workarounds_ich8lan(hw); | 1253 | ret_val = e1000_hv_phy_workarounds_ich8lan(hw); |
1243 | if (ret_val) | 1254 | if (ret_val) |
@@ -1262,122 +1273,6 @@ out: | |||
1262 | } | 1273 | } |
1263 | 1274 | ||
1264 | /** | 1275 | /** |
1265 | * e1000_get_phy_info_ife_ich8lan - Retrieves various IFE PHY states | ||
1266 | * @hw: pointer to the HW structure | ||
1267 | * | ||
1268 | * Populates "phy" structure with various feature states. | ||
1269 | * This function is only called by other family-specific | ||
1270 | * routines. | ||
1271 | **/ | ||
1272 | static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw) | ||
1273 | { | ||
1274 | struct e1000_phy_info *phy = &hw->phy; | ||
1275 | s32 ret_val; | ||
1276 | u16 data; | ||
1277 | bool link; | ||
1278 | |||
1279 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | ||
1280 | if (ret_val) | ||
1281 | return ret_val; | ||
1282 | |||
1283 | if (!link) { | ||
1284 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | ||
1285 | return -E1000_ERR_CONFIG; | ||
1286 | } | ||
1287 | |||
1288 | ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); | ||
1289 | if (ret_val) | ||
1290 | return ret_val; | ||
1291 | phy->polarity_correction = (!(data & IFE_PSC_AUTO_POLARITY_DISABLE)); | ||
1292 | |||
1293 | if (phy->polarity_correction) { | ||
1294 | ret_val = phy->ops.check_polarity(hw); | ||
1295 | if (ret_val) | ||
1296 | return ret_val; | ||
1297 | } else { | ||
1298 | /* Polarity is forced */ | ||
1299 | phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) | ||
1300 | ? e1000_rev_polarity_reversed | ||
1301 | : e1000_rev_polarity_normal; | ||
1302 | } | ||
1303 | |||
1304 | ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); | ||
1305 | if (ret_val) | ||
1306 | return ret_val; | ||
1307 | |||
1308 | phy->is_mdix = (data & IFE_PMC_MDIX_STATUS); | ||
1309 | |||
1310 | /* The following parameters are undefined for 10/100 operation. */ | ||
1311 | phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; | ||
1312 | phy->local_rx = e1000_1000t_rx_status_undefined; | ||
1313 | phy->remote_rx = e1000_1000t_rx_status_undefined; | ||
1314 | |||
1315 | return 0; | ||
1316 | } | ||
1317 | |||
1318 | /** | ||
1319 | * e1000_get_phy_info_ich8lan - Calls appropriate PHY type get_phy_info | ||
1320 | * @hw: pointer to the HW structure | ||
1321 | * | ||
1322 | * Wrapper for calling the get_phy_info routines for the appropriate phy type. | ||
1323 | * This is a function pointer entry point called by drivers | ||
1324 | * or other shared routines. | ||
1325 | **/ | ||
1326 | static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw) | ||
1327 | { | ||
1328 | switch (hw->phy.type) { | ||
1329 | case e1000_phy_ife: | ||
1330 | return e1000_get_phy_info_ife_ich8lan(hw); | ||
1331 | break; | ||
1332 | case e1000_phy_igp_3: | ||
1333 | case e1000_phy_bm: | ||
1334 | case e1000_phy_82578: | ||
1335 | case e1000_phy_82577: | ||
1336 | return e1000e_get_phy_info_igp(hw); | ||
1337 | break; | ||
1338 | default: | ||
1339 | break; | ||
1340 | } | ||
1341 | |||
1342 | return -E1000_ERR_PHY_TYPE; | ||
1343 | } | ||
1344 | |||
1345 | /** | ||
1346 | * e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY | ||
1347 | * @hw: pointer to the HW structure | ||
1348 | * | ||
1349 | * Polarity is determined on the polarity reversal feature being enabled. | ||
1350 | * This function is only called by other family-specific | ||
1351 | * routines. | ||
1352 | **/ | ||
1353 | static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw) | ||
1354 | { | ||
1355 | struct e1000_phy_info *phy = &hw->phy; | ||
1356 | s32 ret_val; | ||
1357 | u16 phy_data, offset, mask; | ||
1358 | |||
1359 | /* | ||
1360 | * Polarity is determined based on the reversal feature being enabled. | ||
1361 | */ | ||
1362 | if (phy->polarity_correction) { | ||
1363 | offset = IFE_PHY_EXTENDED_STATUS_CONTROL; | ||
1364 | mask = IFE_PESC_POLARITY_REVERSED; | ||
1365 | } else { | ||
1366 | offset = IFE_PHY_SPECIAL_CONTROL; | ||
1367 | mask = IFE_PSC_FORCE_POLARITY; | ||
1368 | } | ||
1369 | |||
1370 | ret_val = e1e_rphy(hw, offset, &phy_data); | ||
1371 | |||
1372 | if (!ret_val) | ||
1373 | phy->cable_polarity = (phy_data & mask) | ||
1374 | ? e1000_rev_polarity_reversed | ||
1375 | : e1000_rev_polarity_normal; | ||
1376 | |||
1377 | return ret_val; | ||
1378 | } | ||
1379 | |||
1380 | /** | ||
1381 | * e1000_set_lplu_state_pchlan - Set Low Power Link Up state | 1276 | * e1000_set_lplu_state_pchlan - Set Low Power Link Up state |
1382 | * @hw: pointer to the HW structure | 1277 | * @hw: pointer to the HW structure |
1383 | * @active: true to enable LPLU, false to disable | 1278 | * @active: true to enable LPLU, false to disable |
@@ -1412,7 +1307,7 @@ out: | |||
1412 | /** | 1307 | /** |
1413 | * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state | 1308 | * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state |
1414 | * @hw: pointer to the HW structure | 1309 | * @hw: pointer to the HW structure |
1415 | * @active: TRUE to enable LPLU, FALSE to disable | 1310 | * @active: true to enable LPLU, false to disable |
1416 | * | 1311 | * |
1417 | * Sets the LPLU D0 state according to the active flag. When | 1312 | * Sets the LPLU D0 state according to the active flag. When |
1418 | * activating LPLU this function also disables smart speed | 1313 | * activating LPLU this function also disables smart speed |
@@ -1498,7 +1393,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) | |||
1498 | /** | 1393 | /** |
1499 | * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state | 1394 | * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state |
1500 | * @hw: pointer to the HW structure | 1395 | * @hw: pointer to the HW structure |
1501 | * @active: TRUE to enable LPLU, FALSE to disable | 1396 | * @active: true to enable LPLU, false to disable |
1502 | * | 1397 | * |
1503 | * Sets the LPLU D3 state according to the active flag. When | 1398 | * Sets the LPLU D3 state according to the active flag. When |
1504 | * activating LPLU this function also disables smart speed | 1399 | * activating LPLU this function also disables smart speed |
@@ -1611,7 +1506,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) | |||
1611 | 1506 | ||
1612 | return 0; | 1507 | return 0; |
1613 | } | 1508 | } |
1614 | hw_dbg(hw, "Unable to determine valid NVM bank via EEC - " | 1509 | e_dbg("Unable to determine valid NVM bank via EEC - " |
1615 | "reading flash signature\n"); | 1510 | "reading flash signature\n"); |
1616 | /* fall-thru */ | 1511 | /* fall-thru */ |
1617 | default: | 1512 | default: |
@@ -1641,7 +1536,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) | |||
1641 | return 0; | 1536 | return 0; |
1642 | } | 1537 | } |
1643 | 1538 | ||
1644 | hw_dbg(hw, "ERROR: No valid NVM bank present\n"); | 1539 | e_dbg("ERROR: No valid NVM bank present\n"); |
1645 | return -E1000_ERR_NVM; | 1540 | return -E1000_ERR_NVM; |
1646 | } | 1541 | } |
1647 | 1542 | ||
@@ -1669,16 +1564,16 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
1669 | 1564 | ||
1670 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || | 1565 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || |
1671 | (words == 0)) { | 1566 | (words == 0)) { |
1672 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1567 | e_dbg("nvm parameter(s) out of bounds\n"); |
1673 | ret_val = -E1000_ERR_NVM; | 1568 | ret_val = -E1000_ERR_NVM; |
1674 | goto out; | 1569 | goto out; |
1675 | } | 1570 | } |
1676 | 1571 | ||
1677 | nvm->ops.acquire_nvm(hw); | 1572 | nvm->ops.acquire(hw); |
1678 | 1573 | ||
1679 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); | 1574 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); |
1680 | if (ret_val) { | 1575 | if (ret_val) { |
1681 | hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n"); | 1576 | e_dbg("Could not detect valid bank, assuming bank 0\n"); |
1682 | bank = 0; | 1577 | bank = 0; |
1683 | } | 1578 | } |
1684 | 1579 | ||
@@ -1700,11 +1595,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
1700 | } | 1595 | } |
1701 | } | 1596 | } |
1702 | 1597 | ||
1703 | nvm->ops.release_nvm(hw); | 1598 | nvm->ops.release(hw); |
1704 | 1599 | ||
1705 | out: | 1600 | out: |
1706 | if (ret_val) | 1601 | if (ret_val) |
1707 | hw_dbg(hw, "NVM read error: %d\n", ret_val); | 1602 | e_dbg("NVM read error: %d\n", ret_val); |
1708 | 1603 | ||
1709 | return ret_val; | 1604 | return ret_val; |
1710 | } | 1605 | } |
@@ -1726,7 +1621,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
1726 | 1621 | ||
1727 | /* Check if the flash descriptor is valid */ | 1622 | /* Check if the flash descriptor is valid */ |
1728 | if (hsfsts.hsf_status.fldesvalid == 0) { | 1623 | if (hsfsts.hsf_status.fldesvalid == 0) { |
1729 | hw_dbg(hw, "Flash descriptor invalid. " | 1624 | e_dbg("Flash descriptor invalid. " |
1730 | "SW Sequencing must be used."); | 1625 | "SW Sequencing must be used."); |
1731 | return -E1000_ERR_NVM; | 1626 | return -E1000_ERR_NVM; |
1732 | } | 1627 | } |
@@ -1749,7 +1644,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
1749 | if (hsfsts.hsf_status.flcinprog == 0) { | 1644 | if (hsfsts.hsf_status.flcinprog == 0) { |
1750 | /* | 1645 | /* |
1751 | * There is no cycle running at present, | 1646 | * There is no cycle running at present, |
1752 | * so we can start a cycle | 1647 | * so we can start a cycle. |
1753 | * Begin by setting Flash Cycle Done. | 1648 | * Begin by setting Flash Cycle Done. |
1754 | */ | 1649 | */ |
1755 | hsfsts.hsf_status.flcdone = 1; | 1650 | hsfsts.hsf_status.flcdone = 1; |
@@ -1757,7 +1652,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
1757 | ret_val = 0; | 1652 | ret_val = 0; |
1758 | } else { | 1653 | } else { |
1759 | /* | 1654 | /* |
1760 | * otherwise poll for sometime so the current | 1655 | * Otherwise poll for sometime so the current |
1761 | * cycle has a chance to end before giving up. | 1656 | * cycle has a chance to end before giving up. |
1762 | */ | 1657 | */ |
1763 | for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { | 1658 | for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { |
@@ -1776,7 +1671,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
1776 | hsfsts.hsf_status.flcdone = 1; | 1671 | hsfsts.hsf_status.flcdone = 1; |
1777 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | 1672 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); |
1778 | } else { | 1673 | } else { |
1779 | hw_dbg(hw, "Flash controller busy, cannot get access"); | 1674 | e_dbg("Flash controller busy, cannot get access"); |
1780 | } | 1675 | } |
1781 | } | 1676 | } |
1782 | 1677 | ||
@@ -1926,7 +1821,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |||
1926 | /* Repeat for some time before giving up. */ | 1821 | /* Repeat for some time before giving up. */ |
1927 | continue; | 1822 | continue; |
1928 | } else if (hsfsts.hsf_status.flcdone == 0) { | 1823 | } else if (hsfsts.hsf_status.flcdone == 0) { |
1929 | hw_dbg(hw, "Timeout error - flash cycle " | 1824 | e_dbg("Timeout error - flash cycle " |
1930 | "did not complete."); | 1825 | "did not complete."); |
1931 | break; | 1826 | break; |
1932 | } | 1827 | } |
@@ -1954,18 +1849,18 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
1954 | 1849 | ||
1955 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || | 1850 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || |
1956 | (words == 0)) { | 1851 | (words == 0)) { |
1957 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1852 | e_dbg("nvm parameter(s) out of bounds\n"); |
1958 | return -E1000_ERR_NVM; | 1853 | return -E1000_ERR_NVM; |
1959 | } | 1854 | } |
1960 | 1855 | ||
1961 | nvm->ops.acquire_nvm(hw); | 1856 | nvm->ops.acquire(hw); |
1962 | 1857 | ||
1963 | for (i = 0; i < words; i++) { | 1858 | for (i = 0; i < words; i++) { |
1964 | dev_spec->shadow_ram[offset+i].modified = 1; | 1859 | dev_spec->shadow_ram[offset+i].modified = true; |
1965 | dev_spec->shadow_ram[offset+i].value = data[i]; | 1860 | dev_spec->shadow_ram[offset+i].value = data[i]; |
1966 | } | 1861 | } |
1967 | 1862 | ||
1968 | nvm->ops.release_nvm(hw); | 1863 | nvm->ops.release(hw); |
1969 | 1864 | ||
1970 | return 0; | 1865 | return 0; |
1971 | } | 1866 | } |
@@ -1996,7 +1891,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
1996 | if (nvm->type != e1000_nvm_flash_sw) | 1891 | if (nvm->type != e1000_nvm_flash_sw) |
1997 | goto out; | 1892 | goto out; |
1998 | 1893 | ||
1999 | nvm->ops.acquire_nvm(hw); | 1894 | nvm->ops.acquire(hw); |
2000 | 1895 | ||
2001 | /* | 1896 | /* |
2002 | * We're writing to the opposite bank so if we're on bank 1, | 1897 | * We're writing to the opposite bank so if we're on bank 1, |
@@ -2005,7 +1900,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2005 | */ | 1900 | */ |
2006 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); | 1901 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); |
2007 | if (ret_val) { | 1902 | if (ret_val) { |
2008 | hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n"); | 1903 | e_dbg("Could not detect valid bank, assuming bank 0\n"); |
2009 | bank = 0; | 1904 | bank = 0; |
2010 | } | 1905 | } |
2011 | 1906 | ||
@@ -2014,7 +1909,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2014 | old_bank_offset = 0; | 1909 | old_bank_offset = 0; |
2015 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); | 1910 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); |
2016 | if (ret_val) { | 1911 | if (ret_val) { |
2017 | nvm->ops.release_nvm(hw); | 1912 | nvm->ops.release(hw); |
2018 | goto out; | 1913 | goto out; |
2019 | } | 1914 | } |
2020 | } else { | 1915 | } else { |
@@ -2022,7 +1917,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2022 | new_bank_offset = 0; | 1917 | new_bank_offset = 0; |
2023 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); | 1918 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); |
2024 | if (ret_val) { | 1919 | if (ret_val) { |
2025 | nvm->ops.release_nvm(hw); | 1920 | nvm->ops.release(hw); |
2026 | goto out; | 1921 | goto out; |
2027 | } | 1922 | } |
2028 | } | 1923 | } |
@@ -2079,8 +1974,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2079 | */ | 1974 | */ |
2080 | if (ret_val) { | 1975 | if (ret_val) { |
2081 | /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ | 1976 | /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ |
2082 | hw_dbg(hw, "Flash commit failed.\n"); | 1977 | e_dbg("Flash commit failed.\n"); |
2083 | nvm->ops.release_nvm(hw); | 1978 | nvm->ops.release(hw); |
2084 | goto out; | 1979 | goto out; |
2085 | } | 1980 | } |
2086 | 1981 | ||
@@ -2093,7 +1988,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2093 | act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; | 1988 | act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; |
2094 | ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); | 1989 | ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); |
2095 | if (ret_val) { | 1990 | if (ret_val) { |
2096 | nvm->ops.release_nvm(hw); | 1991 | nvm->ops.release(hw); |
2097 | goto out; | 1992 | goto out; |
2098 | } | 1993 | } |
2099 | data &= 0xBFFF; | 1994 | data &= 0xBFFF; |
@@ -2101,7 +1996,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2101 | act_offset * 2 + 1, | 1996 | act_offset * 2 + 1, |
2102 | (u8)(data >> 8)); | 1997 | (u8)(data >> 8)); |
2103 | if (ret_val) { | 1998 | if (ret_val) { |
2104 | nvm->ops.release_nvm(hw); | 1999 | nvm->ops.release(hw); |
2105 | goto out; | 2000 | goto out; |
2106 | } | 2001 | } |
2107 | 2002 | ||
@@ -2114,17 +2009,17 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2114 | act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; | 2009 | act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; |
2115 | ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); | 2010 | ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); |
2116 | if (ret_val) { | 2011 | if (ret_val) { |
2117 | nvm->ops.release_nvm(hw); | 2012 | nvm->ops.release(hw); |
2118 | goto out; | 2013 | goto out; |
2119 | } | 2014 | } |
2120 | 2015 | ||
2121 | /* Great! Everything worked, we can now clear the cached entries. */ | 2016 | /* Great! Everything worked, we can now clear the cached entries. */ |
2122 | for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { | 2017 | for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { |
2123 | dev_spec->shadow_ram[i].modified = 0; | 2018 | dev_spec->shadow_ram[i].modified = false; |
2124 | dev_spec->shadow_ram[i].value = 0xFFFF; | 2019 | dev_spec->shadow_ram[i].value = 0xFFFF; |
2125 | } | 2020 | } |
2126 | 2021 | ||
2127 | nvm->ops.release_nvm(hw); | 2022 | nvm->ops.release(hw); |
2128 | 2023 | ||
2129 | /* | 2024 | /* |
2130 | * Reload the EEPROM, or else modifications will not appear | 2025 | * Reload the EEPROM, or else modifications will not appear |
@@ -2135,7 +2030,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2135 | 2030 | ||
2136 | out: | 2031 | out: |
2137 | if (ret_val) | 2032 | if (ret_val) |
2138 | hw_dbg(hw, "NVM update error: %d\n", ret_val); | 2033 | e_dbg("NVM update error: %d\n", ret_val); |
2139 | 2034 | ||
2140 | return ret_val; | 2035 | return ret_val; |
2141 | } | 2036 | } |
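Across the NVM hunks above, the rename also standardizes the bracketing: nvm->ops.acquire() before touching the flash, nvm->ops.release() on every exit path, including each error return inside the bank-swap flow. A compact sketch of that shape; the wrapper function, offset, and the specific flash call are illustrative.

/* Compact sketch of the acquire/release bracketing used by the NVM
 * paths above; the wrapper and its arguments are illustrative. */
#include "e1000.h"

static s32 example_nvm_read_one(struct e1000_hw *hw, u16 offset, u16 *word)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	s32 ret_val;

	nvm->ops.acquire(hw);				/* was acquire_nvm() */
	ret_val = e1000_read_flash_word_ich8lan(hw, offset, word);
	nvm->ops.release(hw);				/* was release_nvm() */

	return ret_val;
}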
@@ -2193,7 +2088,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) | |||
2193 | union ich8_hws_flash_status hsfsts; | 2088 | union ich8_hws_flash_status hsfsts; |
2194 | u32 gfpreg; | 2089 | u32 gfpreg; |
2195 | 2090 | ||
2196 | nvm->ops.acquire_nvm(hw); | 2091 | nvm->ops.acquire(hw); |
2197 | 2092 | ||
2198 | gfpreg = er32flash(ICH_FLASH_GFPREG); | 2093 | gfpreg = er32flash(ICH_FLASH_GFPREG); |
2199 | 2094 | ||
@@ -2214,7 +2109,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) | |||
2214 | hsfsts.hsf_status.flockdn = true; | 2109 | hsfsts.hsf_status.flockdn = true; |
2215 | ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); | 2110 | ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); |
2216 | 2111 | ||
2217 | nvm->ops.release_nvm(hw); | 2112 | nvm->ops.release(hw); |
2218 | } | 2113 | } |
2219 | 2114 | ||
2220 | /** | 2115 | /** |
@@ -2285,7 +2180,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |||
2285 | /* Repeat for some time before giving up. */ | 2180 | /* Repeat for some time before giving up. */ |
2286 | continue; | 2181 | continue; |
2287 | if (hsfsts.hsf_status.flcdone == 0) { | 2182 | if (hsfsts.hsf_status.flcdone == 0) { |
2288 | hw_dbg(hw, "Timeout error - flash cycle " | 2183 | e_dbg("Timeout error - flash cycle " |
2289 | "did not complete."); | 2184 | "did not complete."); |
2290 | break; | 2185 | break; |
2291 | } | 2186 | } |
@@ -2330,7 +2225,7 @@ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, | |||
2330 | return ret_val; | 2225 | return ret_val; |
2331 | 2226 | ||
2332 | for (program_retries = 0; program_retries < 100; program_retries++) { | 2227 | for (program_retries = 0; program_retries < 100; program_retries++) { |
2333 | hw_dbg(hw, "Retrying Byte %2.2X at offset %u\n", byte, offset); | 2228 | e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); |
2334 | udelay(100); | 2229 | udelay(100); |
2335 | ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); | 2230 | ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); |
2336 | if (!ret_val) | 2231 | if (!ret_val) |
@@ -2360,9 +2255,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) | |||
2360 | u32 flash_bank_size = nvm->flash_bank_size * 2; | 2255 | u32 flash_bank_size = nvm->flash_bank_size * 2; |
2361 | s32 ret_val; | 2256 | s32 ret_val; |
2362 | s32 count = 0; | 2257 | s32 count = 0; |
2363 | s32 iteration; | 2258 | s32 j, iteration, sector_size; |
2364 | s32 sector_size; | ||
2365 | s32 j; | ||
2366 | 2259 | ||
2367 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | 2260 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); |
2368 | 2261 | ||
@@ -2465,7 +2358,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) | |||
2465 | 2358 | ||
2466 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); | 2359 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); |
2467 | if (ret_val) { | 2360 | if (ret_val) { |
2468 | hw_dbg(hw, "NVM Read Error\n"); | 2361 | e_dbg("NVM Read Error\n"); |
2469 | return ret_val; | 2362 | return ret_val; |
2470 | } | 2363 | } |
2471 | 2364 | ||
@@ -2595,10 +2488,10 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
2595 | */ | 2488 | */ |
2596 | ret_val = e1000e_disable_pcie_master(hw); | 2489 | ret_val = e1000e_disable_pcie_master(hw); |
2597 | if (ret_val) { | 2490 | if (ret_val) { |
2598 | hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); | 2491 | e_dbg("PCI-E Master disable polling has failed.\n"); |
2599 | } | 2492 | } |
2600 | 2493 | ||
2601 | hw_dbg(hw, "Masking off all interrupts\n"); | 2494 | e_dbg("Masking off all interrupts\n"); |
2602 | ew32(IMC, 0xffffffff); | 2495 | ew32(IMC, 0xffffffff); |
2603 | 2496 | ||
2604 | /* | 2497 | /* |
@@ -2649,14 +2542,17 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
2649 | ctrl |= E1000_CTRL_PHY_RST; | 2542 | ctrl |= E1000_CTRL_PHY_RST; |
2650 | } | 2543 | } |
2651 | ret_val = e1000_acquire_swflag_ich8lan(hw); | 2544 | ret_val = e1000_acquire_swflag_ich8lan(hw); |
2652 | /* Whether or not the swflag was acquired, we need to reset the part */ | 2545 | e_dbg("Issuing a global reset to ich8lan\n"); |
2653 | hw_dbg(hw, "Issuing a global reset to ich8lan\n"); | ||
2654 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); | 2546 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); |
2655 | msleep(20); | 2547 | msleep(20); |
2656 | 2548 | ||
2657 | if (!ret_val) | 2549 | if (!ret_val) |
2658 | e1000_release_swflag_ich8lan(hw); | 2550 | e1000_release_swflag_ich8lan(hw); |
2659 | 2551 | ||
2552 | /* Perform any necessary post-reset workarounds */ | ||
2553 | if (hw->mac.type == e1000_pchlan) | ||
2554 | ret_val = e1000_hv_phy_workarounds_ich8lan(hw); | ||
2555 | |||
2660 | if (ctrl & E1000_CTRL_PHY_RST) | 2556 | if (ctrl & E1000_CTRL_PHY_RST) |
2661 | ret_val = hw->phy.ops.get_cfg_done(hw); | 2557 | ret_val = hw->phy.ops.get_cfg_done(hw); |
2662 | 2558 | ||
@@ -2670,7 +2566,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
2670 | * return with an error. This can happen in situations | 2566 | * return with an error. This can happen in situations |
2671 | * where there is no eeprom and prevents getting link. | 2567 | * where there is no eeprom and prevents getting link. |
2672 | */ | 2568 | */ |
2673 | hw_dbg(hw, "Auto Read Done did not complete\n"); | 2569 | e_dbg("Auto Read Done did not complete\n"); |
2674 | } | 2570 | } |
2675 | } | 2571 | } |
2676 | /* Dummy read to clear the phy wakeup bit after lcd reset */ | 2572 | /* Dummy read to clear the phy wakeup bit after lcd reset */ |
@@ -2701,9 +2597,6 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
2701 | kab |= E1000_KABGTXD_BGSQLBIAS; | 2597 | kab |= E1000_KABGTXD_BGSQLBIAS; |
2702 | ew32(KABGTXD, kab); | 2598 | ew32(KABGTXD, kab); |
2703 | 2599 | ||
2704 | if (hw->mac.type == e1000_pchlan) | ||
2705 | ret_val = e1000_hv_phy_workarounds_ich8lan(hw); | ||
2706 | |||
2707 | out: | 2600 | out: |
2708 | return ret_val; | 2601 | return ret_val; |
2709 | } | 2602 | } |
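The reset hunks above also move the PCH PHY workaround so it runs immediately after the global reset and swflag release, instead of at the end of the routine. A compressed, runnable model of the resulting ordering is below; every function is a logging stub, so this illustrates the sequence only, not the driver code.

    #include <stdio.h>

    /* Stub steps: each just logs, so the sketch runs end to end. */
    static int  acquire_swflag(void)     { puts("acquire swflag");      return 1; }
    static void release_swflag(void)     { puts("release swflag");                }
    static void issue_global_reset(void) { puts("CTRL.RST");                      }
    static int  hv_phy_workarounds(void) { puts("HV PHY workarounds");  return 0; }
    static void wait_for_cfg_done(void)  { puts("wait for cfg done");             }

    int main(void)
    {
        int have_swflag = acquire_swflag();   /* may fail; reset regardless */

        issue_global_reset();                 /* global reset, ~20 ms settle */
        if (have_swflag)
            release_swflag();

        /* Post-reset workarounds now run right after the reset ... */
        if (hv_phy_workarounds())
            return 1;

        wait_for_cfg_done();                  /* ... before waiting on the PHY */
        return 0;
    }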
@@ -2731,16 +2624,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) | |||
2731 | 2624 | ||
2732 | /* Initialize identification LED */ | 2625 | /* Initialize identification LED */ |
2733 | ret_val = mac->ops.id_led_init(hw); | 2626 | ret_val = mac->ops.id_led_init(hw); |
2734 | if (ret_val) { | 2627 | if (ret_val) |
2735 | hw_dbg(hw, "Error initializing identification LED\n"); | 2628 | e_dbg("Error initializing identification LED\n"); |
2736 | return ret_val; | 2629 | /* This is not fatal and we should not stop init due to this */ |
2737 | } | ||
2738 | 2630 | ||
2739 | /* Setup the receive address. */ | 2631 | /* Setup the receive address. */ |
2740 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); | 2632 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); |
2741 | 2633 | ||
2742 | /* Zero out the Multicast HASH table */ | 2634 | /* Zero out the Multicast HASH table */ |
2743 | hw_dbg(hw, "Zeroing the MTA\n"); | 2635 | e_dbg("Zeroing the MTA\n"); |
2744 | for (i = 0; i < mac->mta_reg_count; i++) | 2636 | for (i = 0; i < mac->mta_reg_count; i++) |
2745 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); | 2637 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); |
2746 | 2638 | ||
@@ -2750,7 +2642,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) | |||
2750 | * Reset the phy after disabling host wakeup to reset the Rx buffer. | 2642 | * Reset the phy after disabling host wakeup to reset the Rx buffer. |
2751 | */ | 2643 | */ |
2752 | if (hw->phy.type == e1000_phy_82578) { | 2644 | if (hw->phy.type == e1000_phy_82578) { |
2753 | hw->phy.ops.read_phy_reg(hw, BM_WUC, &i); | 2645 | hw->phy.ops.read_reg(hw, BM_WUC, &i); |
2754 | ret_val = e1000_phy_hw_reset_ich8lan(hw); | 2646 | ret_val = e1000_phy_hw_reset_ich8lan(hw); |
2755 | if (ret_val) | 2647 | if (ret_val) |
2756 | return ret_val; | 2648 | return ret_val; |

@@ -2848,6 +2740,16 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) | |||
2848 | reg &= ~(1 << 31); | 2740 | reg &= ~(1 << 31); |
2849 | ew32(STATUS, reg); | 2741 | ew32(STATUS, reg); |
2850 | } | 2742 | } |
2743 | |||
2744 | /* | ||
2745 | * work-around descriptor data corruption issue during nfs v2 udp | ||
2746 | * traffic, just disable the nfs filtering capability | ||
2747 | */ | ||
2748 | reg = er32(RFCTL); | ||
2749 | reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); | ||
2750 | ew32(RFCTL, reg); | ||
2751 | |||
2752 | return; | ||
2851 | } | 2753 | } |
2852 | 2754 | ||
2853 | /** | 2755 | /** |
@@ -2886,7 +2788,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) | |||
2886 | */ | 2788 | */ |
2887 | hw->fc.current_mode = hw->fc.requested_mode; | 2789 | hw->fc.current_mode = hw->fc.requested_mode; |
2888 | 2790 | ||
2889 | hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", | 2791 | e_dbg("After fix-ups FlowControl is now = %x\n", |
2890 | hw->fc.current_mode); | 2792 | hw->fc.current_mode); |
2891 | 2793 | ||
2892 | /* Continue to configure the copper link. */ | 2794 | /* Continue to configure the copper link. */ |
@@ -2897,7 +2799,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) | |||
2897 | ew32(FCTTV, hw->fc.pause_time); | 2799 | ew32(FCTTV, hw->fc.pause_time); |
2898 | if ((hw->phy.type == e1000_phy_82578) || | 2800 | if ((hw->phy.type == e1000_phy_82578) || |
2899 | (hw->phy.type == e1000_phy_82577)) { | 2801 | (hw->phy.type == e1000_phy_82577)) { |
2900 | ret_val = hw->phy.ops.write_phy_reg(hw, | 2802 | ret_val = hw->phy.ops.write_reg(hw, |
2901 | PHY_REG(BM_PORT_CTRL_PAGE, 27), | 2803 | PHY_REG(BM_PORT_CTRL_PAGE, 27), |
2902 | hw->fc.pause_time); | 2804 | hw->fc.pause_time); |
2903 | if (ret_val) | 2805 | if (ret_val) |
@@ -2931,14 +2833,16 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) | |||
2931 | * and increase the max iterations when polling the phy; | 2833 | * and increase the max iterations when polling the phy; |
2932 | * this fixes erroneous timeouts at 10Mbps. | 2834 | * this fixes erroneous timeouts at 10Mbps. |
2933 | */ | 2835 | */ |
2934 | ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); | 2836 | ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF); |
2935 | if (ret_val) | 2837 | if (ret_val) |
2936 | return ret_val; | 2838 | return ret_val; |
2937 | ret_val = e1000e_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data); | 2839 | ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, |
2840 | &reg_data); | ||
2938 | if (ret_val) | 2841 | if (ret_val) |
2939 | return ret_val; | 2842 | return ret_val; |
2940 | reg_data |= 0x3F; | 2843 | reg_data |= 0x3F; |
2941 | ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data); | 2844 | ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, |
2845 | reg_data); | ||
2942 | if (ret_val) | 2846 | if (ret_val) |
2943 | return ret_val; | 2847 | return ret_val; |
2944 | 2848 | ||
@@ -2960,7 +2864,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) | |||
2960 | return ret_val; | 2864 | return ret_val; |
2961 | break; | 2865 | break; |
2962 | case e1000_phy_ife: | 2866 | case e1000_phy_ife: |
2963 | ret_val = hw->phy.ops.read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, | 2867 | ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, |
2964 | &reg_data); | 2868 | &reg_data); | ||
2965 | if (ret_val) | 2869 | if (ret_val) |
2966 | return ret_val; | 2870 | return ret_val; |
@@ -2979,7 +2883,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) | |||
2979 | reg_data |= IFE_PMC_AUTO_MDIX; | 2883 | reg_data |= IFE_PMC_AUTO_MDIX; |
2980 | break; | 2884 | break; |
2981 | } | 2885 | } |
2982 | ret_val = hw->phy.ops.write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, | 2886 | ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, |
2983 | reg_data); | 2887 | reg_data); |
2984 | if (ret_val) | 2888 | if (ret_val) |
2985 | return ret_val; | 2889 | return ret_val; |
@@ -3092,8 +2996,8 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) | |||
3092 | * @hw: pointer to the HW structure | 2996 | * @hw: pointer to the HW structure |
3093 | * @state: boolean value used to set the current Kumeran workaround state | 2997 | * @state: boolean value used to set the current Kumeran workaround state |
3094 | * | 2998 | * |
3095 | * If ICH8, set the current Kumeran workaround state (enabled - TRUE | 2999 | * If ICH8, set the current Kumeran workaround state (enabled - true |
3096 | * /disabled - FALSE). | 3000 | * /disabled - false). |
3097 | **/ | 3001 | **/ |
3098 | void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, | 3002 | void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, |
3099 | bool state) | 3003 | bool state) |
@@ -3101,7 +3005,7 @@ void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, | |||
3101 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | 3005 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; |
3102 | 3006 | ||
3103 | if (hw->mac.type != e1000_ich8lan) { | 3007 | if (hw->mac.type != e1000_ich8lan) { |
3104 | hw_dbg(hw, "Workaround applies to ICH8 only.\n"); | 3008 | e_dbg("Workaround applies to ICH8 only.\n"); |
3105 | return; | 3009 | return; |
3106 | } | 3010 | } |
3107 | 3011 | ||
@@ -3209,6 +3113,7 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) | |||
3209 | u32 phy_ctrl; | 3113 | u32 phy_ctrl; |
3210 | 3114 | ||
3211 | switch (hw->mac.type) { | 3115 | switch (hw->mac.type) { |
3116 | case e1000_ich8lan: | ||
3212 | case e1000_ich9lan: | 3117 | case e1000_ich9lan: |
3213 | case e1000_ich10lan: | 3118 | case e1000_ich10lan: |
3214 | case e1000_pchlan: | 3119 | case e1000_pchlan: |
@@ -3281,7 +3186,7 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) | |||
3281 | **/ | 3186 | **/ |
3282 | static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) | 3187 | static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) |
3283 | { | 3188 | { |
3284 | return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, | 3189 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, |
3285 | (u16)hw->mac.ledctl_mode1); | 3190 | (u16)hw->mac.ledctl_mode1); |
3286 | } | 3191 | } |
3287 | 3192 | ||
@@ -3293,7 +3198,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) | |||
3293 | **/ | 3198 | **/ |
3294 | static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) | 3199 | static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) |
3295 | { | 3200 | { |
3296 | return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, | 3201 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, |
3297 | (u16)hw->mac.ledctl_default); | 3202 | (u16)hw->mac.ledctl_default); |
3298 | } | 3203 | } |
3299 | 3204 | ||
@@ -3325,7 +3230,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw) | |||
3325 | } | 3230 | } |
3326 | } | 3231 | } |
3327 | 3232 | ||
3328 | return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data); | 3233 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); |
3329 | } | 3234 | } |
3330 | 3235 | ||
3331 | /** | 3236 | /** |
@@ -3356,7 +3261,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw) | |||
3356 | } | 3261 | } |
3357 | } | 3262 | } |
3358 | 3263 | ||
3359 | return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data); | 3264 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); |
3360 | } | 3265 | } |
3361 | 3266 | ||
3362 | /** | 3267 | /** |
@@ -3379,8 +3284,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) | |||
3379 | if (status & E1000_STATUS_PHYRA) | 3284 | if (status & E1000_STATUS_PHYRA) |
3380 | ew32(STATUS, status & ~E1000_STATUS_PHYRA); | 3285 | ew32(STATUS, status & ~E1000_STATUS_PHYRA); |
3381 | else | 3286 | else |
3382 | hw_dbg(hw, | 3287 | e_dbg("PHY Reset Asserted not set - needs delay\n"); |
3383 | "PHY Reset Asserted not set - needs delay\n"); | ||
3384 | } | 3288 | } |
3385 | 3289 | ||
3386 | e1000e_get_cfg_done(hw); | 3290 | e1000e_get_cfg_done(hw); |
@@ -3395,7 +3299,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) | |||
3395 | } else { | 3299 | } else { |
3396 | if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { | 3300 | if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { |
3397 | /* Maybe we should do a basic PHY config */ | 3301 | /* Maybe we should do a basic PHY config */ |
3398 | hw_dbg(hw, "EEPROM not present\n"); | 3302 | e_dbg("EEPROM not present\n"); |
3399 | return -E1000_ERR_CONFIG; | 3303 | return -E1000_ERR_CONFIG; |
3400 | } | 3304 | } |
3401 | } | 3305 | } |
@@ -3404,6 +3308,23 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) | |||
3404 | } | 3308 | } |
3405 | 3309 | ||
3406 | /** | 3310 | /** |
3311 | * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down | ||
3312 | * @hw: pointer to the HW structure | ||
3313 | * | ||
3314 | * In the case of a PHY power down to save power, or to turn off link during a | ||
3315 | * driver unload, or wake on lan is not enabled, remove the link. | ||
3316 | **/ | ||
3317 | static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) | ||
3318 | { | ||
3319 | /* If the management interface is not enabled, then power down */ | ||
3320 | if (!(hw->mac.ops.check_mng_mode(hw) || | ||
3321 | hw->phy.ops.check_reset_block(hw))) | ||
3322 | e1000_power_down_phy_copper(hw); | ||
3323 | |||
3324 | return; | ||
3325 | } | ||
3326 | |||
3327 | /** | ||
3407 | * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters | 3328 | * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters |
3408 | * @hw: pointer to the HW structure | 3329 | * @hw: pointer to the HW structure |
3409 | * | 3330 | * |
@@ -3412,42 +3333,41 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) | |||
3412 | **/ | 3333 | **/ |
3413 | static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) | 3334 | static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) |
3414 | { | 3335 | { |
3415 | u32 temp; | ||
3416 | u16 phy_data; | 3336 | u16 phy_data; |
3417 | 3337 | ||
3418 | e1000e_clear_hw_cntrs_base(hw); | 3338 | e1000e_clear_hw_cntrs_base(hw); |
3419 | 3339 | ||
3420 | temp = er32(ALGNERRC); | 3340 | er32(ALGNERRC); |
3421 | temp = er32(RXERRC); | 3341 | er32(RXERRC); |
3422 | temp = er32(TNCRS); | 3342 | er32(TNCRS); |
3423 | temp = er32(CEXTERR); | 3343 | er32(CEXTERR); |
3424 | temp = er32(TSCTC); | 3344 | er32(TSCTC); |
3425 | temp = er32(TSCTFC); | 3345 | er32(TSCTFC); |
3426 | 3346 | ||
3427 | temp = er32(MGTPRC); | 3347 | er32(MGTPRC); |
3428 | temp = er32(MGTPDC); | 3348 | er32(MGTPDC); |
3429 | temp = er32(MGTPTC); | 3349 | er32(MGTPTC); |
3430 | 3350 | ||
3431 | temp = er32(IAC); | 3351 | er32(IAC); |
3432 | temp = er32(ICRXOC); | 3352 | er32(ICRXOC); |
3433 | 3353 | ||
3434 | /* Clear PHY statistics registers */ | 3354 | /* Clear PHY statistics registers */ |
3435 | if ((hw->phy.type == e1000_phy_82578) || | 3355 | if ((hw->phy.type == e1000_phy_82578) || |
3436 | (hw->phy.type == e1000_phy_82577)) { | 3356 | (hw->phy.type == e1000_phy_82577)) { |
3437 | hw->phy.ops.read_phy_reg(hw, HV_SCC_UPPER, &phy_data); | 3357 | hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data); |
3438 | hw->phy.ops.read_phy_reg(hw, HV_SCC_LOWER, &phy_data); | 3358 | hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data); |
3439 | hw->phy.ops.read_phy_reg(hw, HV_ECOL_UPPER, &phy_data); | 3359 | hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data); |
3440 | hw->phy.ops.read_phy_reg(hw, HV_ECOL_LOWER, &phy_data); | 3360 | hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data); |
3441 | hw->phy.ops.read_phy_reg(hw, HV_MCC_UPPER, &phy_data); | 3361 | hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data); |
3442 | hw->phy.ops.read_phy_reg(hw, HV_MCC_LOWER, &phy_data); | 3362 | hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data); |
3443 | hw->phy.ops.read_phy_reg(hw, HV_LATECOL_UPPER, &phy_data); | 3363 | hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data); |
3444 | hw->phy.ops.read_phy_reg(hw, HV_LATECOL_LOWER, &phy_data); | 3364 | hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data); |
3445 | hw->phy.ops.read_phy_reg(hw, HV_COLC_UPPER, &phy_data); | 3365 | hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data); |
3446 | hw->phy.ops.read_phy_reg(hw, HV_COLC_LOWER, &phy_data); | 3366 | hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data); |
3447 | hw->phy.ops.read_phy_reg(hw, HV_DC_UPPER, &phy_data); | 3367 | hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data); |
3448 | hw->phy.ops.read_phy_reg(hw, HV_DC_LOWER, &phy_data); | 3368 | hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data); |
3449 | hw->phy.ops.read_phy_reg(hw, HV_TNCRS_UPPER, &phy_data); | 3369 | hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data); |
3450 | hw->phy.ops.read_phy_reg(hw, HV_TNCRS_LOWER, &phy_data); | 3370 | hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data); |
3451 | } | 3371 | } |
3452 | } | 3372 | } |
3453 | 3373 | ||
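The statistics registers read above are clear-on-read, so the new code simply reads each one and discards the result; the removed temp assignments only held a value nobody used. A short stand-alone illustration of the pattern (fake_counter stands in for a hardware register):

    #include <stdint.h>

    static uint32_t fake_counter = 42;   /* stand-in clear-on-read counter */

    static uint32_t read_counter(void)
    {
        uint32_t v = fake_counter;
        fake_counter = 0;                /* hardware zeroes the counter on read */
        return v;
    }

    static void clear_counters(void)
    {
        /* The value is irrelevant; the read itself clears the register. */
        (void)read_counter();
    }

    int main(void)
    {
        clear_counters();
        return (int)fake_counter;        /* 0 after the clearing read */
    }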
@@ -3458,6 +3378,7 @@ static struct e1000_mac_operations ich8_mac_ops = { | |||
3458 | /* cleanup_led dependent on mac type */ | 3378 | /* cleanup_led dependent on mac type */ |
3459 | .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, | 3379 | .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, |
3460 | .get_bus_info = e1000_get_bus_info_ich8lan, | 3380 | .get_bus_info = e1000_get_bus_info_ich8lan, |
3381 | .set_lan_id = e1000_set_lan_id_single_port, | ||
3461 | .get_link_up_info = e1000_get_link_up_info_ich8lan, | 3382 | .get_link_up_info = e1000_get_link_up_info_ich8lan, |
3462 | /* led_on dependent on mac type */ | 3383 | /* led_on dependent on mac type */ |
3463 | /* led_off dependent on mac type */ | 3384 | /* led_off dependent on mac type */ |
@@ -3470,29 +3391,27 @@ static struct e1000_mac_operations ich8_mac_ops = { | |||
3470 | }; | 3391 | }; |
3471 | 3392 | ||
3472 | static struct e1000_phy_operations ich8_phy_ops = { | 3393 | static struct e1000_phy_operations ich8_phy_ops = { |
3473 | .acquire_phy = e1000_acquire_swflag_ich8lan, | 3394 | .acquire = e1000_acquire_swflag_ich8lan, |
3474 | .check_reset_block = e1000_check_reset_block_ich8lan, | 3395 | .check_reset_block = e1000_check_reset_block_ich8lan, |
3475 | .commit_phy = NULL, | 3396 | .commit = NULL, |
3476 | .force_speed_duplex = e1000_phy_force_speed_duplex_ich8lan, | ||
3477 | .get_cfg_done = e1000_get_cfg_done_ich8lan, | 3397 | .get_cfg_done = e1000_get_cfg_done_ich8lan, |
3478 | .get_cable_length = e1000e_get_cable_length_igp_2, | 3398 | .get_cable_length = e1000e_get_cable_length_igp_2, |
3479 | .get_phy_info = e1000_get_phy_info_ich8lan, | 3399 | .read_reg = e1000e_read_phy_reg_igp, |
3480 | .read_phy_reg = e1000e_read_phy_reg_igp, | 3400 | .release = e1000_release_swflag_ich8lan, |
3481 | .release_phy = e1000_release_swflag_ich8lan, | 3401 | .reset = e1000_phy_hw_reset_ich8lan, |
3482 | .reset_phy = e1000_phy_hw_reset_ich8lan, | ||
3483 | .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, | 3402 | .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, |
3484 | .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, | 3403 | .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, |
3485 | .write_phy_reg = e1000e_write_phy_reg_igp, | 3404 | .write_reg = e1000e_write_phy_reg_igp, |
3486 | }; | 3405 | }; |
3487 | 3406 | ||
3488 | static struct e1000_nvm_operations ich8_nvm_ops = { | 3407 | static struct e1000_nvm_operations ich8_nvm_ops = { |
3489 | .acquire_nvm = e1000_acquire_nvm_ich8lan, | 3408 | .acquire = e1000_acquire_nvm_ich8lan, |
3490 | .read_nvm = e1000_read_nvm_ich8lan, | 3409 | .read = e1000_read_nvm_ich8lan, |
3491 | .release_nvm = e1000_release_nvm_ich8lan, | 3410 | .release = e1000_release_nvm_ich8lan, |
3492 | .update_nvm = e1000_update_nvm_checksum_ich8lan, | 3411 | .update = e1000_update_nvm_checksum_ich8lan, |
3493 | .valid_led_default = e1000_valid_led_default_ich8lan, | 3412 | .valid_led_default = e1000_valid_led_default_ich8lan, |
3494 | .validate_nvm = e1000_validate_nvm_checksum_ich8lan, | 3413 | .validate = e1000_validate_nvm_checksum_ich8lan, |
3495 | .write_nvm = e1000_write_nvm_ich8lan, | 3414 | .write = e1000_write_nvm_ich8lan, |
3496 | }; | 3415 | }; |
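The rename above (acquire_nvm/release_nvm/read_nvm becoming acquire/release/read, and likewise for the PHY ops) only changes member names in the function-pointer tables; callers still go through the same indirection. Below is a minimal, self-contained model of that pattern with invented names, shown only to make the call shape concrete.

    #include <stdint.h>
    #include <stdio.h>

    struct fake_hw;   /* opaque handle, stands in for struct e1000_hw */

    /* Function-pointer table in the style of e1000_nvm_operations. */
    struct nvm_ops {
        int  (*acquire)(struct fake_hw *hw);
        int  (*read)(struct fake_hw *hw, uint16_t offset, uint16_t *data);
        void (*release)(struct fake_hw *hw);
    };

    struct fake_hw {
        struct nvm_ops ops;
    };

    /* Dummy implementations so the example runs. */
    static int  acq(struct fake_hw *hw) { (void)hw; return 0; }
    static int  rd(struct fake_hw *hw, uint16_t off, uint16_t *d)
    { (void)hw; (void)off; *d = 0x1234; return 0; }
    static void rel(struct fake_hw *hw) { (void)hw; }

    int main(void)
    {
        struct fake_hw hw = { .ops = { .acquire = acq, .read = rd, .release = rel } };
        uint16_t word;

        if (hw.ops.acquire(&hw))    /* same call shape as nvm->ops.acquire(hw) */
            return 1;
        hw.ops.read(&hw, 0, &word);
        hw.ops.release(&hw);
        printf("word = 0x%04x\n", (unsigned)word);
        return 0;
    }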
3497 | 3416 | ||
3498 | struct e1000_info e1000_ich8_info = { | 3417 | struct e1000_info e1000_ich8_info = { |
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 99ba2b8a2a05..a8b2c0de27c4 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -26,11 +26,6 @@ | |||
26 | 26 | ||
27 | *******************************************************************************/ | 27 | *******************************************************************************/ |
28 | 28 | ||
29 | #include <linux/netdevice.h> | ||
30 | #include <linux/ethtool.h> | ||
31 | #include <linux/delay.h> | ||
32 | #include <linux/pci.h> | ||
33 | |||
34 | #include "e1000.h" | 29 | #include "e1000.h" |
35 | 30 | ||
36 | enum e1000_mng_mode { | 31 | enum e1000_mng_mode { |
@@ -56,10 +51,10 @@ enum e1000_mng_mode { | |||
56 | **/ | 51 | **/ |
57 | s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) | 52 | s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) |
58 | { | 53 | { |
54 | struct e1000_mac_info *mac = &hw->mac; | ||
59 | struct e1000_bus_info *bus = &hw->bus; | 55 | struct e1000_bus_info *bus = &hw->bus; |
60 | struct e1000_adapter *adapter = hw->adapter; | 56 | struct e1000_adapter *adapter = hw->adapter; |
61 | u32 status; | 57 | u16 pcie_link_status, cap_offset; |
62 | u16 pcie_link_status, pci_header_type, cap_offset; | ||
63 | 58 | ||
64 | cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); | 59 | cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); |
65 | if (!cap_offset) { | 60 | if (!cap_offset) { |
@@ -73,21 +68,64 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) | |||
73 | PCIE_LINK_WIDTH_SHIFT); | 68 | PCIE_LINK_WIDTH_SHIFT); |
74 | } | 69 | } |
75 | 70 | ||
76 | pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER, | 71 | mac->ops.set_lan_id(hw); |
77 | &pci_header_type); | ||
78 | if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) { | ||
79 | status = er32(STATUS); | ||
80 | bus->func = (status & E1000_STATUS_FUNC_MASK) | ||
81 | >> E1000_STATUS_FUNC_SHIFT; | ||
82 | } else { | ||
83 | bus->func = 0; | ||
84 | } | ||
85 | 72 | ||
86 | return 0; | 73 | return 0; |
87 | } | 74 | } |
88 | 75 | ||
89 | /** | 76 | /** |
90 | * e1000e_write_vfta - Write value to VLAN filter table | 77 | * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices |
78 | * | ||
79 | * @hw: pointer to the HW structure | ||
80 | * | ||
81 | * Determines the LAN function id by reading memory-mapped registers | ||
82 | * and swaps the port value if requested. | ||
83 | **/ | ||
84 | void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) | ||
85 | { | ||
86 | struct e1000_bus_info *bus = &hw->bus; | ||
87 | u32 reg; | ||
88 | |||
89 | /* | ||
90 | * The status register reports the correct function number | ||
91 | * for the device regardless of function swap state. | ||
92 | */ | ||
93 | reg = er32(STATUS); | ||
94 | bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; | ||
95 | } | ||
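e1000_set_lan_id_multi_port_pcie() above derives the port (function) number by masking and shifting a field out of the STATUS register. A worked, stand-alone example of that extraction follows; the mask, shift, and sample readback value are placeholders for the sketch, not the real E1000_STATUS_FUNC_* definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder values for the sketch only. */
    #define STATUS_FUNC_MASK  0x0000000Cu
    #define STATUS_FUNC_SHIFT 2

    int main(void)
    {
        uint32_t status = 0x00000084;   /* pretend STATUS readback */
        unsigned func = (status & STATUS_FUNC_MASK) >> STATUS_FUNC_SHIFT;

        printf("LAN function id = %u\n", func);   /* prints 1 */
        return 0;
    }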
96 | |||
97 | /** | ||
98 | * e1000_set_lan_id_single_port - Set LAN id for a single port device | ||
99 | * @hw: pointer to the HW structure | ||
100 | * | ||
101 | * Sets the LAN function id to zero for a single port device. | ||
102 | **/ | ||
103 | void e1000_set_lan_id_single_port(struct e1000_hw *hw) | ||
104 | { | ||
105 | struct e1000_bus_info *bus = &hw->bus; | ||
106 | |||
107 | bus->func = 0; | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * e1000_clear_vfta_generic - Clear VLAN filter table | ||
112 | * @hw: pointer to the HW structure | ||
113 | * | ||
114 | * Clears the register array which contains the VLAN filter table by | ||
115 | * setting all the values to 0. | ||
116 | **/ | ||
117 | void e1000_clear_vfta_generic(struct e1000_hw *hw) | ||
118 | { | ||
119 | u32 offset; | ||
120 | |||
121 | for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { | ||
122 | E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); | ||
123 | e1e_flush(); | ||
124 | } | ||
125 | } | ||
126 | |||
127 | /** | ||
128 | * e1000_write_vfta_generic - Write value to VLAN filter table | ||
91 | * @hw: pointer to the HW structure | 129 | * @hw: pointer to the HW structure |
92 | * @offset: register offset in VLAN filter table | 130 | * @offset: register offset in VLAN filter table |
93 | * @value: register value written to VLAN filter table | 131 | * @value: register value written to VLAN filter table |
@@ -95,7 +133,7 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) | |||
95 | * Writes value at the given offset in the register array which stores | 133 | * Writes value at the given offset in the register array which stores |
96 | * the VLAN filter table. | 134 | * the VLAN filter table. |
97 | **/ | 135 | **/ |
98 | void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) | 136 | void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) |
99 | { | 137 | { |
100 | E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); | 138 | E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); |
101 | e1e_flush(); | 139 | e1e_flush(); |
@@ -113,20 +151,79 @@ void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) | |||
113 | void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) | 151 | void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) |
114 | { | 152 | { |
115 | u32 i; | 153 | u32 i; |
154 | u8 mac_addr[ETH_ALEN] = {0}; | ||
116 | 155 | ||
117 | /* Setup the receive address */ | 156 | /* Setup the receive address */ |
118 | hw_dbg(hw, "Programming MAC Address into RAR[0]\n"); | 157 | e_dbg("Programming MAC Address into RAR[0]\n"); |
119 | 158 | ||
120 | e1000e_rar_set(hw, hw->mac.addr, 0); | 159 | e1000e_rar_set(hw, hw->mac.addr, 0); |
121 | 160 | ||
122 | /* Zero out the other (rar_entry_count - 1) receive addresses */ | 161 | /* Zero out the other (rar_entry_count - 1) receive addresses */ |
123 | hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1); | 162 | e_dbg("Clearing RAR[1-%u]\n", rar_count-1); |
124 | for (i = 1; i < rar_count; i++) { | 163 | for (i = 1; i < rar_count; i++) |
125 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); | 164 | e1000e_rar_set(hw, mac_addr, i); |
126 | e1e_flush(); | 165 | } |
127 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0); | 166 | |
128 | e1e_flush(); | 167 | /** |
168 | * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr | ||
169 | * @hw: pointer to the HW structure | ||
170 | * | ||
171 | * Checks the nvm for an alternate MAC address. An alternate MAC address | ||
172 | * can be setup by pre-boot software and must be treated like a permanent | ||
173 | * address and must override the actual permanent MAC address. If an | ||
174 | * alternate MAC address is found it is programmed into RAR0, replacing | ||
175 | * the permanent address that was installed into RAR0 by the Si on reset. | ||
176 | * This function will return SUCCESS unless it encounters an error while | ||
177 | * reading the EEPROM. | ||
178 | **/ | ||
179 | s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) | ||
180 | { | ||
181 | u32 i; | ||
182 | s32 ret_val = 0; | ||
183 | u16 offset, nvm_alt_mac_addr_offset, nvm_data; | ||
184 | u8 alt_mac_addr[ETH_ALEN]; | ||
185 | |||
186 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, | ||
187 | &nvm_alt_mac_addr_offset); | ||
188 | if (ret_val) { | ||
189 | e_dbg("NVM Read Error\n"); | ||
190 | goto out; | ||
129 | } | 191 | } |
192 | |||
193 | if (nvm_alt_mac_addr_offset == 0xFFFF) { | ||
194 | /* There is no Alternate MAC Address */ | ||
195 | goto out; | ||
196 | } | ||
197 | |||
198 | if (hw->bus.func == E1000_FUNC_1) | ||
199 | nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; | ||
200 | for (i = 0; i < ETH_ALEN; i += 2) { | ||
201 | offset = nvm_alt_mac_addr_offset + (i >> 1); | ||
202 | ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); | ||
203 | if (ret_val) { | ||
204 | e_dbg("NVM Read Error\n"); | ||
205 | goto out; | ||
206 | } | ||
207 | |||
208 | alt_mac_addr[i] = (u8)(nvm_data & 0xFF); | ||
209 | alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); | ||
210 | } | ||
211 | |||
212 | /* if multicast bit is set, the alternate address will not be used */ | ||
213 | if (alt_mac_addr[0] & 0x01) { | ||
214 | e_dbg("Ignoring Alternate Mac Address with MC bit set\n"); | ||
215 | goto out; | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * We have a valid alternate MAC address, and we want to treat it the | ||
220 | * same as the normal permanent MAC address stored by the HW into the | ||
221 | * RAR. Do this by mapping this address into RAR0. | ||
222 | */ | ||
223 | e1000e_rar_set(hw, alt_mac_addr, 0); | ||
224 | |||
225 | out: | ||
226 | return ret_val; | ||
130 | } | 227 | } |
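The new e1000_check_alt_mac_addr_generic() above assembles the address two bytes at a time: each 16-bit NVM word supplies one even-indexed byte (low byte) and the following odd-indexed byte (high byte), and the address is rejected if its first byte has the multicast bit (bit 0) set. A small worked example of that unpacking and check, with invented word values:

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_ALEN 6

    int main(void)
    {
        /* Pretend NVM contents: three words encoding 00:1b:21:aa:bb:cc. */
        uint16_t nvm_words[ETH_ALEN / 2] = { 0x1b00, 0xaa21, 0xccbb };
        uint8_t  mac[ETH_ALEN];

        for (int i = 0; i < ETH_ALEN; i += 2) {
            uint16_t w = nvm_words[i / 2];
            mac[i]     = (uint8_t)(w & 0xFF);   /* low byte first */
            mac[i + 1] = (uint8_t)(w >> 8);
        }

        if (mac[0] & 0x01)                       /* multicast bit set? */
            puts("ignoring alternate MAC with MC bit set");
        else
            printf("alt MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
    }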
131 | 228 | ||
132 | /** | 229 | /** |
@@ -152,10 +249,19 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) | |||
152 | 249 | ||
153 | rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); | 250 | rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); |
154 | 251 | ||
155 | rar_high |= E1000_RAH_AV; | 252 | /* If MAC address zero, no need to set the AV bit */ |
253 | if (rar_low || rar_high) | ||
254 | rar_high |= E1000_RAH_AV; | ||
156 | 255 | ||
157 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low); | 256 | /* |
158 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high); | 257 | * Some bridges will combine consecutive 32-bit writes into |
258 | * a single burst write, which will malfunction on some parts. | ||
259 | * The flushes avoid this. | ||
260 | */ | ||
261 | ew32(RAL(index), rar_low); | ||
262 | e1e_flush(); | ||
263 | ew32(RAH(index), rar_high); | ||
264 | e1e_flush(); | ||
159 | } | 265 | } |
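e1000e_rar_set() above packs the six address bytes into two 32-bit registers (bytes 0-3 into RAL, bytes 4-5 plus the Address Valid bit into RAH), now skips the AV bit for an all-zero address, and flushes between the two writes so a bridge cannot merge them into one burst. A stand-alone sketch of the packing, with an illustrative AV bit value:

    #include <stdint.h>
    #include <stdio.h>

    #define RAH_AV 0x80000000u   /* illustrative "address valid" bit */

    int main(void)
    {
        uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

        uint32_t rar_low  = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
                            ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
        uint32_t rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

        if (rar_low || rar_high)          /* all-zero entry stays invalid */
            rar_high |= RAH_AV;

        printf("RAL = 0x%08x, RAH = 0x%08x\n",
               (unsigned)rar_low, (unsigned)rar_high);
        return 0;
    }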
160 | 266 | ||
161 | /** | 267 | /** |
@@ -234,62 +340,34 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) | |||
234 | * @hw: pointer to the HW structure | 340 | * @hw: pointer to the HW structure |
235 | * @mc_addr_list: array of multicast addresses to program | 341 | * @mc_addr_list: array of multicast addresses to program |
236 | * @mc_addr_count: number of multicast addresses to program | 342 | * @mc_addr_count: number of multicast addresses to program |
237 | * @rar_used_count: the first RAR register free to program | ||
238 | * @rar_count: total number of supported Receive Address Registers | ||
239 | * | 343 | * |
240 | * Updates the Receive Address Registers and Multicast Table Array. | 344 | * Updates entire Multicast Table Array. |
241 | * The caller must have a packed mc_addr_list of multicast addresses. | 345 | * The caller must have a packed mc_addr_list of multicast addresses. |
242 | * The parameter rar_count will usually be hw->mac.rar_entry_count | ||
243 | * unless there are workarounds that change this. | ||
244 | **/ | 346 | **/ |
245 | void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, | 347 | void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, |
246 | u8 *mc_addr_list, u32 mc_addr_count, | 348 | u8 *mc_addr_list, u32 mc_addr_count) |
247 | u32 rar_used_count, u32 rar_count) | ||
248 | { | 349 | { |
249 | u32 i; | 350 | u32 hash_value, hash_bit, hash_reg; |
250 | u32 *mcarray = kzalloc(hw->mac.mta_reg_count * sizeof(u32), GFP_ATOMIC); | 351 | int i; |
251 | 352 | ||
252 | if (!mcarray) { | 353 | /* clear mta_shadow */ |
253 | printk(KERN_ERR "multicast array memory allocation failed\n"); | 354 | memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); |
254 | return; | ||
255 | } | ||
256 | 355 | ||
257 | /* | 356 | /* update mta_shadow from mc_addr_list */ |
258 | * Load the first set of multicast addresses into the exact | 357 | for (i = 0; (u32) i < mc_addr_count; i++) { |
259 | * filters (RAR). If there are not enough to fill the RAR | ||
260 | * array, clear the filters. | ||
261 | */ | ||
262 | for (i = rar_used_count; i < rar_count; i++) { | ||
263 | if (mc_addr_count) { | ||
264 | e1000e_rar_set(hw, mc_addr_list, i); | ||
265 | mc_addr_count--; | ||
266 | mc_addr_list += ETH_ALEN; | ||
267 | } else { | ||
268 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0); | ||
269 | e1e_flush(); | ||
270 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0); | ||
271 | e1e_flush(); | ||
272 | } | ||
273 | } | ||
274 | |||
275 | /* Load any remaining multicast addresses into the hash table. */ | ||
276 | for (; mc_addr_count > 0; mc_addr_count--) { | ||
277 | u32 hash_value, hash_reg, hash_bit, mta; | ||
278 | hash_value = e1000_hash_mc_addr(hw, mc_addr_list); | 358 | hash_value = e1000_hash_mc_addr(hw, mc_addr_list); |
279 | hw_dbg(hw, "Hash value = 0x%03X\n", hash_value); | 359 | |
280 | hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); | 360 | hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); |
281 | hash_bit = hash_value & 0x1F; | 361 | hash_bit = hash_value & 0x1F; |
282 | mta = (1 << hash_bit); | ||
283 | mcarray[hash_reg] |= mta; | ||
284 | mc_addr_list += ETH_ALEN; | ||
285 | } | ||
286 | 362 | ||
287 | /* write the hash table completely */ | 363 | hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); |
288 | for (i = 0; i < hw->mac.mta_reg_count; i++) | 364 | mc_addr_list += (ETH_ALEN); |
289 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, mcarray[i]); | 365 | } |
290 | 366 | ||
367 | /* replace the entire MTA table */ | ||
368 | for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) | ||
369 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); | ||
291 | e1e_flush(); | 370 | e1e_flush(); |
292 | kfree(mcarray); | ||
293 | } | 371 | } |
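In the rewritten e1000e_update_mc_addr_list_generic() above, each hash value is split into a register index and a bit index: the low 5 bits pick one of the 32 bits in an MTA register, and the next bits (masked by mta_reg_count - 1) pick the register. A worked example with an invented hash value and an assumed 128-register table:

    #include <stdio.h>

    int main(void)
    {
        unsigned mta_reg_count = 128;    /* assumed table size for the example */
        unsigned hash_value    = 0x563;  /* invented hash of some multicast MAC */

        unsigned hash_reg = (hash_value >> 5) & (mta_reg_count - 1);
        unsigned hash_bit = hash_value & 0x1F;

        /* 0x563 >> 5 = 43 and 0x563 & 0x1F = 3: set bit 3 of MTA[43]. */
        printf("MTA[%u] |= 1 << %u\n", hash_reg, hash_bit);
        return 0;
    }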
294 | 372 | ||
295 | /** | 373 | /** |
@@ -300,45 +378,43 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, | |||
300 | **/ | 378 | **/ |
301 | void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) | 379 | void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) |
302 | { | 380 | { |
303 | u32 temp; | 381 | er32(CRCERRS); |
304 | 382 | er32(SYMERRS); | |
305 | temp = er32(CRCERRS); | 383 | er32(MPC); |
306 | temp = er32(SYMERRS); | 384 | er32(SCC); |
307 | temp = er32(MPC); | 385 | er32(ECOL); |
308 | temp = er32(SCC); | 386 | er32(MCC); |
309 | temp = er32(ECOL); | 387 | er32(LATECOL); |
310 | temp = er32(MCC); | 388 | er32(COLC); |
311 | temp = er32(LATECOL); | 389 | er32(DC); |
312 | temp = er32(COLC); | 390 | er32(SEC); |
313 | temp = er32(DC); | 391 | er32(RLEC); |
314 | temp = er32(SEC); | 392 | er32(XONRXC); |
315 | temp = er32(RLEC); | 393 | er32(XONTXC); |
316 | temp = er32(XONRXC); | 394 | er32(XOFFRXC); |
317 | temp = er32(XONTXC); | 395 | er32(XOFFTXC); |
318 | temp = er32(XOFFRXC); | 396 | er32(FCRUC); |
319 | temp = er32(XOFFTXC); | 397 | er32(GPRC); |
320 | temp = er32(FCRUC); | 398 | er32(BPRC); |
321 | temp = er32(GPRC); | 399 | er32(MPRC); |
322 | temp = er32(BPRC); | 400 | er32(GPTC); |
323 | temp = er32(MPRC); | 401 | er32(GORCL); |
324 | temp = er32(GPTC); | 402 | er32(GORCH); |
325 | temp = er32(GORCL); | 403 | er32(GOTCL); |
326 | temp = er32(GORCH); | 404 | er32(GOTCH); |
327 | temp = er32(GOTCL); | 405 | er32(RNBC); |
328 | temp = er32(GOTCH); | 406 | er32(RUC); |
329 | temp = er32(RNBC); | 407 | er32(RFC); |
330 | temp = er32(RUC); | 408 | er32(ROC); |
331 | temp = er32(RFC); | 409 | er32(RJC); |
332 | temp = er32(ROC); | 410 | er32(TORL); |
333 | temp = er32(RJC); | 411 | er32(TORH); |
334 | temp = er32(TORL); | 412 | er32(TOTL); |
335 | temp = er32(TORH); | 413 | er32(TOTH); |
336 | temp = er32(TOTL); | 414 | er32(TPR); |
337 | temp = er32(TOTH); | 415 | er32(TPT); |
338 | temp = er32(TPR); | 416 | er32(MPTC); |
339 | temp = er32(TPT); | 417 | er32(BPTC); |
340 | temp = er32(MPTC); | ||
341 | temp = er32(BPTC); | ||
342 | } | 418 | } |
343 | 419 | ||
344 | /** | 420 | /** |
@@ -376,7 +452,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) | |||
376 | if (!link) | 452 | if (!link) |
377 | return ret_val; /* No link detected */ | 453 | return ret_val; /* No link detected */ |
378 | 454 | ||
379 | mac->get_link_status = 0; | 455 | mac->get_link_status = false; |
380 | 456 | ||
381 | /* | 457 | /* |
382 | * Check if there was DownShift, must be checked | 458 | * Check if there was DownShift, must be checked |
@@ -408,7 +484,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) | |||
408 | */ | 484 | */ |
409 | ret_val = e1000e_config_fc_after_link_up(hw); | 485 | ret_val = e1000e_config_fc_after_link_up(hw); |
410 | if (ret_val) { | 486 | if (ret_val) { |
411 | hw_dbg(hw, "Error configuring flow control\n"); | 487 | e_dbg("Error configuring flow control\n"); |
412 | } | 488 | } |
413 | 489 | ||
414 | return ret_val; | 490 | return ret_val; |
@@ -448,7 +524,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
448 | mac->autoneg_failed = 1; | 524 | mac->autoneg_failed = 1; |
449 | return 0; | 525 | return 0; |
450 | } | 526 | } |
451 | hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); | 527 | e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); |
452 | 528 | ||
453 | /* Disable auto-negotiation in the TXCW register */ | 529 | /* Disable auto-negotiation in the TXCW register */ |
454 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | 530 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); |
@@ -461,7 +537,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
461 | /* Configure Flow Control after forcing link up. */ | 537 | /* Configure Flow Control after forcing link up. */ |
462 | ret_val = e1000e_config_fc_after_link_up(hw); | 538 | ret_val = e1000e_config_fc_after_link_up(hw); |
463 | if (ret_val) { | 539 | if (ret_val) { |
464 | hw_dbg(hw, "Error configuring flow control\n"); | 540 | e_dbg("Error configuring flow control\n"); |
465 | return ret_val; | 541 | return ret_val; |
466 | } | 542 | } |
467 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | 543 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
@@ -471,7 +547,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
471 | * and disable forced link in the Device Control register | 547 | * and disable forced link in the Device Control register |
472 | * in an attempt to auto-negotiate with our link partner. | 548 | * in an attempt to auto-negotiate with our link partner. |
473 | */ | 549 | */ |
474 | hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); | 550 | e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); |
475 | ew32(TXCW, mac->txcw); | 551 | ew32(TXCW, mac->txcw); |
476 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | 552 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
477 | 553 | ||
@@ -513,7 +589,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
513 | mac->autoneg_failed = 1; | 589 | mac->autoneg_failed = 1; |
514 | return 0; | 590 | return 0; |
515 | } | 591 | } |
516 | hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); | 592 | e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); |
517 | 593 | ||
518 | /* Disable auto-negotiation in the TXCW register */ | 594 | /* Disable auto-negotiation in the TXCW register */ |
519 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | 595 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); |
@@ -526,7 +602,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
526 | /* Configure Flow Control after forcing link up. */ | 602 | /* Configure Flow Control after forcing link up. */ |
527 | ret_val = e1000e_config_fc_after_link_up(hw); | 603 | ret_val = e1000e_config_fc_after_link_up(hw); |
528 | if (ret_val) { | 604 | if (ret_val) { |
529 | hw_dbg(hw, "Error configuring flow control\n"); | 605 | e_dbg("Error configuring flow control\n"); |
530 | return ret_val; | 606 | return ret_val; |
531 | } | 607 | } |
532 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | 608 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
@@ -536,7 +612,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
536 | * and disable forced link in the Device Control register | 612 | * and disable forced link in the Device Control register |
537 | * in an attempt to auto-negotiate with our link partner. | 613 | * in an attempt to auto-negotiate with our link partner. |
538 | */ | 614 | */ |
539 | hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); | 615 | e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); |
540 | ew32(TXCW, mac->txcw); | 616 | ew32(TXCW, mac->txcw); |
541 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | 617 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
542 | 618 | ||
@@ -553,11 +629,11 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
553 | if (rxcw & E1000_RXCW_SYNCH) { | 629 | if (rxcw & E1000_RXCW_SYNCH) { |
554 | if (!(rxcw & E1000_RXCW_IV)) { | 630 | if (!(rxcw & E1000_RXCW_IV)) { |
555 | mac->serdes_has_link = true; | 631 | mac->serdes_has_link = true; |
556 | hw_dbg(hw, "SERDES: Link up - forced.\n"); | 632 | e_dbg("SERDES: Link up - forced.\n"); |
557 | } | 633 | } |
558 | } else { | 634 | } else { |
559 | mac->serdes_has_link = false; | 635 | mac->serdes_has_link = false; |
560 | hw_dbg(hw, "SERDES: Link down - force failed.\n"); | 636 | e_dbg("SERDES: Link down - force failed.\n"); |
561 | } | 637 | } |
562 | } | 638 | } |
563 | 639 | ||
@@ -570,20 +646,20 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
570 | if (rxcw & E1000_RXCW_SYNCH) { | 646 | if (rxcw & E1000_RXCW_SYNCH) { |
571 | if (!(rxcw & E1000_RXCW_IV)) { | 647 | if (!(rxcw & E1000_RXCW_IV)) { |
572 | mac->serdes_has_link = true; | 648 | mac->serdes_has_link = true; |
573 | hw_dbg(hw, "SERDES: Link up - autoneg " | 649 | e_dbg("SERDES: Link up - autoneg " |
574 | "completed sucessfully.\n"); | 650 | "completed successfully.\n"); |
575 | } else { | 651 | } else { |
576 | mac->serdes_has_link = false; | 652 | mac->serdes_has_link = false; |
577 | hw_dbg(hw, "SERDES: Link down - invalid" | 653 | e_dbg("SERDES: Link down - invalid" |
578 | "codewords detected in autoneg.\n"); | 654 | "codewords detected in autoneg.\n"); |
579 | } | 655 | } |
580 | } else { | 656 | } else { |
581 | mac->serdes_has_link = false; | 657 | mac->serdes_has_link = false; |
582 | hw_dbg(hw, "SERDES: Link down - no sync.\n"); | 658 | e_dbg("SERDES: Link down - no sync.\n"); |
583 | } | 659 | } |
584 | } else { | 660 | } else { |
585 | mac->serdes_has_link = false; | 661 | mac->serdes_has_link = false; |
586 | hw_dbg(hw, "SERDES: Link down - autoneg failed\n"); | 662 | e_dbg("SERDES: Link down - autoneg failed\n"); |
587 | } | 663 | } |
588 | } | 664 | } |
589 | 665 | ||
@@ -614,7 +690,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) | |||
614 | ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); | 690 | ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); |
615 | 691 | ||
616 | if (ret_val) { | 692 | if (ret_val) { |
617 | hw_dbg(hw, "NVM Read Error\n"); | 693 | e_dbg("NVM Read Error\n"); |
618 | return ret_val; | 694 | return ret_val; |
619 | } | 695 | } |
620 | 696 | ||
@@ -667,7 +743,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw) | |||
667 | */ | 743 | */ |
668 | hw->fc.current_mode = hw->fc.requested_mode; | 744 | hw->fc.current_mode = hw->fc.requested_mode; |
669 | 745 | ||
670 | hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", | 746 | e_dbg("After fix-ups FlowControl is now = %x\n", |
671 | hw->fc.current_mode); | 747 | hw->fc.current_mode); |
672 | 748 | ||
673 | /* Call the necessary media_type subroutine to configure the link. */ | 749 | /* Call the necessary media_type subroutine to configure the link. */ |
@@ -681,7 +757,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw) | |||
681 | * control is disabled, because it does not hurt anything to | 757 | * control is disabled, because it does not hurt anything to |
682 | * initialize these registers. | 758 | * initialize these registers. |
683 | */ | 759 | */ |
684 | hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n"); | 760 | e_dbg("Initializing the Flow Control address, type and timer regs\n"); |
685 | ew32(FCT, FLOW_CONTROL_TYPE); | 761 | ew32(FCT, FLOW_CONTROL_TYPE); |
686 | ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); | 762 | ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); |
687 | ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); | 763 | ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); |
@@ -751,7 +827,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) | |||
751 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); | 827 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); |
752 | break; | 828 | break; |
753 | default: | 829 | default: |
754 | hw_dbg(hw, "Flow control param set incorrectly\n"); | 830 | e_dbg("Flow control param set incorrectly\n"); |
755 | return -E1000_ERR_CONFIG; | 831 | return -E1000_ERR_CONFIG; |
756 | break; | 832 | break; |
757 | } | 833 | } |
@@ -789,7 +865,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) | |||
789 | break; | 865 | break; |
790 | } | 866 | } |
791 | if (i == FIBER_LINK_UP_LIMIT) { | 867 | if (i == FIBER_LINK_UP_LIMIT) { |
792 | hw_dbg(hw, "Never got a valid link from auto-neg!!!\n"); | 868 | e_dbg("Never got a valid link from auto-neg!!!\n"); |
793 | mac->autoneg_failed = 1; | 869 | mac->autoneg_failed = 1; |
794 | /* | 870 | /* |
795 | * AutoNeg failed to achieve a link, so we'll call | 871 | * AutoNeg failed to achieve a link, so we'll call |
@@ -799,13 +875,13 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) | |||
799 | */ | 875 | */ |
800 | ret_val = mac->ops.check_for_link(hw); | 876 | ret_val = mac->ops.check_for_link(hw); |
801 | if (ret_val) { | 877 | if (ret_val) { |
802 | hw_dbg(hw, "Error while checking for link\n"); | 878 | e_dbg("Error while checking for link\n"); |
803 | return ret_val; | 879 | return ret_val; |
804 | } | 880 | } |
805 | mac->autoneg_failed = 0; | 881 | mac->autoneg_failed = 0; |
806 | } else { | 882 | } else { |
807 | mac->autoneg_failed = 0; | 883 | mac->autoneg_failed = 0; |
808 | hw_dbg(hw, "Valid Link Found\n"); | 884 | e_dbg("Valid Link Found\n"); |
809 | } | 885 | } |
810 | 886 | ||
811 | return 0; | 887 | return 0; |
@@ -841,7 +917,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
841 | * then the link-up status bit will be set and the flow control enable | 917 | * then the link-up status bit will be set and the flow control enable |
842 | * bits (RFCE and TFCE) will be set according to their negotiated value. | 918 | * bits (RFCE and TFCE) will be set according to their negotiated value. |
843 | */ | 919 | */ |
844 | hw_dbg(hw, "Auto-negotiation enabled\n"); | 920 | e_dbg("Auto-negotiation enabled\n"); |
845 | 921 | ||
846 | ew32(CTRL, ctrl); | 922 | ew32(CTRL, ctrl); |
847 | e1e_flush(); | 923 | e1e_flush(); |
@@ -856,7 +932,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
856 | (er32(CTRL) & E1000_CTRL_SWDPIN1)) { | 932 | (er32(CTRL) & E1000_CTRL_SWDPIN1)) { |
857 | ret_val = e1000_poll_fiber_serdes_link_generic(hw); | 933 | ret_val = e1000_poll_fiber_serdes_link_generic(hw); |
858 | } else { | 934 | } else { |
859 | hw_dbg(hw, "No signal detected\n"); | 935 | e_dbg("No signal detected\n"); |
860 | } | 936 | } |
861 | 937 | ||
862 | return 0; | 938 | return 0; |
@@ -952,7 +1028,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) | |||
952 | * 3: Both Rx and Tx flow control (symmetric) is enabled. | 1028 | * 3: Both Rx and Tx flow control (symmetric) is enabled. |
953 | * other: No other values should be possible at this point. | 1029 | * other: No other values should be possible at this point. |
954 | */ | 1030 | */ |
955 | hw_dbg(hw, "hw->fc.current_mode = %u\n", hw->fc.current_mode); | 1031 | e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); |
956 | 1032 | ||
957 | switch (hw->fc.current_mode) { | 1033 | switch (hw->fc.current_mode) { |
958 | case e1000_fc_none: | 1034 | case e1000_fc_none: |
@@ -970,7 +1046,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) | |||
970 | ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); | 1046 | ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); |
971 | break; | 1047 | break; |
972 | default: | 1048 | default: |
973 | hw_dbg(hw, "Flow control param set incorrectly\n"); | 1049 | e_dbg("Flow control param set incorrectly\n"); |
974 | return -E1000_ERR_CONFIG; | 1050 | return -E1000_ERR_CONFIG; |
975 | } | 1051 | } |
976 | 1052 | ||
@@ -1011,7 +1087,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1011 | } | 1087 | } |
1012 | 1088 | ||
1013 | if (ret_val) { | 1089 | if (ret_val) { |
1014 | hw_dbg(hw, "Error forcing flow control settings\n"); | 1090 | e_dbg("Error forcing flow control settings\n"); |
1015 | return ret_val; | 1091 | return ret_val; |
1016 | } | 1092 | } |
1017 | 1093 | ||
@@ -1035,7 +1111,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1035 | return ret_val; | 1111 | return ret_val; |
1036 | 1112 | ||
1037 | if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { | 1113 | if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { |
1038 | hw_dbg(hw, "Copper PHY and Auto Neg " | 1114 | e_dbg("Copper PHY and Auto Neg " |
1039 | "has not completed.\n"); | 1115 | "has not completed.\n"); |
1040 | return ret_val; | 1116 | return ret_val; |
1041 | } | 1117 | } |
@@ -1076,7 +1152,6 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1076 | * 1 | 1 | 0 | 0 | e1000_fc_none | 1152 | * 1 | 1 | 0 | 0 | e1000_fc_none |
1077 | * 1 | 1 | 0 | 1 | e1000_fc_rx_pause | 1153 | * 1 | 1 | 0 | 1 | e1000_fc_rx_pause |
1078 | * | 1154 | * |
1079 | * | ||
1080 | * Are both PAUSE bits set to 1? If so, this implies | 1155 | * Are both PAUSE bits set to 1? If so, this implies |
1081 | * Symmetric Flow Control is enabled at both ends. The | 1156 | * Symmetric Flow Control is enabled at both ends. The |
1082 | * ASM_DIR bits are irrelevant per the spec. | 1157 | * ASM_DIR bits are irrelevant per the spec. |
@@ -1100,10 +1175,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1100 | */ | 1175 | */ |
1101 | if (hw->fc.requested_mode == e1000_fc_full) { | 1176 | if (hw->fc.requested_mode == e1000_fc_full) { |
1102 | hw->fc.current_mode = e1000_fc_full; | 1177 | hw->fc.current_mode = e1000_fc_full; |
1103 | hw_dbg(hw, "Flow Control = FULL.\r\n"); | 1178 | e_dbg("Flow Control = FULL.\r\n"); |
1104 | } else { | 1179 | } else { |
1105 | hw->fc.current_mode = e1000_fc_rx_pause; | 1180 | hw->fc.current_mode = e1000_fc_rx_pause; |
1106 | hw_dbg(hw, "Flow Control = " | 1181 | e_dbg("Flow Control = " |
1107 | "RX PAUSE frames only.\r\n"); | 1182 | "RX PAUSE frames only.\r\n"); |
1108 | } | 1183 | } |
1109 | } | 1184 | } |
@@ -1114,14 +1189,13 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1114 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result | 1189 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result |
1115 | *-------|---------|-------|---------|-------------------- | 1190 | *-------|---------|-------|---------|-------------------- |
1116 | * 0 | 1 | 1 | 1 | e1000_fc_tx_pause | 1191 | * 0 | 1 | 1 | 1 | e1000_fc_tx_pause |
1117 | * | ||
1118 | */ | 1192 | */ |
1119 | else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && | 1193 | else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && |
1120 | (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && | 1194 | (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && |
1121 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && | 1195 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && |
1122 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { | 1196 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { |
1123 | hw->fc.current_mode = e1000_fc_tx_pause; | 1197 | hw->fc.current_mode = e1000_fc_tx_pause; |
1124 | hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n"); | 1198 | e_dbg("Flow Control = Tx PAUSE frames only.\r\n"); |
1125 | } | 1199 | } |
1126 | /* | 1200 | /* |
1127 | * For transmitting PAUSE frames ONLY. | 1201 | * For transmitting PAUSE frames ONLY. |
@@ -1130,21 +1204,20 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1130 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result | 1204 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result |
1131 | *-------|---------|-------|---------|-------------------- | 1205 | *-------|---------|-------|---------|-------------------- |
1132 | * 1 | 1 | 0 | 1 | e1000_fc_rx_pause | 1206 | * 1 | 1 | 0 | 1 | e1000_fc_rx_pause |
1133 | * | ||
1134 | */ | 1207 | */ |
1135 | else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && | 1208 | else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && |
1136 | (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && | 1209 | (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && |
1137 | !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && | 1210 | !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && |
1138 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { | 1211 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { |
1139 | hw->fc.current_mode = e1000_fc_rx_pause; | 1212 | hw->fc.current_mode = e1000_fc_rx_pause; |
1140 | hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n"); | 1213 | e_dbg("Flow Control = Rx PAUSE frames only.\r\n"); |
1141 | } else { | 1214 | } else { |
1142 | /* | 1215 | /* |
1143 | * Per the IEEE spec, at this point flow control | 1216 | * Per the IEEE spec, at this point flow control |
1144 | * should be disabled. | 1217 | * should be disabled. |
1145 | */ | 1218 | */ |
1146 | hw->fc.current_mode = e1000_fc_none; | 1219 | hw->fc.current_mode = e1000_fc_none; |
1147 | hw_dbg(hw, "Flow Control = NONE.\r\n"); | 1220 | e_dbg("Flow Control = NONE.\r\n"); |
1148 | } | 1221 | } |
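The chain of conditions above implements the pause-resolution table spelled out in the comments: both sides advertising PAUSE gives full flow control (or rx-only if only rx was requested), PAUSE=0/ASM_DIR=1 locally with PAUSE=1/ASM_DIR=1 from the partner gives tx-only, the mirror case gives rx-only, and everything else disables flow control. A compact stand-alone version of that resolution, using invented bit masks rather than the real NWAY_* definitions:

    #include <stdio.h>

    /* Invented advertisement bit masks for the sketch. */
    #define ADV_PAUSE   0x1u
    #define ADV_ASM_DIR 0x2u

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    static enum fc_mode resolve_fc(unsigned local, unsigned partner, int want_full)
    {
        if ((local & ADV_PAUSE) && (partner & ADV_PAUSE))
            return want_full ? FC_FULL : FC_RX_PAUSE;

        if (!(local & ADV_PAUSE) && (local & ADV_ASM_DIR) &&
            (partner & ADV_PAUSE) && (partner & ADV_ASM_DIR))
            return FC_TX_PAUSE;

        if ((local & ADV_PAUSE) && (local & ADV_ASM_DIR) &&
            !(partner & ADV_PAUSE) && (partner & ADV_ASM_DIR))
            return FC_RX_PAUSE;

        return FC_NONE;          /* per the IEEE spec, disable flow control */
    }

    int main(void)
    {
        /* Local advertises PAUSE+ASM_DIR, partner only ASM_DIR: rx-pause. */
        printf("%d\n", resolve_fc(ADV_PAUSE | ADV_ASM_DIR, ADV_ASM_DIR, 1));
        return 0;
    }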
1149 | 1222 | ||
1150 | /* | 1223 | /* |
@@ -1154,7 +1227,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1154 | */ | 1227 | */ |
1155 | ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); | 1228 | ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); |
1156 | if (ret_val) { | 1229 | if (ret_val) { |
1157 | hw_dbg(hw, "Error getting link speed and duplex\n"); | 1230 | e_dbg("Error getting link speed and duplex\n"); |
1158 | return ret_val; | 1231 | return ret_val; |
1159 | } | 1232 | } |
1160 | 1233 | ||
@@ -1167,7 +1240,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1167 | */ | 1240 | */ |
1168 | ret_val = e1000e_force_mac_fc(hw); | 1241 | ret_val = e1000e_force_mac_fc(hw); |
1169 | if (ret_val) { | 1242 | if (ret_val) { |
1170 | hw_dbg(hw, "Error forcing flow control settings\n"); | 1243 | e_dbg("Error forcing flow control settings\n"); |
1171 | return ret_val; | 1244 | return ret_val; |
1172 | } | 1245 | } |
1173 | } | 1246 | } |
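Aside — the branch ladder above implements the PAUSE/ASM_DIR resolution table of IEEE 802.3 Annex 28B. A minimal, compilable user-space model of that table (the enum and function names are illustrative, not the driver's):

	#include <stdbool.h>

	enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

	/* l_* = locally advertised bits, r_* = link partner's advertised bits */
	static enum fc_mode resolve_fc(bool l_pause, bool l_asm,
	                               bool r_pause, bool r_asm,
	                               enum fc_mode requested)
	{
		if (l_pause && r_pause)          /* both sides advertise symmetric PAUSE */
			return requested == FC_FULL ? FC_FULL : FC_RX_PAUSE;
		if (!l_pause && l_asm && r_pause && r_asm)
			return FC_TX_PAUSE;          /* we only send PAUSE frames */
		if (l_pause && l_asm && !r_pause && r_asm)
			return FC_RX_PAUSE;          /* we only honor received PAUSE frames */
		return FC_NONE;                  /* anything else: flow control disabled */
	}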
@@ -1191,21 +1264,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup | |||
1191 | status = er32(STATUS); | 1264 | status = er32(STATUS); |
1192 | if (status & E1000_STATUS_SPEED_1000) { | 1265 | if (status & E1000_STATUS_SPEED_1000) { |
1193 | *speed = SPEED_1000; | 1266 | *speed = SPEED_1000; |
1194 | hw_dbg(hw, "1000 Mbs, "); | 1267 | e_dbg("1000 Mbs, "); |
1195 | } else if (status & E1000_STATUS_SPEED_100) { | 1268 | } else if (status & E1000_STATUS_SPEED_100) { |
1196 | *speed = SPEED_100; | 1269 | *speed = SPEED_100; |
1197 | hw_dbg(hw, "100 Mbs, "); | 1270 | e_dbg("100 Mbs, "); |
1198 | } else { | 1271 | } else { |
1199 | *speed = SPEED_10; | 1272 | *speed = SPEED_10; |
1200 | hw_dbg(hw, "10 Mbs, "); | 1273 | e_dbg("10 Mbs, "); |
1201 | } | 1274 | } |
1202 | 1275 | ||
1203 | if (status & E1000_STATUS_FD) { | 1276 | if (status & E1000_STATUS_FD) { |
1204 | *duplex = FULL_DUPLEX; | 1277 | *duplex = FULL_DUPLEX; |
1205 | hw_dbg(hw, "Full Duplex\n"); | 1278 | e_dbg("Full Duplex\n"); |
1206 | } else { | 1279 | } else { |
1207 | *duplex = HALF_DUPLEX; | 1280 | *duplex = HALF_DUPLEX; |
1208 | hw_dbg(hw, "Half Duplex\n"); | 1281 | e_dbg("Half Duplex\n"); |
1209 | } | 1282 | } |
1210 | 1283 | ||
1211 | return 0; | 1284 | return 0; |
@@ -1251,7 +1324,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) | |||
1251 | } | 1324 | } |
1252 | 1325 | ||
1253 | if (i == timeout) { | 1326 | if (i == timeout) { |
1254 | hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n"); | 1327 | e_dbg("Driver can't access device - SMBI bit is set.\n"); |
1255 | return -E1000_ERR_NVM; | 1328 | return -E1000_ERR_NVM; |
1256 | } | 1329 | } |
1257 | 1330 | ||
@@ -1270,7 +1343,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) | |||
1270 | if (i == timeout) { | 1343 | if (i == timeout) { |
1271 | /* Release semaphores */ | 1344 | /* Release semaphores */ |
1272 | e1000e_put_hw_semaphore(hw); | 1345 | e1000e_put_hw_semaphore(hw); |
1273 | hw_dbg(hw, "Driver can't access the NVM\n"); | 1346 | e_dbg("Driver can't access the NVM\n"); |
1274 | return -E1000_ERR_NVM; | 1347 | return -E1000_ERR_NVM; |
1275 | } | 1348 | } |
1276 | 1349 | ||
@@ -1310,7 +1383,7 @@ s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) | |||
1310 | } | 1383 | } |
1311 | 1384 | ||
1312 | if (i == AUTO_READ_DONE_TIMEOUT) { | 1385 | if (i == AUTO_READ_DONE_TIMEOUT) { |
1313 | hw_dbg(hw, "Auto read by HW from NVM has not completed.\n"); | 1386 | e_dbg("Auto read by HW from NVM has not completed.\n"); |
1314 | return -E1000_ERR_RESET; | 1387 | return -E1000_ERR_RESET; |
1315 | } | 1388 | } |
1316 | 1389 | ||
@@ -1331,7 +1404,7 @@ s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) | |||
1331 | 1404 | ||
1332 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); | 1405 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); |
1333 | if (ret_val) { | 1406 | if (ret_val) { |
1334 | hw_dbg(hw, "NVM Read Error\n"); | 1407 | e_dbg("NVM Read Error\n"); |
1335 | return ret_val; | 1408 | return ret_val; |
1336 | } | 1409 | } |
1337 | 1410 | ||
@@ -1585,7 +1658,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw) | |||
1585 | } | 1658 | } |
1586 | 1659 | ||
1587 | if (!timeout) { | 1660 | if (!timeout) { |
1588 | hw_dbg(hw, "Master requests are pending.\n"); | 1661 | e_dbg("Master requests are pending.\n"); |
1589 | return -E1000_ERR_MASTER_REQUESTS_PENDING; | 1662 | return -E1000_ERR_MASTER_REQUESTS_PENDING; |
1590 | } | 1663 | } |
1591 | 1664 | ||
@@ -1602,14 +1675,21 @@ void e1000e_reset_adaptive(struct e1000_hw *hw) | |||
1602 | { | 1675 | { |
1603 | struct e1000_mac_info *mac = &hw->mac; | 1676 | struct e1000_mac_info *mac = &hw->mac; |
1604 | 1677 | ||
1678 | if (!mac->adaptive_ifs) { | ||
1679 | e_dbg("Not in Adaptive IFS mode!\n"); | ||
1680 | goto out; | ||
1681 | } | ||
1682 | |||
1605 | mac->current_ifs_val = 0; | 1683 | mac->current_ifs_val = 0; |
1606 | mac->ifs_min_val = IFS_MIN; | 1684 | mac->ifs_min_val = IFS_MIN; |
1607 | mac->ifs_max_val = IFS_MAX; | 1685 | mac->ifs_max_val = IFS_MAX; |
1608 | mac->ifs_step_size = IFS_STEP; | 1686 | mac->ifs_step_size = IFS_STEP; |
1609 | mac->ifs_ratio = IFS_RATIO; | 1687 | mac->ifs_ratio = IFS_RATIO; |
1610 | 1688 | ||
1611 | mac->in_ifs_mode = 0; | 1689 | mac->in_ifs_mode = false; |
1612 | ew32(AIT, 0); | 1690 | ew32(AIT, 0); |
1691 | out: | ||
1692 | return; | ||
1613 | } | 1693 | } |
1614 | 1694 | ||
1615 | /** | 1695 | /** |
@@ -1623,9 +1703,14 @@ void e1000e_update_adaptive(struct e1000_hw *hw) | |||
1623 | { | 1703 | { |
1624 | struct e1000_mac_info *mac = &hw->mac; | 1704 | struct e1000_mac_info *mac = &hw->mac; |
1625 | 1705 | ||
1706 | if (!mac->adaptive_ifs) { | ||
1707 | e_dbg("Not in Adaptive IFS mode!\n"); | ||
1708 | goto out; | ||
1709 | } | ||
1710 | |||
1626 | if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { | 1711 | if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { |
1627 | if (mac->tx_packet_delta > MIN_NUM_XMITS) { | 1712 | if (mac->tx_packet_delta > MIN_NUM_XMITS) { |
1628 | mac->in_ifs_mode = 1; | 1713 | mac->in_ifs_mode = true; |
1629 | if (mac->current_ifs_val < mac->ifs_max_val) { | 1714 | if (mac->current_ifs_val < mac->ifs_max_val) { |
1630 | if (!mac->current_ifs_val) | 1715 | if (!mac->current_ifs_val) |
1631 | mac->current_ifs_val = mac->ifs_min_val; | 1716 | mac->current_ifs_val = mac->ifs_min_val; |
@@ -1639,10 +1724,12 @@ void e1000e_update_adaptive(struct e1000_hw *hw) | |||
1639 | if (mac->in_ifs_mode && | 1724 | if (mac->in_ifs_mode && |
1640 | (mac->tx_packet_delta <= MIN_NUM_XMITS)) { | 1725 | (mac->tx_packet_delta <= MIN_NUM_XMITS)) { |
1641 | mac->current_ifs_val = 0; | 1726 | mac->current_ifs_val = 0; |
1642 | mac->in_ifs_mode = 0; | 1727 | mac->in_ifs_mode = false; |
1643 | ew32(AIT, 0); | 1728 | ew32(AIT, 0); |
1644 | } | 1729 | } |
1645 | } | 1730 | } |
1731 | out: | ||
1732 | return; | ||
1646 | } | 1733 | } |
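Aside — the two hunks above only add an early-out when adaptive IFS is disabled; the throttling itself is unchanged. A stand-alone sketch of that logic (the struct is an illustrative stand-in for the e1000_mac_info fields, not the driver's type):

	#include <stdbool.h>
	#include <stdint.h>

	struct ifs_state {
		bool     enabled, in_ifs_mode;
		uint16_t cur, min, max, step, ratio;
	};

	/* Returns the value that would be programmed into the AIT register. */
	static uint16_t ifs_update(struct ifs_state *s, uint32_t collision_delta,
	                           uint32_t tx_packet_delta, uint32_t min_xmits)
	{
		if (!s->enabled)
			return s->cur;
		if (collision_delta * s->ratio > tx_packet_delta) {
			if (tx_packet_delta > min_xmits) {
				s->in_ifs_mode = true;        /* congested: stretch the gap */
				if (!s->cur)
					s->cur = s->min;
				else if (s->cur + s->step <= s->max)
					s->cur += s->step;
			}
		} else if (s->in_ifs_mode && tx_packet_delta <= min_xmits) {
			s->in_ifs_mode = false;               /* traffic quiet again: reset */
			s->cur = 0;
		}
		return s->cur;
	}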
1647 | 1734 | ||
1648 | /** | 1735 | /** |
@@ -1809,7 +1896,7 @@ s32 e1000e_acquire_nvm(struct e1000_hw *hw) | |||
1809 | if (!timeout) { | 1896 | if (!timeout) { |
1810 | eecd &= ~E1000_EECD_REQ; | 1897 | eecd &= ~E1000_EECD_REQ; |
1811 | ew32(EECD, eecd); | 1898 | ew32(EECD, eecd); |
1812 | hw_dbg(hw, "Could not acquire NVM grant\n"); | 1899 | e_dbg("Could not acquire NVM grant\n"); |
1813 | return -E1000_ERR_NVM; | 1900 | return -E1000_ERR_NVM; |
1814 | } | 1901 | } |
1815 | 1902 | ||
@@ -1914,7 +2001,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) | |||
1914 | } | 2001 | } |
1915 | 2002 | ||
1916 | if (!timeout) { | 2003 | if (!timeout) { |
1917 | hw_dbg(hw, "SPI NVM Status error\n"); | 2004 | e_dbg("SPI NVM Status error\n"); |
1918 | return -E1000_ERR_NVM; | 2005 | return -E1000_ERR_NVM; |
1919 | } | 2006 | } |
1920 | } | 2007 | } |
@@ -1943,7 +2030,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
1943 | */ | 2030 | */ |
1944 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 2031 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
1945 | (words == 0)) { | 2032 | (words == 0)) { |
1946 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 2033 | e_dbg("nvm parameter(s) out of bounds\n"); |
1947 | return -E1000_ERR_NVM; | 2034 | return -E1000_ERR_NVM; |
1948 | } | 2035 | } |
1949 | 2036 | ||
@@ -1986,11 +2073,11 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
1986 | */ | 2073 | */ |
1987 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 2074 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
1988 | (words == 0)) { | 2075 | (words == 0)) { |
1989 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 2076 | e_dbg("nvm parameter(s) out of bounds\n"); |
1990 | return -E1000_ERR_NVM; | 2077 | return -E1000_ERR_NVM; |
1991 | } | 2078 | } |
1992 | 2079 | ||
1993 | ret_val = nvm->ops.acquire_nvm(hw); | 2080 | ret_val = nvm->ops.acquire(hw); |
1994 | if (ret_val) | 2081 | if (ret_val) |
1995 | return ret_val; | 2082 | return ret_val; |
1996 | 2083 | ||
@@ -2001,7 +2088,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
2001 | 2088 | ||
2002 | ret_val = e1000_ready_nvm_eeprom(hw); | 2089 | ret_val = e1000_ready_nvm_eeprom(hw); |
2003 | if (ret_val) { | 2090 | if (ret_val) { |
2004 | nvm->ops.release_nvm(hw); | 2091 | nvm->ops.release(hw); |
2005 | return ret_val; | 2092 | return ret_val; |
2006 | } | 2093 | } |
2007 | 2094 | ||
@@ -2040,72 +2127,32 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
2040 | } | 2127 | } |
2041 | 2128 | ||
2042 | msleep(10); | 2129 | msleep(10); |
2043 | nvm->ops.release_nvm(hw); | 2130 | nvm->ops.release(hw); |
2044 | return 0; | 2131 | return 0; |
2045 | } | 2132 | } |
2046 | 2133 | ||
2047 | /** | 2134 | /** |
2048 | * e1000e_read_mac_addr - Read device MAC address | 2135 | * e1000_read_mac_addr_generic - Read device MAC address |
2049 | * @hw: pointer to the HW structure | 2136 | * @hw: pointer to the HW structure |
2050 | * | 2137 | * |
2051 | * Reads the device MAC address from the EEPROM and stores the value. | 2138 | * Reads the device MAC address from the EEPROM and stores the value. |
2052 | * Since devices with two ports use the same EEPROM, we increment the | 2139 | * Since devices with two ports use the same EEPROM, we increment the |
2053 | * last bit in the MAC address for the second port. | 2140 | * last bit in the MAC address for the second port. |
2054 | **/ | 2141 | **/ |
2055 | s32 e1000e_read_mac_addr(struct e1000_hw *hw) | 2142 | s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) |
2056 | { | 2143 | { |
2057 | s32 ret_val; | 2144 | u32 rar_high; |
2058 | u16 offset, nvm_data, i; | 2145 | u32 rar_low; |
2059 | u16 mac_addr_offset = 0; | 2146 | u16 i; |
2060 | |||
2061 | if (hw->mac.type == e1000_82571) { | ||
2062 | /* Check for an alternate MAC address. An alternate MAC | ||
2063 | * address can be setup by pre-boot software and must be | ||
2064 | * treated like a permanent address and must override the | ||
2065 | * actual permanent MAC address.*/ | ||
2066 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, | ||
2067 | &mac_addr_offset); | ||
2068 | if (ret_val) { | ||
2069 | hw_dbg(hw, "NVM Read Error\n"); | ||
2070 | return ret_val; | ||
2071 | } | ||
2072 | if (mac_addr_offset == 0xFFFF) | ||
2073 | mac_addr_offset = 0; | ||
2074 | |||
2075 | if (mac_addr_offset) { | ||
2076 | if (hw->bus.func == E1000_FUNC_1) | ||
2077 | mac_addr_offset += ETH_ALEN/sizeof(u16); | ||
2078 | |||
2079 | /* make sure we have a valid mac address here | ||
2080 | * before using it */ | ||
2081 | ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, | ||
2082 | &nvm_data); | ||
2083 | if (ret_val) { | ||
2084 | hw_dbg(hw, "NVM Read Error\n"); | ||
2085 | return ret_val; | ||
2086 | } | ||
2087 | if (nvm_data & 0x0001) | ||
2088 | mac_addr_offset = 0; | ||
2089 | } | ||
2090 | 2147 | ||
2091 | if (mac_addr_offset) | 2148 | rar_high = er32(RAH(0)); |
2092 | hw->dev_spec.e82571.alt_mac_addr_is_present = 1; | 2149 | rar_low = er32(RAL(0)); |
2093 | } | ||
2094 | 2150 | ||
2095 | for (i = 0; i < ETH_ALEN; i += 2) { | 2151 | for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) |
2096 | offset = mac_addr_offset + (i >> 1); | 2152 | hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); |
2097 | ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); | ||
2098 | if (ret_val) { | ||
2099 | hw_dbg(hw, "NVM Read Error\n"); | ||
2100 | return ret_val; | ||
2101 | } | ||
2102 | hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); | ||
2103 | hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8); | ||
2104 | } | ||
2105 | 2153 | ||
2106 | /* Flip last bit of mac address if we're on second port */ | 2154 | for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) |
2107 | if (!mac_addr_offset && hw->bus.func == E1000_FUNC_1) | 2155 | hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); |
2108 | hw->mac.perm_addr[5] ^= 1; | ||
2109 | 2156 | ||
2110 | for (i = 0; i < ETH_ALEN; i++) | 2157 | for (i = 0; i < ETH_ALEN; i++) |
2111 | hw->mac.addr[i] = hw->mac.perm_addr[i]; | 2158 | hw->mac.addr[i] = hw->mac.perm_addr[i]; |
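Aside — the rewritten e1000_read_mac_addr_generic() no longer walks the NVM; it unpacks the address the firmware already programmed into receive address register 0. A minimal model of that unpacking (register values passed in rather than read from hardware):

	#include <stdint.h>

	/* RAL(0) holds MAC bytes 0-3; the low 16 bits of RAH(0) hold bytes 4-5. */
	static void mac_from_rar(uint32_t ral, uint32_t rah, uint8_t mac[6])
	{
		for (int i = 0; i < 4; i++)
			mac[i] = (uint8_t)(ral >> (i * 8));
		for (int i = 0; i < 2; i++)
			mac[4 + i] = (uint8_t)(rah >> (i * 8));
	}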
@@ -2129,14 +2176,14 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) | |||
2129 | for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { | 2176 | for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { |
2130 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); | 2177 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); |
2131 | if (ret_val) { | 2178 | if (ret_val) { |
2132 | hw_dbg(hw, "NVM Read Error\n"); | 2179 | e_dbg("NVM Read Error\n"); |
2133 | return ret_val; | 2180 | return ret_val; |
2134 | } | 2181 | } |
2135 | checksum += nvm_data; | 2182 | checksum += nvm_data; |
2136 | } | 2183 | } |
2137 | 2184 | ||
2138 | if (checksum != (u16) NVM_SUM) { | 2185 | if (checksum != (u16) NVM_SUM) { |
2139 | hw_dbg(hw, "NVM Checksum Invalid\n"); | 2186 | e_dbg("NVM Checksum Invalid\n"); |
2140 | return -E1000_ERR_NVM; | 2187 | return -E1000_ERR_NVM; |
2141 | } | 2188 | } |
2142 | 2189 | ||
@@ -2160,7 +2207,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) | |||
2160 | for (i = 0; i < NVM_CHECKSUM_REG; i++) { | 2207 | for (i = 0; i < NVM_CHECKSUM_REG; i++) { |
2161 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); | 2208 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); |
2162 | if (ret_val) { | 2209 | if (ret_val) { |
2163 | hw_dbg(hw, "NVM Read Error while updating checksum.\n"); | 2210 | e_dbg("NVM Read Error while updating checksum.\n"); |
2164 | return ret_val; | 2211 | return ret_val; |
2165 | } | 2212 | } |
2166 | checksum += nvm_data; | 2213 | checksum += nvm_data; |
@@ -2168,7 +2215,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) | |||
2168 | checksum = (u16) NVM_SUM - checksum; | 2215 | checksum = (u16) NVM_SUM - checksum; |
2169 | ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); | 2216 | ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); |
2170 | if (ret_val) | 2217 | if (ret_val) |
2171 | hw_dbg(hw, "NVM Write Error while updating checksum.\n"); | 2218 | e_dbg("NVM Write Error while updating checksum.\n"); |
2172 | 2219 | ||
2173 | return ret_val; | 2220 | return ret_val; |
2174 | } | 2221 | } |
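Aside — both routines above enforce the same invariant: the 16-bit NVM words from offset 0 through the checksum word must sum to NVM_SUM. A user-space sketch of that rule (the constants mirror the driver's NVM_CHECKSUM_REG/NVM_SUM defines as I recall them; the array stands in for the EEPROM):

	#include <stdbool.h>
	#include <stdint.h>

	#define CHECKSUM_REG 0x3F       /* word index of the checksum itself */
	#define CHECKSUM_SUM 0xBABAu    /* expected sum of words 0..CHECKSUM_REG */

	static void nvm_update_checksum(uint16_t nvm[CHECKSUM_REG + 1])
	{
		uint16_t sum = 0;
		for (int i = 0; i < CHECKSUM_REG; i++)
			sum += nvm[i];
		nvm[CHECKSUM_REG] = (uint16_t)(CHECKSUM_SUM - sum);
	}

	static bool nvm_checksum_valid(const uint16_t nvm[CHECKSUM_REG + 1])
	{
		uint16_t sum = 0;
		for (int i = 0; i <= CHECKSUM_REG; i++)
			sum += nvm[i];
		return sum == (uint16_t)CHECKSUM_SUM;
	}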
@@ -2231,7 +2278,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) | |||
2231 | /* Check that the host interface is enabled. */ | 2278 | /* Check that the host interface is enabled. */ |
2232 | hicr = er32(HICR); | 2279 | hicr = er32(HICR); |
2233 | if ((hicr & E1000_HICR_EN) == 0) { | 2280 | if ((hicr & E1000_HICR_EN) == 0) { |
2234 | hw_dbg(hw, "E1000_HOST_EN bit disabled.\n"); | 2281 | e_dbg("E1000_HOST_EN bit disabled.\n"); |
2235 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | 2282 | return -E1000_ERR_HOST_INTERFACE_COMMAND; |
2236 | } | 2283 | } |
2237 | /* check the previous command is completed */ | 2284 | /* check the previous command is completed */ |
@@ -2243,7 +2290,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) | |||
2243 | } | 2290 | } |
2244 | 2291 | ||
2245 | if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { | 2292 | if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { |
2246 | hw_dbg(hw, "Previous command timeout failed .\n"); | 2293 | e_dbg("Previous command timeout failed .\n"); |
2247 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | 2294 | return -E1000_ERR_HOST_INTERFACE_COMMAND; |
2248 | } | 2295 | } |
2249 | 2296 | ||
@@ -2280,10 +2327,12 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
2280 | s32 ret_val, hdr_csum, csum; | 2327 | s32 ret_val, hdr_csum, csum; |
2281 | u8 i, len; | 2328 | u8 i, len; |
2282 | 2329 | ||
2330 | hw->mac.tx_pkt_filtering = true; | ||
2331 | |||
2283 | /* No manageability, no filtering */ | 2332 | /* No manageability, no filtering */ |
2284 | if (!e1000e_check_mng_mode(hw)) { | 2333 | if (!e1000e_check_mng_mode(hw)) { |
2285 | hw->mac.tx_pkt_filtering = 0; | 2334 | hw->mac.tx_pkt_filtering = false; |
2286 | return 0; | 2335 | goto out; |
2287 | } | 2336 | } |
2288 | 2337 | ||
2289 | /* | 2338 | /* |
@@ -2291,9 +2340,9 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
2291 | * reason, disable filtering. | 2340 | * reason, disable filtering. |
2292 | */ | 2341 | */ |
2293 | ret_val = e1000_mng_enable_host_if(hw); | 2342 | ret_val = e1000_mng_enable_host_if(hw); |
2294 | if (ret_val != 0) { | 2343 | if (ret_val) { |
2295 | hw->mac.tx_pkt_filtering = 0; | 2344 | hw->mac.tx_pkt_filtering = false; |
2296 | return ret_val; | 2345 | goto out; |
2297 | } | 2346 | } |
2298 | 2347 | ||
2299 | /* Read in the header. Length and offset are in dwords. */ | 2348 | /* Read in the header. Length and offset are in dwords. */ |
@@ -2311,18 +2360,18 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
2311 | * take the safe route of assuming Tx filtering is enabled. | 2360 | * take the safe route of assuming Tx filtering is enabled. |
2312 | */ | 2361 | */ |
2313 | if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { | 2362 | if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { |
2314 | hw->mac.tx_pkt_filtering = 1; | 2363 | hw->mac.tx_pkt_filtering = true; |
2315 | return 1; | 2364 | goto out; |
2316 | } | 2365 | } |
2317 | 2366 | ||
2318 | /* Cookie area is valid, make the final check for filtering. */ | 2367 | /* Cookie area is valid, make the final check for filtering. */ |
2319 | if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { | 2368 | if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { |
2320 | hw->mac.tx_pkt_filtering = 0; | 2369 | hw->mac.tx_pkt_filtering = false; |
2321 | return 0; | 2370 | goto out; |
2322 | } | 2371 | } |
2323 | 2372 | ||
2324 | hw->mac.tx_pkt_filtering = 1; | 2373 | out: |
2325 | return 1; | 2374 | return hw->mac.tx_pkt_filtering; |
2326 | } | 2375 | } |
2327 | 2376 | ||
2328 | /** | 2377 | /** |
@@ -2353,7 +2402,7 @@ static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, | |||
2353 | } | 2402 | } |
2354 | 2403 | ||
2355 | /** | 2404 | /** |
2356 | * e1000_mng_host_if_write - Writes to the manageability host interface | 2405 | * e1000_mng_host_if_write - Write to the manageability host interface |
2357 | * @hw: pointer to the HW structure | 2406 | * @hw: pointer to the HW structure |
2358 | * @buffer: pointer to the host interface buffer | 2407 | * @buffer: pointer to the host interface buffer |
2359 | * @length: size of the buffer | 2408 | * @length: size of the buffer |
@@ -2478,7 +2527,7 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) | |||
2478 | { | 2527 | { |
2479 | u32 manc; | 2528 | u32 manc; |
2480 | u32 fwsm, factps; | 2529 | u32 fwsm, factps; |
2481 | bool ret_val = 0; | 2530 | bool ret_val = false; |
2482 | 2531 | ||
2483 | manc = er32(MANC); | 2532 | manc = er32(MANC); |
2484 | 2533 | ||
@@ -2493,13 +2542,13 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) | |||
2493 | if (!(factps & E1000_FACTPS_MNGCG) && | 2542 | if (!(factps & E1000_FACTPS_MNGCG) && |
2494 | ((fwsm & E1000_FWSM_MODE_MASK) == | 2543 | ((fwsm & E1000_FWSM_MODE_MASK) == |
2495 | (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { | 2544 | (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { |
2496 | ret_val = 1; | 2545 | ret_val = true; |
2497 | return ret_val; | 2546 | return ret_val; |
2498 | } | 2547 | } |
2499 | } else { | 2548 | } else { |
2500 | if ((manc & E1000_MANC_SMBUS_EN) && | 2549 | if ((manc & E1000_MANC_SMBUS_EN) && |
2501 | !(manc & E1000_MANC_ASF_EN)) { | 2550 | !(manc & E1000_MANC_ASF_EN)) { |
2502 | ret_val = 1; | 2551 | ret_val = true; |
2503 | return ret_val; | 2552 | return ret_val; |
2504 | } | 2553 | } |
2505 | } | 2554 | } |
@@ -2514,14 +2563,14 @@ s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num) | |||
2514 | 2563 | ||
2515 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); | 2564 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); |
2516 | if (ret_val) { | 2565 | if (ret_val) { |
2517 | hw_dbg(hw, "NVM Read Error\n"); | 2566 | e_dbg("NVM Read Error\n"); |
2518 | return ret_val; | 2567 | return ret_val; |
2519 | } | 2568 | } |
2520 | *pba_num = (u32)(nvm_data << 16); | 2569 | *pba_num = (u32)(nvm_data << 16); |
2521 | 2570 | ||
2522 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); | 2571 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); |
2523 | if (ret_val) { | 2572 | if (ret_val) { |
2524 | hw_dbg(hw, "NVM Read Error\n"); | 2573 | e_dbg("NVM Read Error\n"); |
2525 | return ret_val; | 2574 | return ret_val; |
2526 | } | 2575 | } |
2527 | *pba_num |= nvm_data; | 2576 | *pba_num |= nvm_data; |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index fad8f9ea0043..dbf81788bb40 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/netdevice.h> | 36 | #include <linux/netdevice.h> |
37 | #include <linux/tcp.h> | 37 | #include <linux/tcp.h> |
38 | #include <linux/ipv6.h> | 38 | #include <linux/ipv6.h> |
39 | #include <linux/slab.h> | ||
39 | #include <net/checksum.h> | 40 | #include <net/checksum.h> |
40 | #include <net/ip6_checksum.h> | 41 | #include <net/ip6_checksum.h> |
41 | #include <linux/mii.h> | 42 | #include <linux/mii.h> |
@@ -65,17 +66,6 @@ static const struct e1000_info *e1000_info_tbl[] = { | |||
65 | [board_pchlan] = &e1000_pch_info, | 66 | [board_pchlan] = &e1000_pch_info, |
66 | }; | 67 | }; |
67 | 68 | ||
68 | #ifdef DEBUG | ||
69 | /** | ||
70 | * e1000_get_hw_dev_name - return device name string | ||
71 | * used by hardware layer to print debugging information | ||
72 | **/ | ||
73 | char *e1000e_get_hw_dev_name(struct e1000_hw *hw) | ||
74 | { | ||
75 | return hw->adapter->netdev->name; | ||
76 | } | ||
77 | #endif | ||
78 | |||
79 | /** | 69 | /** |
80 | * e1000_desc_unused - calculate if we have unused descriptors | 70 | * e1000_desc_unused - calculate if we have unused descriptors |
81 | **/ | 71 | **/ |
@@ -167,7 +157,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
167 | struct e1000_buffer *buffer_info; | 157 | struct e1000_buffer *buffer_info; |
168 | struct sk_buff *skb; | 158 | struct sk_buff *skb; |
169 | unsigned int i; | 159 | unsigned int i; |
170 | unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; | 160 | unsigned int bufsz = adapter->rx_buffer_len; |
171 | 161 | ||
172 | i = rx_ring->next_to_use; | 162 | i = rx_ring->next_to_use; |
173 | buffer_info = &rx_ring->buffer_info[i]; | 163 | buffer_info = &rx_ring->buffer_info[i]; |
@@ -179,20 +169,13 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
179 | goto map_skb; | 169 | goto map_skb; |
180 | } | 170 | } |
181 | 171 | ||
182 | skb = netdev_alloc_skb(netdev, bufsz); | 172 | skb = netdev_alloc_skb_ip_align(netdev, bufsz); |
183 | if (!skb) { | 173 | if (!skb) { |
184 | /* Better luck next round */ | 174 | /* Better luck next round */ |
185 | adapter->alloc_rx_buff_failed++; | 175 | adapter->alloc_rx_buff_failed++; |
186 | break; | 176 | break; |
187 | } | 177 | } |
188 | 178 | ||
189 | /* | ||
190 | * Make buffer alignment 2 beyond a 16 byte boundary | ||
191 | * this will result in a 16 byte aligned IP header after | ||
192 | * the 14 byte MAC header is removed | ||
193 | */ | ||
194 | skb_reserve(skb, NET_IP_ALIGN); | ||
195 | |||
196 | buffer_info->skb = skb; | 179 | buffer_info->skb = skb; |
197 | map_skb: | 180 | map_skb: |
198 | buffer_info->dma = pci_map_single(pdev, skb->data, | 181 | buffer_info->dma = pci_map_single(pdev, skb->data, |
@@ -284,21 +267,14 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
284 | cpu_to_le64(ps_page->dma); | 267 | cpu_to_le64(ps_page->dma); |
285 | } | 268 | } |
286 | 269 | ||
287 | skb = netdev_alloc_skb(netdev, | 270 | skb = netdev_alloc_skb_ip_align(netdev, |
288 | adapter->rx_ps_bsize0 + NET_IP_ALIGN); | 271 | adapter->rx_ps_bsize0); |
289 | 272 | ||
290 | if (!skb) { | 273 | if (!skb) { |
291 | adapter->alloc_rx_buff_failed++; | 274 | adapter->alloc_rx_buff_failed++; |
292 | break; | 275 | break; |
293 | } | 276 | } |
294 | 277 | ||
295 | /* | ||
296 | * Make buffer alignment 2 beyond a 16 byte boundary | ||
297 | * this will result in a 16 byte aligned IP header after | ||
298 | * the 14 byte MAC header is removed | ||
299 | */ | ||
300 | skb_reserve(skb, NET_IP_ALIGN); | ||
301 | |||
302 | buffer_info->skb = skb; | 278 | buffer_info->skb = skb; |
303 | buffer_info->dma = pci_map_single(pdev, skb->data, | 279 | buffer_info->dma = pci_map_single(pdev, skb->data, |
304 | adapter->rx_ps_bsize0, | 280 | adapter->rx_ps_bsize0, |
@@ -359,9 +335,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, | |||
359 | struct e1000_buffer *buffer_info; | 335 | struct e1000_buffer *buffer_info; |
360 | struct sk_buff *skb; | 336 | struct sk_buff *skb; |
361 | unsigned int i; | 337 | unsigned int i; |
362 | unsigned int bufsz = 256 - | 338 | unsigned int bufsz = 256 - 16 /* for skb_reserve */; |
363 | 16 /* for skb_reserve */ - | ||
364 | NET_IP_ALIGN; | ||
365 | 339 | ||
366 | i = rx_ring->next_to_use; | 340 | i = rx_ring->next_to_use; |
367 | buffer_info = &rx_ring->buffer_info[i]; | 341 | buffer_info = &rx_ring->buffer_info[i]; |
@@ -373,19 +347,13 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, | |||
373 | goto check_page; | 347 | goto check_page; |
374 | } | 348 | } |
375 | 349 | ||
376 | skb = netdev_alloc_skb(netdev, bufsz); | 350 | skb = netdev_alloc_skb_ip_align(netdev, bufsz); |
377 | if (unlikely(!skb)) { | 351 | if (unlikely(!skb)) { |
378 | /* Better luck next round */ | 352 | /* Better luck next round */ |
379 | adapter->alloc_rx_buff_failed++; | 353 | adapter->alloc_rx_buff_failed++; |
380 | break; | 354 | break; |
381 | } | 355 | } |
382 | 356 | ||
383 | /* Make buffer alignment 2 beyond a 16 byte boundary | ||
384 | * this will result in a 16 byte aligned IP header after | ||
385 | * the 14 byte MAC header is removed | ||
386 | */ | ||
387 | skb_reserve(skb, NET_IP_ALIGN); | ||
388 | |||
389 | buffer_info->skb = skb; | 357 | buffer_info->skb = skb; |
390 | check_page: | 358 | check_page: |
391 | /* allocate a new page if necessary */ | 359 | /* allocate a new page if necessary */ |
@@ -437,6 +405,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
437 | { | 405 | { |
438 | struct net_device *netdev = adapter->netdev; | 406 | struct net_device *netdev = adapter->netdev; |
439 | struct pci_dev *pdev = adapter->pdev; | 407 | struct pci_dev *pdev = adapter->pdev; |
408 | struct e1000_hw *hw = &adapter->hw; | ||
440 | struct e1000_ring *rx_ring = adapter->rx_ring; | 409 | struct e1000_ring *rx_ring = adapter->rx_ring; |
441 | struct e1000_rx_desc *rx_desc, *next_rxd; | 410 | struct e1000_rx_desc *rx_desc, *next_rxd; |
442 | struct e1000_buffer *buffer_info, *next_buffer; | 411 | struct e1000_buffer *buffer_info, *next_buffer; |
@@ -482,14 +451,23 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
482 | 451 | ||
483 | length = le16_to_cpu(rx_desc->length); | 452 | length = le16_to_cpu(rx_desc->length); |
484 | 453 | ||
485 | /* !EOP means multiple descriptors were used to store a single | 454 | /* |
486 | * packet, also make sure the frame isn't just CRC only */ | 455 | * !EOP means multiple descriptors were used to store a single |
487 | if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { | 456 | * packet, if that's the case we need to toss it. In fact, we |
457 | * need to toss every packet with the EOP bit clear and the | ||
458 | * next frame that _does_ have the EOP bit set, as it is by | ||
459 | * definition only a frame fragment | ||
460 | */ | ||
461 | if (unlikely(!(status & E1000_RXD_STAT_EOP))) | ||
462 | adapter->flags2 |= FLAG2_IS_DISCARDING; | ||
463 | |||
464 | if (adapter->flags2 & FLAG2_IS_DISCARDING) { | ||
488 | /* All receives must fit into a single buffer */ | 465 | /* All receives must fit into a single buffer */ |
489 | e_dbg("%s: Receive packet consumed multiple buffers\n", | 466 | e_dbg("Receive packet consumed multiple buffers\n"); |
490 | netdev->name); | ||
491 | /* recycle */ | 467 | /* recycle */ |
492 | buffer_info->skb = skb; | 468 | buffer_info->skb = skb; |
469 | if (status & E1000_RXD_STAT_EOP) | ||
470 | adapter->flags2 &= ~FLAG2_IS_DISCARDING; | ||
493 | goto next_desc; | 471 | goto next_desc; |
494 | } | 472 | } |
495 | 473 | ||
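Aside — the FLAG2_IS_DISCARDING handling added above is a small state machine: once a descriptor without EOP is seen, every buffer is dropped up to and including the next descriptor that does carry EOP. A stand-alone model (types and names illustrative):

	#include <stdbool.h>

	struct rx_state { bool discarding; };

	/* Returns true if this buffer must be dropped instead of passed up the stack. */
	static bool rx_should_discard(struct rx_state *s, bool eop)
	{
		if (!eop)
			s->discarding = true;       /* fragment: start (or keep) discarding */
		if (s->discarding) {
			if (eop)
				s->discarding = false;  /* last piece of the oversized frame */
			return true;
		}
		return false;
	}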
@@ -513,9 +491,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
513 | */ | 491 | */ |
514 | if (length < copybreak) { | 492 | if (length < copybreak) { |
515 | struct sk_buff *new_skb = | 493 | struct sk_buff *new_skb = |
516 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); | 494 | netdev_alloc_skb_ip_align(netdev, length); |
517 | if (new_skb) { | 495 | if (new_skb) { |
518 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
519 | skb_copy_to_linear_data_offset(new_skb, | 496 | skb_copy_to_linear_data_offset(new_skb, |
520 | -NET_IP_ALIGN, | 497 | -NET_IP_ALIGN, |
521 | (skb->data - | 498 | (skb->data - |
@@ -560,33 +537,52 @@ next_desc: | |||
560 | 537 | ||
561 | adapter->total_rx_bytes += total_rx_bytes; | 538 | adapter->total_rx_bytes += total_rx_bytes; |
562 | adapter->total_rx_packets += total_rx_packets; | 539 | adapter->total_rx_packets += total_rx_packets; |
563 | adapter->net_stats.rx_bytes += total_rx_bytes; | 540 | netdev->stats.rx_bytes += total_rx_bytes; |
564 | adapter->net_stats.rx_packets += total_rx_packets; | 541 | netdev->stats.rx_packets += total_rx_packets; |
565 | return cleaned; | 542 | return cleaned; |
566 | } | 543 | } |
567 | 544 | ||
568 | static void e1000_put_txbuf(struct e1000_adapter *adapter, | 545 | static void e1000_put_txbuf(struct e1000_adapter *adapter, |
569 | struct e1000_buffer *buffer_info) | 546 | struct e1000_buffer *buffer_info) |
570 | { | 547 | { |
571 | buffer_info->dma = 0; | 548 | if (buffer_info->dma) { |
549 | if (buffer_info->mapped_as_page) | ||
550 | pci_unmap_page(adapter->pdev, buffer_info->dma, | ||
551 | buffer_info->length, PCI_DMA_TODEVICE); | ||
552 | else | ||
553 | pci_unmap_single(adapter->pdev, buffer_info->dma, | ||
554 | buffer_info->length, | ||
555 | PCI_DMA_TODEVICE); | ||
556 | buffer_info->dma = 0; | ||
557 | } | ||
572 | if (buffer_info->skb) { | 558 | if (buffer_info->skb) { |
573 | skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb, | ||
574 | DMA_TO_DEVICE); | ||
575 | dev_kfree_skb_any(buffer_info->skb); | 559 | dev_kfree_skb_any(buffer_info->skb); |
576 | buffer_info->skb = NULL; | 560 | buffer_info->skb = NULL; |
577 | } | 561 | } |
578 | buffer_info->time_stamp = 0; | 562 | buffer_info->time_stamp = 0; |
579 | } | 563 | } |
580 | 564 | ||
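Aside — e1000_put_txbuf() now tracks how each buffer was mapped instead of relying on skb_dma_unmap(). A cut-down sketch of that bookkeeping (the struct is an illustrative stand-in for struct e1000_buffer):

	#include <linux/pci.h>

	struct tx_buf {
		dma_addr_t   dma;
		unsigned int length;
		bool         mapped_as_page;
	};

	static void tx_buf_unmap(struct pci_dev *pdev, struct tx_buf *b)
	{
		if (!b->dma)
			return;
		/* call the unmap that matches the original pci_map_page/pci_map_single */
		if (b->mapped_as_page)
			pci_unmap_page(pdev, b->dma, b->length, PCI_DMA_TODEVICE);
		else
			pci_unmap_single(pdev, b->dma, b->length, PCI_DMA_TODEVICE);
		b->dma = 0;
	}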
581 | static void e1000_print_tx_hang(struct e1000_adapter *adapter) | 565 | static void e1000_print_hw_hang(struct work_struct *work) |
582 | { | 566 | { |
567 | struct e1000_adapter *adapter = container_of(work, | ||
568 | struct e1000_adapter, | ||
569 | print_hang_task); | ||
583 | struct e1000_ring *tx_ring = adapter->tx_ring; | 570 | struct e1000_ring *tx_ring = adapter->tx_ring; |
584 | unsigned int i = tx_ring->next_to_clean; | 571 | unsigned int i = tx_ring->next_to_clean; |
585 | unsigned int eop = tx_ring->buffer_info[i].next_to_watch; | 572 | unsigned int eop = tx_ring->buffer_info[i].next_to_watch; |
586 | struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); | 573 | struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); |
574 | struct e1000_hw *hw = &adapter->hw; | ||
575 | u16 phy_status, phy_1000t_status, phy_ext_status; | ||
576 | u16 pci_status; | ||
577 | |||
578 | e1e_rphy(hw, PHY_STATUS, &phy_status); | ||
579 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); | ||
580 | e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); | ||
587 | 581 | ||
588 | /* detected Tx unit hang */ | 582 | pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); |
589 | e_err("Detected Tx Unit Hang:\n" | 583 | |
584 | /* detected Hardware unit hang */ | ||
585 | e_err("Detected Hardware Unit Hang:\n" | ||
590 | " TDH <%x>\n" | 586 | " TDH <%x>\n" |
591 | " TDT <%x>\n" | 587 | " TDT <%x>\n" |
592 | " next_to_use <%x>\n" | 588 | " next_to_use <%x>\n" |
@@ -595,7 +591,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter) | |||
595 | " time_stamp <%lx>\n" | 591 | " time_stamp <%lx>\n" |
596 | " next_to_watch <%x>\n" | 592 | " next_to_watch <%x>\n" |
597 | " jiffies <%lx>\n" | 593 | " jiffies <%lx>\n" |
598 | " next_to_watch.status <%x>\n", | 594 | " next_to_watch.status <%x>\n" |
595 | "MAC Status <%x>\n" | ||
596 | "PHY Status <%x>\n" | ||
597 | "PHY 1000BASE-T Status <%x>\n" | ||
598 | "PHY Extended Status <%x>\n" | ||
599 | "PCI Status <%x>\n", | ||
599 | readl(adapter->hw.hw_addr + tx_ring->head), | 600 | readl(adapter->hw.hw_addr + tx_ring->head), |
600 | readl(adapter->hw.hw_addr + tx_ring->tail), | 601 | readl(adapter->hw.hw_addr + tx_ring->tail), |
601 | tx_ring->next_to_use, | 602 | tx_ring->next_to_use, |
@@ -603,7 +604,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter) | |||
603 | tx_ring->buffer_info[eop].time_stamp, | 604 | tx_ring->buffer_info[eop].time_stamp, |
604 | eop, | 605 | eop, |
605 | jiffies, | 606 | jiffies, |
606 | eop_desc->upper.fields.status); | 607 | eop_desc->upper.fields.status, |
608 | er32(STATUS), | ||
609 | phy_status, | ||
610 | phy_1000t_status, | ||
611 | phy_ext_status, | ||
612 | pci_status); | ||
607 | } | 613 | } |
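Aside — the hang report moved from the Tx clean path into a work item because the extra PHY and PCI config reads it now performs may sleep, which is not allowed in that context. The underlying pattern, with only the workqueue API taken as given and the other names illustrative:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct my_adapter {
		struct work_struct hang_task;
		/* ... */
	};

	static void hang_task_fn(struct work_struct *work)
	{
		struct my_adapter *ad =
			container_of(work, struct my_adapter, hang_task);
		/* process context: safe to take semaphores, read PHY/PCI state */
		(void)ad;
	}

	/* at init:      INIT_WORK(&ad->hang_task, hang_task_fn);  */
	/* in hot path:  schedule_work(&ad->hang_task);            */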
608 | 614 | ||
609 | /** | 615 | /** |
@@ -655,6 +661,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |||
655 | i = 0; | 661 | i = 0; |
656 | } | 662 | } |
657 | 663 | ||
664 | if (i == tx_ring->next_to_use) | ||
665 | break; | ||
658 | eop = tx_ring->buffer_info[i].next_to_watch; | 666 | eop = tx_ring->buffer_info[i].next_to_watch; |
659 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | 667 | eop_desc = E1000_TX_DESC(*tx_ring, eop); |
660 | } | 668 | } |
@@ -677,21 +685,23 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |||
677 | } | 685 | } |
678 | 686 | ||
679 | if (adapter->detect_tx_hung) { | 687 | if (adapter->detect_tx_hung) { |
680 | /* Detect a transmit hang in hardware, this serializes the | 688 | /* |
681 | * check with the clearing of time_stamp and movement of i */ | 689 | * Detect a transmit hang in hardware, this serializes the |
690 | * check with the clearing of time_stamp and movement of i | ||
691 | */ | ||
682 | adapter->detect_tx_hung = 0; | 692 | adapter->detect_tx_hung = 0; |
683 | if (tx_ring->buffer_info[i].time_stamp && | 693 | if (tx_ring->buffer_info[i].time_stamp && |
684 | time_after(jiffies, tx_ring->buffer_info[i].time_stamp | 694 | time_after(jiffies, tx_ring->buffer_info[i].time_stamp |
685 | + (adapter->tx_timeout_factor * HZ)) | 695 | + (adapter->tx_timeout_factor * HZ)) && |
686 | && !(er32(STATUS) & E1000_STATUS_TXOFF)) { | 696 | !(er32(STATUS) & E1000_STATUS_TXOFF)) { |
687 | e1000_print_tx_hang(adapter); | 697 | schedule_work(&adapter->print_hang_task); |
688 | netif_stop_queue(netdev); | 698 | netif_stop_queue(netdev); |
689 | } | 699 | } |
690 | } | 700 | } |
691 | adapter->total_tx_bytes += total_tx_bytes; | 701 | adapter->total_tx_bytes += total_tx_bytes; |
692 | adapter->total_tx_packets += total_tx_packets; | 702 | adapter->total_tx_packets += total_tx_packets; |
693 | adapter->net_stats.tx_bytes += total_tx_bytes; | 703 | netdev->stats.tx_bytes += total_tx_bytes; |
694 | adapter->net_stats.tx_packets += total_tx_packets; | 704 | netdev->stats.tx_packets += total_tx_packets; |
695 | return (count < tx_ring->count); | 705 | return (count < tx_ring->count); |
696 | } | 706 | } |
697 | 707 | ||
@@ -705,6 +715,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |||
705 | static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | 715 | static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, |
706 | int *work_done, int work_to_do) | 716 | int *work_done, int work_to_do) |
707 | { | 717 | { |
718 | struct e1000_hw *hw = &adapter->hw; | ||
708 | union e1000_rx_desc_packet_split *rx_desc, *next_rxd; | 719 | union e1000_rx_desc_packet_split *rx_desc, *next_rxd; |
709 | struct net_device *netdev = adapter->netdev; | 720 | struct net_device *netdev = adapter->netdev; |
710 | struct pci_dev *pdev = adapter->pdev; | 721 | struct pci_dev *pdev = adapter->pdev; |
@@ -747,10 +758,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
747 | PCI_DMA_FROMDEVICE); | 758 | PCI_DMA_FROMDEVICE); |
748 | buffer_info->dma = 0; | 759 | buffer_info->dma = 0; |
749 | 760 | ||
750 | if (!(staterr & E1000_RXD_STAT_EOP)) { | 761 | /* see !EOP comment in other rx routine */ |
751 | e_dbg("%s: Packet Split buffers didn't pick up the " | 762 | if (!(staterr & E1000_RXD_STAT_EOP)) |
752 | "full packet\n", netdev->name); | 763 | adapter->flags2 |= FLAG2_IS_DISCARDING; |
764 | |||
765 | if (adapter->flags2 & FLAG2_IS_DISCARDING) { | ||
766 | e_dbg("Packet Split buffers didn't pick up the full " | ||
767 | "packet\n"); | ||
753 | dev_kfree_skb_irq(skb); | 768 | dev_kfree_skb_irq(skb); |
769 | if (staterr & E1000_RXD_STAT_EOP) | ||
770 | adapter->flags2 &= ~FLAG2_IS_DISCARDING; | ||
754 | goto next_desc; | 771 | goto next_desc; |
755 | } | 772 | } |
756 | 773 | ||
@@ -762,8 +779,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
762 | length = le16_to_cpu(rx_desc->wb.middle.length0); | 779 | length = le16_to_cpu(rx_desc->wb.middle.length0); |
763 | 780 | ||
764 | if (!length) { | 781 | if (!length) { |
765 | e_dbg("%s: Last part of the packet spanning multiple " | 782 | e_dbg("Last part of the packet spanning multiple " |
766 | "descriptors\n", netdev->name); | 783 | "descriptors\n"); |
767 | dev_kfree_skb_irq(skb); | 784 | dev_kfree_skb_irq(skb); |
768 | goto next_desc; | 785 | goto next_desc; |
769 | } | 786 | } |
@@ -871,8 +888,8 @@ next_desc: | |||
871 | 888 | ||
872 | adapter->total_rx_bytes += total_rx_bytes; | 889 | adapter->total_rx_bytes += total_rx_bytes; |
873 | adapter->total_rx_packets += total_rx_packets; | 890 | adapter->total_rx_packets += total_rx_packets; |
874 | adapter->net_stats.rx_bytes += total_rx_bytes; | 891 | netdev->stats.rx_bytes += total_rx_bytes; |
875 | adapter->net_stats.rx_packets += total_rx_packets; | 892 | netdev->stats.rx_packets += total_rx_packets; |
876 | return cleaned; | 893 | return cleaned; |
877 | } | 894 | } |
878 | 895 | ||
@@ -1051,8 +1068,8 @@ next_desc: | |||
1051 | 1068 | ||
1052 | adapter->total_rx_bytes += total_rx_bytes; | 1069 | adapter->total_rx_bytes += total_rx_bytes; |
1053 | adapter->total_rx_packets += total_rx_packets; | 1070 | adapter->total_rx_packets += total_rx_packets; |
1054 | adapter->net_stats.rx_bytes += total_rx_bytes; | 1071 | netdev->stats.rx_bytes += total_rx_bytes; |
1055 | adapter->net_stats.rx_packets += total_rx_packets; | 1072 | netdev->stats.rx_packets += total_rx_packets; |
1056 | return cleaned; | 1073 | return cleaned; |
1057 | } | 1074 | } |
1058 | 1075 | ||
@@ -1120,6 +1137,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) | |||
1120 | 1137 | ||
1121 | rx_ring->next_to_clean = 0; | 1138 | rx_ring->next_to_clean = 0; |
1122 | rx_ring->next_to_use = 0; | 1139 | rx_ring->next_to_use = 0; |
1140 | adapter->flags2 &= ~FLAG2_IS_DISCARDING; | ||
1123 | 1141 | ||
1124 | writel(0, adapter->hw.hw_addr + rx_ring->head); | 1142 | writel(0, adapter->hw.hw_addr + rx_ring->head); |
1125 | writel(0, adapter->hw.hw_addr + rx_ring->tail); | 1143 | writel(0, adapter->hw.hw_addr + rx_ring->tail); |
@@ -1199,7 +1217,7 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
1199 | struct e1000_hw *hw = &adapter->hw; | 1217 | struct e1000_hw *hw = &adapter->hw; |
1200 | u32 rctl, icr = er32(ICR); | 1218 | u32 rctl, icr = er32(ICR); |
1201 | 1219 | ||
1202 | if (!icr) | 1220 | if (!icr || test_bit(__E1000_DOWN, &adapter->state)) |
1203 | return IRQ_NONE; /* Not our interrupt */ | 1221 | return IRQ_NONE; /* Not our interrupt */ |
1204 | 1222 | ||
1205 | /* | 1223 | /* |
@@ -1481,7 +1499,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter) | |||
1481 | else | 1499 | else |
1482 | memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); | 1500 | memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); |
1483 | err = request_irq(adapter->msix_entries[vector].vector, | 1501 | err = request_irq(adapter->msix_entries[vector].vector, |
1484 | &e1000_intr_msix_rx, 0, adapter->rx_ring->name, | 1502 | e1000_intr_msix_rx, 0, adapter->rx_ring->name, |
1485 | netdev); | 1503 | netdev); |
1486 | if (err) | 1504 | if (err) |
1487 | goto out; | 1505 | goto out; |
@@ -1494,7 +1512,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter) | |||
1494 | else | 1512 | else |
1495 | memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); | 1513 | memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); |
1496 | err = request_irq(adapter->msix_entries[vector].vector, | 1514 | err = request_irq(adapter->msix_entries[vector].vector, |
1497 | &e1000_intr_msix_tx, 0, adapter->tx_ring->name, | 1515 | e1000_intr_msix_tx, 0, adapter->tx_ring->name, |
1498 | netdev); | 1516 | netdev); |
1499 | if (err) | 1517 | if (err) |
1500 | goto out; | 1518 | goto out; |
@@ -1503,7 +1521,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter) | |||
1503 | vector++; | 1521 | vector++; |
1504 | 1522 | ||
1505 | err = request_irq(adapter->msix_entries[vector].vector, | 1523 | err = request_irq(adapter->msix_entries[vector].vector, |
1506 | &e1000_msix_other, 0, netdev->name, netdev); | 1524 | e1000_msix_other, 0, netdev->name, netdev); |
1507 | if (err) | 1525 | if (err) |
1508 | goto out; | 1526 | goto out; |
1509 | 1527 | ||
@@ -1534,7 +1552,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) | |||
1534 | e1000e_set_interrupt_capability(adapter); | 1552 | e1000e_set_interrupt_capability(adapter); |
1535 | } | 1553 | } |
1536 | if (adapter->flags & FLAG_MSI_ENABLED) { | 1554 | if (adapter->flags & FLAG_MSI_ENABLED) { |
1537 | err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0, | 1555 | err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, |
1538 | netdev->name, netdev); | 1556 | netdev->name, netdev); |
1539 | if (!err) | 1557 | if (!err) |
1540 | return err; | 1558 | return err; |
@@ -1544,7 +1562,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) | |||
1544 | adapter->int_mode = E1000E_INT_MODE_LEGACY; | 1562 | adapter->int_mode = E1000E_INT_MODE_LEGACY; |
1545 | } | 1563 | } |
1546 | 1564 | ||
1547 | err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED, | 1565 | err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, |
1548 | netdev->name, netdev); | 1566 | netdev->name, netdev); |
1549 | if (err) | 1567 | if (err) |
1550 | e_err("Unable to allocate interrupt, Error: %d\n", err); | 1568 | e_err("Unable to allocate interrupt, Error: %d\n", err); |
@@ -2040,11 +2058,14 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | |||
2040 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && | 2058 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && |
2041 | (vid == adapter->mng_vlan_id)) | 2059 | (vid == adapter->mng_vlan_id)) |
2042 | return; | 2060 | return; |
2061 | |||
2043 | /* add VID to filter table */ | 2062 | /* add VID to filter table */ |
2044 | index = (vid >> 5) & 0x7F; | 2063 | if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { |
2045 | vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); | 2064 | index = (vid >> 5) & 0x7F; |
2046 | vfta |= (1 << (vid & 0x1F)); | 2065 | vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); |
2047 | e1000e_write_vfta(hw, index, vfta); | 2066 | vfta |= (1 << (vid & 0x1F)); |
2067 | hw->mac.ops.write_vfta(hw, index, vfta); | ||
2068 | } | ||
2048 | } | 2069 | } |
2049 | 2070 | ||
2050 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | 2071 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) |
@@ -2069,10 +2090,12 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
2069 | } | 2090 | } |
2070 | 2091 | ||
2071 | /* remove VID from filter table */ | 2092 | /* remove VID from filter table */ |
2072 | index = (vid >> 5) & 0x7F; | 2093 | if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { |
2073 | vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); | 2094 | index = (vid >> 5) & 0x7F; |
2074 | vfta &= ~(1 << (vid & 0x1F)); | 2095 | vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); |
2075 | e1000e_write_vfta(hw, index, vfta); | 2096 | vfta &= ~(1 << (vid & 0x1F)); |
2097 | hw->mac.ops.write_vfta(hw, index, vfta); | ||
2098 | } | ||
2076 | } | 2099 | } |
2077 | 2100 | ||
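Aside — both VLAN hunks above index the filter table the same way: the 4096-entry VLAN bitmap is stored as 128 32-bit VFTA registers, so bits 5-11 of the VID select the register and bits 0-4 select the bit within it. A minimal model of that addressing:

	#include <stdint.h>

	static void vfta_pos(uint16_t vid, uint32_t *index, uint32_t *mask)
	{
		*index = (vid >> 5) & 0x7F;   /* which of the 128 VFTA registers */
		*mask  = 1u << (vid & 0x1F);  /* which bit within that register */
	}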
2078 | static void e1000_update_mng_vlan(struct e1000_adapter *adapter) | 2101 | static void e1000_update_mng_vlan(struct e1000_adapter *adapter) |
@@ -2269,8 +2292,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
2269 | ew32(TCTL, tctl); | 2292 | ew32(TCTL, tctl); |
2270 | 2293 | ||
2271 | e1000e_config_collision_dist(hw); | 2294 | e1000e_config_collision_dist(hw); |
2272 | |||
2273 | adapter->tx_queue_len = adapter->netdev->tx_queue_len; | ||
2274 | } | 2295 | } |
2275 | 2296 | ||
2276 | /** | 2297 | /** |
@@ -2330,18 +2351,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
2330 | rctl &= ~E1000_RCTL_SZ_4096; | 2351 | rctl &= ~E1000_RCTL_SZ_4096; |
2331 | rctl |= E1000_RCTL_BSEX; | 2352 | rctl |= E1000_RCTL_BSEX; |
2332 | switch (adapter->rx_buffer_len) { | 2353 | switch (adapter->rx_buffer_len) { |
2333 | case 256: | ||
2334 | rctl |= E1000_RCTL_SZ_256; | ||
2335 | rctl &= ~E1000_RCTL_BSEX; | ||
2336 | break; | ||
2337 | case 512: | ||
2338 | rctl |= E1000_RCTL_SZ_512; | ||
2339 | rctl &= ~E1000_RCTL_BSEX; | ||
2340 | break; | ||
2341 | case 1024: | ||
2342 | rctl |= E1000_RCTL_SZ_1024; | ||
2343 | rctl &= ~E1000_RCTL_BSEX; | ||
2344 | break; | ||
2345 | case 2048: | 2354 | case 2048: |
2346 | default: | 2355 | default: |
2347 | rctl |= E1000_RCTL_SZ_2048; | 2356 | rctl |= E1000_RCTL_SZ_2048; |
@@ -2464,8 +2473,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2464 | ew32(ITR, 1000000000 / (adapter->itr * 256)); | 2473 | ew32(ITR, 1000000000 / (adapter->itr * 256)); |
2465 | 2474 | ||
2466 | ctrl_ext = er32(CTRL_EXT); | 2475 | ctrl_ext = er32(CTRL_EXT); |
2467 | /* Reset delay timers after every interrupt */ | ||
2468 | ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; | ||
2469 | /* Auto-Mask interrupts upon ICR access */ | 2476 | /* Auto-Mask interrupts upon ICR access */ |
2470 | ctrl_ext |= E1000_CTRL_EXT_IAME; | 2477 | ctrl_ext |= E1000_CTRL_EXT_IAME; |
2471 | ew32(IAM, 0xffffffff); | 2478 | ew32(IAM, 0xffffffff); |
@@ -2507,21 +2514,23 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2507 | * packet size is equal or larger than the specified value (in 8 byte | 2514 | * packet size is equal or larger than the specified value (in 8 byte |
2508 | * units), e.g. using jumbo frames when setting to E1000_ERT_2048 | 2515 | * units), e.g. using jumbo frames when setting to E1000_ERT_2048 |
2509 | */ | 2516 | */ |
2510 | if ((adapter->flags & FLAG_HAS_ERT) && | 2517 | if (adapter->flags & FLAG_HAS_ERT) { |
2511 | (adapter->netdev->mtu > ETH_DATA_LEN)) { | 2518 | if (adapter->netdev->mtu > ETH_DATA_LEN) { |
2512 | u32 rxdctl = er32(RXDCTL(0)); | 2519 | u32 rxdctl = er32(RXDCTL(0)); |
2513 | ew32(RXDCTL(0), rxdctl | 0x3); | 2520 | ew32(RXDCTL(0), rxdctl | 0x3); |
2514 | ew32(ERT, E1000_ERT_2048 | (1 << 13)); | 2521 | ew32(ERT, E1000_ERT_2048 | (1 << 13)); |
2515 | /* | 2522 | /* |
2516 | * With jumbo frames and early-receive enabled, excessive | 2523 | * With jumbo frames and early-receive enabled, |
2517 | * C4->C2 latencies result in dropped transactions. | 2524 | * excessive C-state transition latencies result in |
2518 | */ | 2525 | * dropped transactions. |
2519 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, | 2526 | */ |
2520 | e1000e_driver_name, 55); | 2527 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, |
2521 | } else { | 2528 | adapter->netdev->name, 55); |
2522 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, | 2529 | } else { |
2523 | e1000e_driver_name, | 2530 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, |
2524 | PM_QOS_DEFAULT_VALUE); | 2531 | adapter->netdev->name, |
2532 | PM_QOS_DEFAULT_VALUE); | ||
2533 | } | ||
2525 | } | 2534 | } |
2526 | 2535 | ||
2527 | /* Enable Receives */ | 2536 | /* Enable Receives */ |
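Aside — the restructured block above keeps the existing PM QoS calls but names the requirement after the netdev rather than the driver: with early receive and jumbo frames active the driver asks for a 55 us CPU DMA latency bound, and relaxes it otherwise. The pattern, using only the pm_qos calls already visible in the hunk (the helper name is illustrative):

	#include <linux/pm_qos_params.h>

	static void update_latency_bound(char *name, bool jumbo_ert_active)
	{
		if (jumbo_ert_active)
			pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, name, 55);
		else
			pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, name,
						  PM_QOS_DEFAULT_VALUE);
	}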
@@ -2533,22 +2542,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2533 | * @hw: pointer to the HW structure | 2542 | * @hw: pointer to the HW structure |
2534 | * @mc_addr_list: array of multicast addresses to program | 2543 | * @mc_addr_list: array of multicast addresses to program |
2535 | * @mc_addr_count: number of multicast addresses to program | 2544 | * @mc_addr_count: number of multicast addresses to program |
2536 | * @rar_used_count: the first RAR register free to program | ||
2537 | * @rar_count: total number of supported Receive Address Registers | ||
2538 | * | 2545 | * |
2539 | * Updates the Receive Address Registers and Multicast Table Array. | 2546 | * Updates the Multicast Table Array. |
2540 | * The caller must have a packed mc_addr_list of multicast addresses. | 2547 | * The caller must have a packed mc_addr_list of multicast addresses. |
2541 | * The parameter rar_count will usually be hw->mac.rar_entry_count | ||
2542 | * unless there are workarounds that change this. Currently no func pointer | ||
2543 | * exists and all implementations are handled in the generic version of this | ||
2544 | * function. | ||
2545 | **/ | 2548 | **/ |
2546 | static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, | 2549 | static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, |
2547 | u32 mc_addr_count, u32 rar_used_count, | 2550 | u32 mc_addr_count) |
2548 | u32 rar_count) | ||
2549 | { | 2551 | { |
2550 | hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count, | 2552 | hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count); |
2551 | rar_used_count, rar_count); | ||
2552 | } | 2553 | } |
2553 | 2554 | ||
2554 | /** | 2555 | /** |
@@ -2564,7 +2565,6 @@ static void e1000_set_multi(struct net_device *netdev) | |||
2564 | { | 2565 | { |
2565 | struct e1000_adapter *adapter = netdev_priv(netdev); | 2566 | struct e1000_adapter *adapter = netdev_priv(netdev); |
2566 | struct e1000_hw *hw = &adapter->hw; | 2567 | struct e1000_hw *hw = &adapter->hw; |
2567 | struct e1000_mac_info *mac = &hw->mac; | ||
2568 | struct dev_mc_list *mc_ptr; | 2568 | struct dev_mc_list *mc_ptr; |
2569 | u8 *mta_list; | 2569 | u8 *mta_list; |
2570 | u32 rctl; | 2570 | u32 rctl; |
@@ -2590,31 +2590,25 @@ static void e1000_set_multi(struct net_device *netdev) | |||
2590 | 2590 | ||
2591 | ew32(RCTL, rctl); | 2591 | ew32(RCTL, rctl); |
2592 | 2592 | ||
2593 | if (netdev->mc_count) { | 2593 | if (!netdev_mc_empty(netdev)) { |
2594 | mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC); | 2594 | mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); |
2595 | if (!mta_list) | 2595 | if (!mta_list) |
2596 | return; | 2596 | return; |
2597 | 2597 | ||
2598 | /* prepare a packed array of only addresses. */ | 2598 | /* prepare a packed array of only addresses. */ |
2599 | mc_ptr = netdev->mc_list; | 2599 | i = 0; |
2600 | 2600 | netdev_for_each_mc_addr(mc_ptr, netdev) | |
2601 | for (i = 0; i < netdev->mc_count; i++) { | 2601 | memcpy(mta_list + (i++ * ETH_ALEN), |
2602 | if (!mc_ptr) | 2602 | mc_ptr->dmi_addr, ETH_ALEN); |
2603 | break; | ||
2604 | memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, | ||
2605 | ETH_ALEN); | ||
2606 | mc_ptr = mc_ptr->next; | ||
2607 | } | ||
2608 | 2603 | ||
2609 | e1000_update_mc_addr_list(hw, mta_list, i, 1, | 2604 | e1000_update_mc_addr_list(hw, mta_list, i); |
2610 | mac->rar_entry_count); | ||
2611 | kfree(mta_list); | 2605 | kfree(mta_list); |
2612 | } else { | 2606 | } else { |
2613 | /* | 2607 | /* |
2614 | * if we're called from probe, we might not have | 2608 | * if we're called from probe, we might not have |
2615 | * anything to do here, so clear out the list | 2609 | * anything to do here, so clear out the list |
2616 | */ | 2610 | */ |
2617 | e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count); | 2611 | e1000_update_mc_addr_list(hw, NULL, 0); |
2618 | } | 2612 | } |
2619 | } | 2613 | } |
2620 | 2614 | ||
@@ -2645,18 +2639,8 @@ static void e1000_configure(struct e1000_adapter *adapter) | |||
2645 | **/ | 2639 | **/ |
2646 | void e1000e_power_up_phy(struct e1000_adapter *adapter) | 2640 | void e1000e_power_up_phy(struct e1000_adapter *adapter) |
2647 | { | 2641 | { |
2648 | u16 mii_reg = 0; | 2642 | if (adapter->hw.phy.ops.power_up) |
2649 | 2643 | adapter->hw.phy.ops.power_up(&adapter->hw); | |
2650 | /* Just clear the power down bit to wake the phy back up */ | ||
2651 | if (adapter->hw.phy.media_type == e1000_media_type_copper) { | ||
2652 | /* | ||
2653 | * According to the manual, the phy will retain its | ||
2654 | * settings across a power-down/up cycle | ||
2655 | */ | ||
2656 | e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg); | ||
2657 | mii_reg &= ~MII_CR_POWER_DOWN; | ||
2658 | e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg); | ||
2659 | } | ||
2660 | 2644 | ||
2661 | adapter->hw.mac.ops.setup_link(&adapter->hw); | 2645 | adapter->hw.mac.ops.setup_link(&adapter->hw); |
2662 | } | 2646 | } |
@@ -2664,35 +2648,17 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter) | |||
2664 | /** | 2648 | /** |
2665 | * e1000_power_down_phy - Power down the PHY | 2649 | * e1000_power_down_phy - Power down the PHY |
2666 | * | 2650 | * |
2667 | * Power down the PHY so no link is implied when interface is down | 2651 | * Power down the PHY so no link is implied when interface is down. |
2668 | * The PHY cannot be powered down is management or WoL is active | 2652 | * The PHY cannot be powered down if management or WoL is active. |
2669 | */ | 2653 | */ |
2670 | static void e1000_power_down_phy(struct e1000_adapter *adapter) | 2654 | static void e1000_power_down_phy(struct e1000_adapter *adapter) |
2671 | { | 2655 | { |
2672 | struct e1000_hw *hw = &adapter->hw; | ||
2673 | u16 mii_reg; | ||
2674 | |||
2675 | /* WoL is enabled */ | 2656 | /* WoL is enabled */ |
2676 | if (adapter->wol) | 2657 | if (adapter->wol) |
2677 | return; | 2658 | return; |
2678 | 2659 | ||
2679 | /* non-copper PHY? */ | 2660 | if (adapter->hw.phy.ops.power_down) |
2680 | if (adapter->hw.phy.media_type != e1000_media_type_copper) | 2661 | adapter->hw.phy.ops.power_down(&adapter->hw); |
2681 | return; | ||
2682 | |||
2683 | /* reset is blocked because of a SoL/IDER session */ | ||
2684 | if (e1000e_check_mng_mode(hw) || e1000_check_reset_block(hw)) | ||
2685 | return; | ||
2686 | |||
2687 | /* manageability (AMT) is enabled */ | ||
2688 | if (er32(MANC) & E1000_MANC_SMBUS_EN) | ||
2689 | return; | ||
2690 | |||
2691 | /* power down the PHY */ | ||
2692 | e1e_rphy(hw, PHY_CONTROL, &mii_reg); | ||
2693 | mii_reg |= MII_CR_POWER_DOWN; | ||
2694 | e1e_wphy(hw, PHY_CONTROL, mii_reg); | ||
2695 | mdelay(1); | ||
2696 | } | 2662 | } |
2697 | 2663 | ||
2698 | /** | 2664 | /** |
@@ -2856,6 +2822,12 @@ int e1000e_up(struct e1000_adapter *adapter) | |||
2856 | { | 2822 | { |
2857 | struct e1000_hw *hw = &adapter->hw; | 2823 | struct e1000_hw *hw = &adapter->hw; |
2858 | 2824 | ||
2825 | /* DMA latency requirement to workaround early-receive/jumbo issue */ | ||
2826 | if (adapter->flags & FLAG_HAS_ERT) | ||
2827 | pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, | ||
2828 | adapter->netdev->name, | ||
2829 | PM_QOS_DEFAULT_VALUE); | ||
2830 | |||
2859 | /* hardware has been reset, we need to reload some things */ | 2831 | /* hardware has been reset, we need to reload some things */ |
2860 | e1000_configure(adapter); | 2832 | e1000_configure(adapter); |
2861 | 2833 | ||
@@ -2906,7 +2878,6 @@ void e1000e_down(struct e1000_adapter *adapter) | |||
2906 | del_timer_sync(&adapter->watchdog_timer); | 2878 | del_timer_sync(&adapter->watchdog_timer); |
2907 | del_timer_sync(&adapter->phy_info_timer); | 2879 | del_timer_sync(&adapter->phy_info_timer); |
2908 | 2880 | ||
2909 | netdev->tx_queue_len = adapter->tx_queue_len; | ||
2910 | netif_carrier_off(netdev); | 2881 | netif_carrier_off(netdev); |
2911 | adapter->link_speed = 0; | 2882 | adapter->link_speed = 0; |
2912 | adapter->link_duplex = 0; | 2883 | adapter->link_duplex = 0; |
@@ -2916,6 +2887,10 @@ void e1000e_down(struct e1000_adapter *adapter) | |||
2916 | e1000_clean_tx_ring(adapter); | 2887 | e1000_clean_tx_ring(adapter); |
2917 | e1000_clean_rx_ring(adapter); | 2888 | e1000_clean_rx_ring(adapter); |
2918 | 2889 | ||
2890 | if (adapter->flags & FLAG_HAS_ERT) | ||
2891 | pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, | ||
2892 | adapter->netdev->name); | ||
2893 | |||
2919 | /* | 2894 | /* |
2920 | * TODO: for power management, we could drop the link and | 2895 | * TODO: for power management, we could drop the link and |
2921 | * pci_disable_device here. | 2896 | * pci_disable_device here. |
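The PM QoS hunks above (and the matching module-init/exit hunks near the end of this file) move the CPU DMA latency constraint from module load/unload into e1000e_up()/e1000e_down(), so it is only held while an ERT-capable interface is actually up. A rough sketch against the pm_qos interface of this kernel generation, assuming <linux/pm_qos_params.h> and the same PM_QOS_DEFAULT_VALUE placeholder:

        #include <linux/pm_qos_params.h>

        /* Sketch: hold the DMA-latency requirement only while the NIC is up. */
        static void sketch_if_up(char *ifname)
        {
                pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, ifname,
                                       PM_QOS_DEFAULT_VALUE);
        }

        static void sketch_if_down(char *ifname)
        {
                pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, ifname);
        }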
@@ -2973,7 +2948,7 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data) | |||
2973 | struct e1000_hw *hw = &adapter->hw; | 2948 | struct e1000_hw *hw = &adapter->hw; |
2974 | u32 icr = er32(ICR); | 2949 | u32 icr = er32(ICR); |
2975 | 2950 | ||
2976 | e_dbg("%s: icr is %08X\n", netdev->name, icr); | 2951 | e_dbg("icr is %08X\n", icr); |
2977 | if (icr & E1000_ICR_RXSEQ) { | 2952 | if (icr & E1000_ICR_RXSEQ) { |
2978 | adapter->flags &= ~FLAG_MSI_TEST_FAILED; | 2953 | adapter->flags &= ~FLAG_MSI_TEST_FAILED; |
2979 | wmb(); | 2954 | wmb(); |
@@ -3010,7 +2985,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) | |||
3010 | if (err) | 2985 | if (err) |
3011 | goto msi_test_failed; | 2986 | goto msi_test_failed; |
3012 | 2987 | ||
3013 | err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0, | 2988 | err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, |
3014 | netdev->name, netdev); | 2989 | netdev->name, netdev); |
3015 | if (err) { | 2990 | if (err) { |
3016 | pci_disable_msi(adapter->pdev); | 2991 | pci_disable_msi(adapter->pdev); |
@@ -3043,7 +3018,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) | |||
3043 | goto msi_test_failed; | 3018 | goto msi_test_failed; |
3044 | 3019 | ||
3045 | /* okay so the test worked, restore settings */ | 3020 | /* okay so the test worked, restore settings */ |
3046 | e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name); | 3021 | e_dbg("MSI interrupt test succeeded!\n"); |
3047 | msi_test_failed: | 3022 | msi_test_failed: |
3048 | e1000e_set_interrupt_capability(adapter); | 3023 | e1000e_set_interrupt_capability(adapter); |
3049 | e1000_request_irq(adapter); | 3024 | e1000_request_irq(adapter); |
@@ -3304,6 +3279,7 @@ static void e1000_update_phy_info(unsigned long data) | |||
3304 | **/ | 3279 | **/ |
3305 | void e1000e_update_stats(struct e1000_adapter *adapter) | 3280 | void e1000e_update_stats(struct e1000_adapter *adapter) |
3306 | { | 3281 | { |
3282 | struct net_device *netdev = adapter->netdev; | ||
3307 | struct e1000_hw *hw = &adapter->hw; | 3283 | struct e1000_hw *hw = &adapter->hw; |
3308 | struct pci_dev *pdev = adapter->pdev; | 3284 | struct pci_dev *pdev = adapter->pdev; |
3309 | u16 phy_data; | 3285 | u16 phy_data; |
@@ -3329,24 +3305,24 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
3329 | if ((hw->phy.type == e1000_phy_82578) || | 3305 | if ((hw->phy.type == e1000_phy_82578) || |
3330 | (hw->phy.type == e1000_phy_82577)) { | 3306 | (hw->phy.type == e1000_phy_82577)) { |
3331 | e1e_rphy(hw, HV_SCC_UPPER, &phy_data); | 3307 | e1e_rphy(hw, HV_SCC_UPPER, &phy_data); |
3332 | e1e_rphy(hw, HV_SCC_LOWER, &phy_data); | 3308 | if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data)) |
3333 | adapter->stats.scc += phy_data; | 3309 | adapter->stats.scc += phy_data; |
3334 | 3310 | ||
3335 | e1e_rphy(hw, HV_ECOL_UPPER, &phy_data); | 3311 | e1e_rphy(hw, HV_ECOL_UPPER, &phy_data); |
3336 | e1e_rphy(hw, HV_ECOL_LOWER, &phy_data); | 3312 | if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data)) |
3337 | adapter->stats.ecol += phy_data; | 3313 | adapter->stats.ecol += phy_data; |
3338 | 3314 | ||
3339 | e1e_rphy(hw, HV_MCC_UPPER, &phy_data); | 3315 | e1e_rphy(hw, HV_MCC_UPPER, &phy_data); |
3340 | e1e_rphy(hw, HV_MCC_LOWER, &phy_data); | 3316 | if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data)) |
3341 | adapter->stats.mcc += phy_data; | 3317 | adapter->stats.mcc += phy_data; |
3342 | 3318 | ||
3343 | e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data); | 3319 | e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data); |
3344 | e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data); | 3320 | if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data)) |
3345 | adapter->stats.latecol += phy_data; | 3321 | adapter->stats.latecol += phy_data; |
3346 | 3322 | ||
3347 | e1e_rphy(hw, HV_DC_UPPER, &phy_data); | 3323 | e1e_rphy(hw, HV_DC_UPPER, &phy_data); |
3348 | e1e_rphy(hw, HV_DC_LOWER, &phy_data); | 3324 | if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data)) |
3349 | adapter->stats.dc += phy_data; | 3325 | adapter->stats.dc += phy_data; |
3350 | } else { | 3326 | } else { |
3351 | adapter->stats.scc += er32(SCC); | 3327 | adapter->stats.scc += er32(SCC); |
3352 | adapter->stats.ecol += er32(ECOL); | 3328 | adapter->stats.ecol += er32(ECOL); |
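The statistics hunk above stops accumulating garbage when a PHY read fails: the UPPER register read latches the counter, and the LOWER value is only added when its read returns 0. A condensed sketch of that pattern, assuming it sits inside the driver where e1e_rphy() and the kernel integer types are in scope (the helper name is hypothetical):

        static void sketch_add_phy_stat(struct e1000_hw *hw, u32 upper, u32 lower,
                                        u64 *total)
        {
                u16 phy_data;

                e1e_rphy(hw, upper, &phy_data);         /* latch; value discarded */
                if (!e1e_rphy(hw, lower, &phy_data))    /* only trust a clean read */
                        *total += phy_data;
        }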
@@ -3374,8 +3350,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
3374 | if ((hw->phy.type == e1000_phy_82578) || | 3350 | if ((hw->phy.type == e1000_phy_82578) || |
3375 | (hw->phy.type == e1000_phy_82577)) { | 3351 | (hw->phy.type == e1000_phy_82577)) { |
3376 | e1e_rphy(hw, HV_COLC_UPPER, &phy_data); | 3352 | e1e_rphy(hw, HV_COLC_UPPER, &phy_data); |
3377 | e1e_rphy(hw, HV_COLC_LOWER, &phy_data); | 3353 | if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data)) |
3378 | hw->mac.collision_delta = phy_data; | 3354 | hw->mac.collision_delta = phy_data; |
3379 | } else { | 3355 | } else { |
3380 | hw->mac.collision_delta = er32(COLC); | 3356 | hw->mac.collision_delta = er32(COLC); |
3381 | } | 3357 | } |
@@ -3386,8 +3362,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
3386 | if ((hw->phy.type == e1000_phy_82578) || | 3362 | if ((hw->phy.type == e1000_phy_82578) || |
3387 | (hw->phy.type == e1000_phy_82577)) { | 3363 | (hw->phy.type == e1000_phy_82577)) { |
3388 | e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data); | 3364 | e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data); |
3389 | e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data); | 3365 | if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data)) |
3390 | adapter->stats.tncrs += phy_data; | 3366 | adapter->stats.tncrs += phy_data; |
3391 | } else { | 3367 | } else { |
3392 | if ((hw->mac.type != e1000_82574) && | 3368 | if ((hw->mac.type != e1000_82574) && |
3393 | (hw->mac.type != e1000_82583)) | 3369 | (hw->mac.type != e1000_82583)) |
@@ -3398,8 +3374,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
3398 | adapter->stats.tsctfc += er32(TSCTFC); | 3374 | adapter->stats.tsctfc += er32(TSCTFC); |
3399 | 3375 | ||
3400 | /* Fill out the OS statistics structure */ | 3376 | /* Fill out the OS statistics structure */ |
3401 | adapter->net_stats.multicast = adapter->stats.mprc; | 3377 | netdev->stats.multicast = adapter->stats.mprc; |
3402 | adapter->net_stats.collisions = adapter->stats.colc; | 3378 | netdev->stats.collisions = adapter->stats.colc; |
3403 | 3379 | ||
3404 | /* Rx Errors */ | 3380 | /* Rx Errors */ |
3405 | 3381 | ||
@@ -3407,22 +3383,22 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
3407 | * RLEC on some newer hardware can be incorrect so build | 3383 | * RLEC on some newer hardware can be incorrect so build |
3408 | * our own version based on RUC and ROC | 3384 | * our own version based on RUC and ROC |
3409 | */ | 3385 | */ |
3410 | adapter->net_stats.rx_errors = adapter->stats.rxerrc + | 3386 | netdev->stats.rx_errors = adapter->stats.rxerrc + |
3411 | adapter->stats.crcerrs + adapter->stats.algnerrc + | 3387 | adapter->stats.crcerrs + adapter->stats.algnerrc + |
3412 | adapter->stats.ruc + adapter->stats.roc + | 3388 | adapter->stats.ruc + adapter->stats.roc + |
3413 | adapter->stats.cexterr; | 3389 | adapter->stats.cexterr; |
3414 | adapter->net_stats.rx_length_errors = adapter->stats.ruc + | 3390 | netdev->stats.rx_length_errors = adapter->stats.ruc + |
3415 | adapter->stats.roc; | 3391 | adapter->stats.roc; |
3416 | adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; | 3392 | netdev->stats.rx_crc_errors = adapter->stats.crcerrs; |
3417 | adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; | 3393 | netdev->stats.rx_frame_errors = adapter->stats.algnerrc; |
3418 | adapter->net_stats.rx_missed_errors = adapter->stats.mpc; | 3394 | netdev->stats.rx_missed_errors = adapter->stats.mpc; |
3419 | 3395 | ||
3420 | /* Tx Errors */ | 3396 | /* Tx Errors */ |
3421 | adapter->net_stats.tx_errors = adapter->stats.ecol + | 3397 | netdev->stats.tx_errors = adapter->stats.ecol + |
3422 | adapter->stats.latecol; | 3398 | adapter->stats.latecol; |
3423 | adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; | 3399 | netdev->stats.tx_aborted_errors = adapter->stats.ecol; |
3424 | adapter->net_stats.tx_window_errors = adapter->stats.latecol; | 3400 | netdev->stats.tx_window_errors = adapter->stats.latecol; |
3425 | adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; | 3401 | netdev->stats.tx_carrier_errors = adapter->stats.tncrs; |
3426 | 3402 | ||
3427 | /* Tx Dropped needs to be maintained elsewhere */ | 3403 | /* Tx Dropped needs to be maintained elsewhere */ |
3428 | 3404 | ||
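The conversion from adapter->net_stats to netdev->stats relies on the net_device_stats instance embedded in struct net_device, so the driver drops its private copy and ndo_get_stats can hand the embedded struct back, as the e1000_get_stats() hunk later in this file does. A minimal sketch:

        #include <linux/netdevice.h>

        /* Sketch: publish counters through the embedded stats struct. */
        static void sketch_fill_stats(struct net_device *netdev,
                                      unsigned long mprc, unsigned long colc)
        {
                netdev->stats.multicast  = mprc;
                netdev->stats.collisions = colc;
        }

        static struct net_device_stats *sketch_get_stats(struct net_device *netdev)
        {
                return &netdev->stats;          /* no driver-private storage needed */
        }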
@@ -3491,7 +3467,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter) | |||
3491 | ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); | 3467 | ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); |
3492 | } | 3468 | } |
3493 | 3469 | ||
3494 | bool e1000_has_link(struct e1000_adapter *adapter) | 3470 | bool e1000e_has_link(struct e1000_adapter *adapter) |
3495 | { | 3471 | { |
3496 | struct e1000_hw *hw = &adapter->hw; | 3472 | struct e1000_hw *hw = &adapter->hw; |
3497 | bool link_active = 0; | 3473 | bool link_active = 0; |
@@ -3572,7 +3548,7 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
3572 | u32 link, tctl; | 3548 | u32 link, tctl; |
3573 | int tx_pending = 0; | 3549 | int tx_pending = 0; |
3574 | 3550 | ||
3575 | link = e1000_has_link(adapter); | 3551 | link = e1000e_has_link(adapter); |
3576 | if ((netif_carrier_ok(netdev)) && link) { | 3552 | if ((netif_carrier_ok(netdev)) && link) { |
3577 | e1000e_enable_receives(adapter); | 3553 | e1000e_enable_receives(adapter); |
3578 | goto link_up; | 3554 | goto link_up; |
@@ -3612,21 +3588,15 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
3612 | "link gets many collisions.\n"); | 3588 | "link gets many collisions.\n"); |
3613 | } | 3589 | } |
3614 | 3590 | ||
3615 | /* | 3591 | /* adjust timeout factor according to speed/duplex */ |
3616 | * tweak tx_queue_len according to speed/duplex | ||
3617 | * and adjust the timeout factor | ||
3618 | */ | ||
3619 | netdev->tx_queue_len = adapter->tx_queue_len; | ||
3620 | adapter->tx_timeout_factor = 1; | 3592 | adapter->tx_timeout_factor = 1; |
3621 | switch (adapter->link_speed) { | 3593 | switch (adapter->link_speed) { |
3622 | case SPEED_10: | 3594 | case SPEED_10: |
3623 | txb2b = 0; | 3595 | txb2b = 0; |
3624 | netdev->tx_queue_len = 10; | ||
3625 | adapter->tx_timeout_factor = 16; | 3596 | adapter->tx_timeout_factor = 16; |
3626 | break; | 3597 | break; |
3627 | case SPEED_100: | 3598 | case SPEED_100: |
3628 | txb2b = 0; | 3599 | txb2b = 0; |
3629 | netdev->tx_queue_len = 100; | ||
3630 | adapter->tx_timeout_factor = 10; | 3600 | adapter->tx_timeout_factor = 10; |
3631 | break; | 3601 | break; |
3632 | } | 3602 | } |
@@ -3776,68 +3746,64 @@ static int e1000_tso(struct e1000_adapter *adapter, | |||
3776 | u8 ipcss, ipcso, tucss, tucso, hdr_len; | 3746 | u8 ipcss, ipcso, tucss, tucso, hdr_len; |
3777 | int err; | 3747 | int err; |
3778 | 3748 | ||
3779 | if (skb_is_gso(skb)) { | 3749 | if (!skb_is_gso(skb)) |
3780 | if (skb_header_cloned(skb)) { | 3750 | return 0; |
3781 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | ||
3782 | if (err) | ||
3783 | return err; | ||
3784 | } | ||
3785 | 3751 | ||
3786 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | 3752 | if (skb_header_cloned(skb)) { |
3787 | mss = skb_shinfo(skb)->gso_size; | 3753 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
3788 | if (skb->protocol == htons(ETH_P_IP)) { | 3754 | if (err) |
3789 | struct iphdr *iph = ip_hdr(skb); | 3755 | return err; |
3790 | iph->tot_len = 0; | 3756 | } |
3791 | iph->check = 0; | ||
3792 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | ||
3793 | iph->daddr, 0, | ||
3794 | IPPROTO_TCP, | ||
3795 | 0); | ||
3796 | cmd_length = E1000_TXD_CMD_IP; | ||
3797 | ipcse = skb_transport_offset(skb) - 1; | ||
3798 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | ||
3799 | ipv6_hdr(skb)->payload_len = 0; | ||
3800 | tcp_hdr(skb)->check = | ||
3801 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | ||
3802 | &ipv6_hdr(skb)->daddr, | ||
3803 | 0, IPPROTO_TCP, 0); | ||
3804 | ipcse = 0; | ||
3805 | } | ||
3806 | ipcss = skb_network_offset(skb); | ||
3807 | ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; | ||
3808 | tucss = skb_transport_offset(skb); | ||
3809 | tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; | ||
3810 | tucse = 0; | ||
3811 | 3757 | ||
3812 | cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | | 3758 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
3813 | E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); | 3759 | mss = skb_shinfo(skb)->gso_size; |
3760 | if (skb->protocol == htons(ETH_P_IP)) { | ||
3761 | struct iphdr *iph = ip_hdr(skb); | ||
3762 | iph->tot_len = 0; | ||
3763 | iph->check = 0; | ||
3764 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, | ||
3765 | 0, IPPROTO_TCP, 0); | ||
3766 | cmd_length = E1000_TXD_CMD_IP; | ||
3767 | ipcse = skb_transport_offset(skb) - 1; | ||
3768 | } else if (skb_is_gso_v6(skb)) { | ||
3769 | ipv6_hdr(skb)->payload_len = 0; | ||
3770 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | ||
3771 | &ipv6_hdr(skb)->daddr, | ||
3772 | 0, IPPROTO_TCP, 0); | ||
3773 | ipcse = 0; | ||
3774 | } | ||
3775 | ipcss = skb_network_offset(skb); | ||
3776 | ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; | ||
3777 | tucss = skb_transport_offset(skb); | ||
3778 | tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; | ||
3779 | tucse = 0; | ||
3814 | 3780 | ||
3815 | i = tx_ring->next_to_use; | 3781 | cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | |
3816 | context_desc = E1000_CONTEXT_DESC(*tx_ring, i); | 3782 | E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); |
3817 | buffer_info = &tx_ring->buffer_info[i]; | ||
3818 | 3783 | ||
3819 | context_desc->lower_setup.ip_fields.ipcss = ipcss; | 3784 | i = tx_ring->next_to_use; |
3820 | context_desc->lower_setup.ip_fields.ipcso = ipcso; | 3785 | context_desc = E1000_CONTEXT_DESC(*tx_ring, i); |
3821 | context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); | 3786 | buffer_info = &tx_ring->buffer_info[i]; |
3822 | context_desc->upper_setup.tcp_fields.tucss = tucss; | ||
3823 | context_desc->upper_setup.tcp_fields.tucso = tucso; | ||
3824 | context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); | ||
3825 | context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); | ||
3826 | context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; | ||
3827 | context_desc->cmd_and_length = cpu_to_le32(cmd_length); | ||
3828 | 3787 | ||
3829 | buffer_info->time_stamp = jiffies; | 3788 | context_desc->lower_setup.ip_fields.ipcss = ipcss; |
3830 | buffer_info->next_to_watch = i; | 3789 | context_desc->lower_setup.ip_fields.ipcso = ipcso; |
3790 | context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); | ||
3791 | context_desc->upper_setup.tcp_fields.tucss = tucss; | ||
3792 | context_desc->upper_setup.tcp_fields.tucso = tucso; | ||
3793 | context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); | ||
3794 | context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); | ||
3795 | context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; | ||
3796 | context_desc->cmd_and_length = cpu_to_le32(cmd_length); | ||
3831 | 3797 | ||
3832 | i++; | 3798 | buffer_info->time_stamp = jiffies; |
3833 | if (i == tx_ring->count) | 3799 | buffer_info->next_to_watch = i; |
3834 | i = 0; | ||
3835 | tx_ring->next_to_use = i; | ||
3836 | 3800 | ||
3837 | return 1; | 3801 | i++; |
3838 | } | 3802 | if (i == tx_ring->count) |
3803 | i = 0; | ||
3804 | tx_ring->next_to_use = i; | ||
3839 | 3805 | ||
3840 | return 0; | 3806 | return 1; |
3841 | } | 3807 | } |
3842 | 3808 | ||
3843 | static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) | 3809 | static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) |
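Most of the e1000_tso() hunk is an indentation change: the skb_is_gso() test becomes a guard clause so the offload setup no longer nests inside one large if block, and the IPv6 branch keys off skb_is_gso_v6() instead of comparing gso_type directly. A schematic of the restructuring with the descriptor setup elided:

        #include <linux/skbuff.h>
        #include <linux/gfp.h>

        static int sketch_tso(struct sk_buff *skb)
        {
                if (!skb_is_gso(skb))
                        return 0;               /* nothing to offload */

                if (skb_header_cloned(skb)) {
                        int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                        if (err)
                                return err;     /* propagate the allocation failure */
                }

                /* ... build the TSO context descriptor as in the hunk above ... */
                return 1;                       /* a context descriptor was queued */
        }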
@@ -3909,23 +3875,14 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3909 | unsigned int mss) | 3875 | unsigned int mss) |
3910 | { | 3876 | { |
3911 | struct e1000_ring *tx_ring = adapter->tx_ring; | 3877 | struct e1000_ring *tx_ring = adapter->tx_ring; |
3878 | struct pci_dev *pdev = adapter->pdev; | ||
3912 | struct e1000_buffer *buffer_info; | 3879 | struct e1000_buffer *buffer_info; |
3913 | unsigned int len = skb_headlen(skb); | 3880 | unsigned int len = skb_headlen(skb); |
3914 | unsigned int offset, size, count = 0, i; | 3881 | unsigned int offset = 0, size, count = 0, i; |
3915 | unsigned int f; | 3882 | unsigned int f; |
3916 | dma_addr_t *map; | ||
3917 | 3883 | ||
3918 | i = tx_ring->next_to_use; | 3884 | i = tx_ring->next_to_use; |
3919 | 3885 | ||
3920 | if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) { | ||
3921 | dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); | ||
3922 | adapter->tx_dma_failed++; | ||
3923 | return 0; | ||
3924 | } | ||
3925 | |||
3926 | map = skb_shinfo(skb)->dma_maps; | ||
3927 | offset = 0; | ||
3928 | |||
3929 | while (len) { | 3886 | while (len) { |
3930 | buffer_info = &tx_ring->buffer_info[i]; | 3887 | buffer_info = &tx_ring->buffer_info[i]; |
3931 | size = min(len, max_per_txd); | 3888 | size = min(len, max_per_txd); |
@@ -3933,11 +3890,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3933 | buffer_info->length = size; | 3890 | buffer_info->length = size; |
3934 | buffer_info->time_stamp = jiffies; | 3891 | buffer_info->time_stamp = jiffies; |
3935 | buffer_info->next_to_watch = i; | 3892 | buffer_info->next_to_watch = i; |
3936 | buffer_info->dma = skb_shinfo(skb)->dma_head + offset; | 3893 | buffer_info->dma = pci_map_single(pdev, skb->data + offset, |
3937 | count++; | 3894 | size, PCI_DMA_TODEVICE); |
3895 | buffer_info->mapped_as_page = false; | ||
3896 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) | ||
3897 | goto dma_error; | ||
3938 | 3898 | ||
3939 | len -= size; | 3899 | len -= size; |
3940 | offset += size; | 3900 | offset += size; |
3901 | count++; | ||
3941 | 3902 | ||
3942 | if (len) { | 3903 | if (len) { |
3943 | i++; | 3904 | i++; |
@@ -3951,7 +3912,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3951 | 3912 | ||
3952 | frag = &skb_shinfo(skb)->frags[f]; | 3913 | frag = &skb_shinfo(skb)->frags[f]; |
3953 | len = frag->size; | 3914 | len = frag->size; |
3954 | offset = 0; | 3915 | offset = frag->page_offset; |
3955 | 3916 | ||
3956 | while (len) { | 3917 | while (len) { |
3957 | i++; | 3918 | i++; |
@@ -3964,7 +3925,12 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3964 | buffer_info->length = size; | 3925 | buffer_info->length = size; |
3965 | buffer_info->time_stamp = jiffies; | 3926 | buffer_info->time_stamp = jiffies; |
3966 | buffer_info->next_to_watch = i; | 3927 | buffer_info->next_to_watch = i; |
3967 | buffer_info->dma = map[f] + offset; | 3928 | buffer_info->dma = pci_map_page(pdev, frag->page, |
3929 | offset, size, | ||
3930 | PCI_DMA_TODEVICE); | ||
3931 | buffer_info->mapped_as_page = true; | ||
3932 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) | ||
3933 | goto dma_error; | ||
3968 | 3934 | ||
3969 | len -= size; | 3935 | len -= size; |
3970 | offset += size; | 3936 | offset += size; |
@@ -3976,6 +3942,22 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3976 | tx_ring->buffer_info[first].next_to_watch = i; | 3942 | tx_ring->buffer_info[first].next_to_watch = i; |
3977 | 3943 | ||
3978 | return count; | 3944 | return count; |
3945 | |||
3946 | dma_error: | ||
3947 | dev_err(&pdev->dev, "TX DMA map failed\n"); | ||
3948 | buffer_info->dma = 0; | ||
3949 | if (count) | ||
3950 | count--; | ||
3951 | |||
3952 | while (count--) { | ||
3953 | if (i == 0) | ||
3954 | i += tx_ring->count; | ||
3955 | i--; | ||
3956 | buffer_info = &tx_ring->buffer_info[i]; | ||
3957 | e1000_put_txbuf(adapter, buffer_info); | ||
3958 | } | ||
3959 | |||
3960 | return 0; | ||
3979 | } | 3961 | } |
3980 | 3962 | ||
3981 | static void e1000_tx_queue(struct e1000_adapter *adapter, | 3963 | static void e1000_tx_queue(struct e1000_adapter *adapter, |
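The new e1000_tx_map() maps each descriptor's buffer on its own, pci_map_single() for the linear head and pci_map_page() for paged fragments, and on a mapping failure walks backwards releasing whatever was already mapped. A compact sketch of that unwind pattern, with hypothetical flat arrays standing in for the ring structures:

        #include <linux/pci.h>

        /* Sketch: map n linear buffers; on error, unmap the ones already done. */
        static int sketch_map_buffers(struct pci_dev *pdev, void **buf, size_t *len,
                                      dma_addr_t *dma, int n)
        {
                int i;

                for (i = 0; i < n; i++) {
                        dma[i] = pci_map_single(pdev, buf[i], len[i],
                                                PCI_DMA_TODEVICE);
                        if (pci_dma_mapping_error(pdev, dma[i]))
                                goto dma_error;
                }
                return n;

        dma_error:
                dev_err(&pdev->dev, "TX DMA map failed\n");
                while (--i >= 0)                /* unwind in reverse order */
                        pci_unmap_single(pdev, dma[i], len[i], PCI_DMA_TODEVICE);
                return 0;
        }

The hunk additionally records mapped_as_page per buffer so the real unwind path can release single and page mappings appropriately.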
@@ -4048,8 +4030,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, | |||
4048 | u16 length, offset; | 4030 | u16 length, offset; |
4049 | 4031 | ||
4050 | if (vlan_tx_tag_present(skb)) { | 4032 | if (vlan_tx_tag_present(skb)) { |
4051 | if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) | 4033 | if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && |
4052 | && (adapter->hw.mng_cookie.status & | 4034 | (adapter->hw.mng_cookie.status & |
4053 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) | 4035 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) |
4054 | return 0; | 4036 | return 0; |
4055 | } | 4037 | } |
@@ -4271,10 +4253,8 @@ static void e1000_reset_task(struct work_struct *work) | |||
4271 | **/ | 4253 | **/ |
4272 | static struct net_device_stats *e1000_get_stats(struct net_device *netdev) | 4254 | static struct net_device_stats *e1000_get_stats(struct net_device *netdev) |
4273 | { | 4255 | { |
4274 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
4275 | |||
4276 | /* only return the current stats */ | 4256 | /* only return the current stats */ |
4277 | return &adapter->net_stats; | 4257 | return &netdev->stats; |
4278 | } | 4258 | } |
4279 | 4259 | ||
4280 | /** | 4260 | /** |
@@ -4303,6 +4283,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
4303 | return -EINVAL; | 4283 | return -EINVAL; |
4304 | } | 4284 | } |
4305 | 4285 | ||
4286 | /* 82573 Errata 17 */ | ||
4287 | if (((adapter->hw.mac.type == e1000_82573) || | ||
4288 | (adapter->hw.mac.type == e1000_82574)) && | ||
4289 | (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) { | ||
4290 | adapter->flags2 |= FLAG2_DISABLE_ASPM_L1; | ||
4291 | e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1); | ||
4292 | } | ||
4293 | |||
4306 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | 4294 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) |
4307 | msleep(1); | 4295 | msleep(1); |
4308 | /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ | 4296 | /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ |
@@ -4321,13 +4309,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
4321 | * fragmented skbs | 4309 | * fragmented skbs |
4322 | */ | 4310 | */ |
4323 | 4311 | ||
4324 | if (max_frame <= 256) | 4312 | if (max_frame <= 2048) |
4325 | adapter->rx_buffer_len = 256; | ||
4326 | else if (max_frame <= 512) | ||
4327 | adapter->rx_buffer_len = 512; | ||
4328 | else if (max_frame <= 1024) | ||
4329 | adapter->rx_buffer_len = 1024; | ||
4330 | else if (max_frame <= 2048) | ||
4331 | adapter->rx_buffer_len = 2048; | 4313 | adapter->rx_buffer_len = 2048; |
4332 | else | 4314 | else |
4333 | adapter->rx_buffer_len = 4096; | 4315 | adapter->rx_buffer_len = 4096; |
@@ -4362,6 +4344,8 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, | |||
4362 | data->phy_id = adapter->hw.phy.addr; | 4344 | data->phy_id = adapter->hw.phy.addr; |
4363 | break; | 4345 | break; |
4364 | case SIOCGMIIREG: | 4346 | case SIOCGMIIREG: |
4347 | e1000_phy_read_status(adapter); | ||
4348 | |||
4365 | switch (data->reg_num & 0x1F) { | 4349 | switch (data->reg_num & 0x1F) { |
4366 | case MII_BMCR: | 4350 | case MII_BMCR: |
4367 | data->val_out = adapter->phy_regs.bmcr; | 4351 | data->val_out = adapter->phy_regs.bmcr; |
@@ -4469,7 +4453,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) | |||
4469 | e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); | 4453 | e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); |
4470 | 4454 | ||
4471 | /* activate PHY wakeup */ | 4455 | /* activate PHY wakeup */ |
4472 | retval = hw->phy.ops.acquire_phy(hw); | 4456 | retval = hw->phy.ops.acquire(hw); |
4473 | if (retval) { | 4457 | if (retval) { |
4474 | e_err("Could not acquire PHY\n"); | 4458 | e_err("Could not acquire PHY\n"); |
4475 | return retval; | 4459 | return retval; |
@@ -4486,7 +4470,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) | |||
4486 | if (retval) | 4470 | if (retval) |
4487 | e_err("Could not set PHY Host Wakeup bit\n"); | 4471 | e_err("Could not set PHY Host Wakeup bit\n"); |
4488 | out: | 4472 | out: |
4489 | hw->phy.ops.release_phy(hw); | 4473 | hw->phy.ops.release(hw); |
4490 | 4474 | ||
4491 | return retval; | 4475 | return retval; |
4492 | } | 4476 | } |
@@ -4543,7 +4527,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
4543 | e1000_media_type_internal_serdes) { | 4527 | e1000_media_type_internal_serdes) { |
4544 | /* keep the laser running in D3 */ | 4528 | /* keep the laser running in D3 */ |
4545 | ctrl_ext = er32(CTRL_EXT); | 4529 | ctrl_ext = er32(CTRL_EXT); |
4546 | ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; | 4530 | ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; |
4547 | ew32(CTRL_EXT, ctrl_ext); | 4531 | ew32(CTRL_EXT, ctrl_ext); |
4548 | } | 4532 | } |
4549 | 4533 | ||
@@ -4629,29 +4613,42 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, | |||
4629 | } | 4613 | } |
4630 | } | 4614 | } |
4631 | 4615 | ||
4632 | static void e1000e_disable_l1aspm(struct pci_dev *pdev) | 4616 | #ifdef CONFIG_PCIEASPM |
4617 | static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | ||
4618 | { | ||
4619 | pci_disable_link_state(pdev, state); | ||
4620 | } | ||
4621 | #else | ||
4622 | static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | ||
4633 | { | 4623 | { |
4634 | int pos; | 4624 | int pos; |
4635 | u16 val; | 4625 | u16 reg16; |
4636 | 4626 | ||
4637 | /* | 4627 | /* |
4638 | * 82573 workaround - disable L1 ASPM on mobile chipsets | 4628 | * Both device and parent should have the same ASPM setting. |
4639 | * | 4629 | * Disable ASPM in downstream component first and then upstream. |
4640 | * L1 ASPM on various mobile (ich7) chipsets do not behave properly | ||
4641 | * resulting in lost data or garbage information on the pci-e link | ||
4642 | * level. This could result in (false) bad EEPROM checksum errors, | ||
4643 | * long ping times (up to 2s) or even a system freeze/hang. | ||
4644 | * | ||
4645 | * Unfortunately this feature saves about 1W power consumption when | ||
4646 | * active. | ||
4647 | */ | 4630 | */ |
4648 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 4631 | pos = pci_pcie_cap(pdev); |
4649 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val); | 4632 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); |
4650 | if (val & 0x2) { | 4633 | reg16 &= ~state; |
4651 | dev_warn(&pdev->dev, "Disabling L1 ASPM\n"); | 4634 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); |
4652 | val &= ~0x2; | 4635 | |
4653 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val); | 4636 | if (!pdev->bus->self) |
4654 | } | 4637 | return; |
4638 | |||
4639 | pos = pci_pcie_cap(pdev->bus->self); | ||
4640 | pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, ®16); | ||
4641 | reg16 &= ~state; | ||
4642 | pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); | ||
4643 | } | ||
4644 | #endif | ||
4645 | void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | ||
4646 | { | ||
4647 | dev_info(&pdev->dev, "Disabling ASPM %s %s\n", | ||
4648 | (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", | ||
4649 | (state & PCIE_LINK_STATE_L1) ? "L1" : ""); | ||
4650 | |||
4651 | __e1000e_disable_aspm(pdev, state); | ||
4655 | } | 4652 | } |
4656 | 4653 | ||
4657 | #ifdef CONFIG_PM | 4654 | #ifdef CONFIG_PM |
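With CONFIG_PCIEASPM the new helper defers to pci_disable_link_state(); without it, the fallback clears the requested L0s/L1 enable bits in the Link Control register of the device and then of its upstream bridge. A sketch of the per-function register clear, assuming a valid PCIe capability and relying on PCIE_LINK_STATE_L0S/L1 mapping onto bits 0 and 1 of LNKCTL:

        #include <linux/pci.h>

        /* Sketch: clear ASPM enable bits on one PCIe function. */
        static void sketch_clear_aspm(struct pci_dev *pdev, u16 state)
        {
                int pos = pci_pcie_cap(pdev);   /* 0 if the device is not PCIe */
                u16 lnkctl;

                if (!pos)
                        return;

                pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &lnkctl);
                lnkctl &= ~state;               /* drop the L0s and/or L1 enables */
                pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, lnkctl);
        }

Applying this to pdev and then to pdev->bus->self keeps device and bridge in agreement, which is why the hunk touches the downstream component first.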
@@ -4676,7 +4673,9 @@ static int e1000_resume(struct pci_dev *pdev) | |||
4676 | 4673 | ||
4677 | pci_set_power_state(pdev, PCI_D0); | 4674 | pci_set_power_state(pdev, PCI_D0); |
4678 | pci_restore_state(pdev); | 4675 | pci_restore_state(pdev); |
4679 | e1000e_disable_l1aspm(pdev); | 4676 | pci_save_state(pdev); |
4677 | if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) | ||
4678 | e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1); | ||
4680 | 4679 | ||
4681 | err = pci_enable_device_mem(pdev); | 4680 | err = pci_enable_device_mem(pdev); |
4682 | if (err) { | 4681 | if (err) { |
@@ -4818,7 +4817,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | |||
4818 | int err; | 4817 | int err; |
4819 | pci_ers_result_t result; | 4818 | pci_ers_result_t result; |
4820 | 4819 | ||
4821 | e1000e_disable_l1aspm(pdev); | 4820 | if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) |
4821 | e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1); | ||
4822 | err = pci_enable_device_mem(pdev); | 4822 | err = pci_enable_device_mem(pdev); |
4823 | if (err) { | 4823 | if (err) { |
4824 | dev_err(&pdev->dev, | 4824 | dev_err(&pdev->dev, |
@@ -4827,6 +4827,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | |||
4827 | } else { | 4827 | } else { |
4828 | pci_set_master(pdev); | 4828 | pci_set_master(pdev); |
4829 | pci_restore_state(pdev); | 4829 | pci_restore_state(pdev); |
4830 | pci_save_state(pdev); | ||
4830 | 4831 | ||
4831 | pci_enable_wake(pdev, PCI_D3hot, 0); | 4832 | pci_enable_wake(pdev, PCI_D3hot, 0); |
4832 | pci_enable_wake(pdev, PCI_D3cold, 0); | 4833 | pci_enable_wake(pdev, PCI_D3cold, 0); |
@@ -4911,13 +4912,6 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter) | |||
4911 | dev_warn(&adapter->pdev->dev, | 4912 | dev_warn(&adapter->pdev->dev, |
4912 | "Warning: detected DSPD enabled in EEPROM\n"); | 4913 | "Warning: detected DSPD enabled in EEPROM\n"); |
4913 | } | 4914 | } |
4914 | |||
4915 | ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf); | ||
4916 | if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) { | ||
4917 | /* ASPM enable */ | ||
4918 | dev_warn(&adapter->pdev->dev, | ||
4919 | "Warning: detected ASPM enabled in EEPROM\n"); | ||
4920 | } | ||
4921 | } | 4915 | } |
4922 | 4916 | ||
4923 | static const struct net_device_ops e1000e_netdev_ops = { | 4917 | static const struct net_device_ops e1000e_netdev_ops = { |
@@ -4966,7 +4960,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4966 | u16 eeprom_data = 0; | 4960 | u16 eeprom_data = 0; |
4967 | u16 eeprom_apme_mask = E1000_EEPROM_APME; | 4961 | u16 eeprom_apme_mask = E1000_EEPROM_APME; |
4968 | 4962 | ||
4969 | e1000e_disable_l1aspm(pdev); | 4963 | if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) |
4964 | e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1); | ||
4970 | 4965 | ||
4971 | err = pci_enable_device_mem(pdev); | 4966 | err = pci_enable_device_mem(pdev); |
4972 | if (err) | 4967 | if (err) |
@@ -5135,7 +5130,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
5135 | 5130 | ||
5136 | e1000_eeprom_checks(adapter); | 5131 | e1000_eeprom_checks(adapter); |
5137 | 5132 | ||
5138 | /* copy the MAC address out of the NVM */ | 5133 | /* copy the MAC address */ |
5139 | if (e1000e_read_mac_addr(&adapter->hw)) | 5134 | if (e1000e_read_mac_addr(&adapter->hw)) |
5140 | e_err("NVM Read Error while reading MAC address\n"); | 5135 | e_err("NVM Read Error while reading MAC address\n"); |
5141 | 5136 | ||
@@ -5160,6 +5155,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
5160 | INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); | 5155 | INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); |
5161 | INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); | 5156 | INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); |
5162 | INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); | 5157 | INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); |
5158 | INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); | ||
5163 | 5159 | ||
5164 | /* Initialize link parameters. User can change them with ethtool */ | 5160 | /* Initialize link parameters. User can change them with ethtool */ |
5165 | adapter->hw.mac.autoneg = 1; | 5161 | adapter->hw.mac.autoneg = 1; |
@@ -5283,19 +5279,24 @@ static void __devexit e1000_remove(struct pci_dev *pdev) | |||
5283 | del_timer_sync(&adapter->watchdog_timer); | 5279 | del_timer_sync(&adapter->watchdog_timer); |
5284 | del_timer_sync(&adapter->phy_info_timer); | 5280 | del_timer_sync(&adapter->phy_info_timer); |
5285 | 5281 | ||
5282 | cancel_work_sync(&adapter->reset_task); | ||
5283 | cancel_work_sync(&adapter->watchdog_task); | ||
5284 | cancel_work_sync(&adapter->downshift_task); | ||
5285 | cancel_work_sync(&adapter->update_phy_task); | ||
5286 | cancel_work_sync(&adapter->print_hang_task); | ||
5286 | flush_scheduled_work(); | 5287 | flush_scheduled_work(); |
5287 | 5288 | ||
5289 | if (!(netdev->flags & IFF_UP)) | ||
5290 | e1000_power_down_phy(adapter); | ||
5291 | |||
5292 | unregister_netdev(netdev); | ||
5293 | |||
5288 | /* | 5294 | /* |
5289 | * Release control of h/w to f/w. If f/w is AMT enabled, this | 5295 | * Release control of h/w to f/w. If f/w is AMT enabled, this |
5290 | * would have already happened in close and is redundant. | 5296 | * would have already happened in close and is redundant. |
5291 | */ | 5297 | */ |
5292 | e1000_release_hw_control(adapter); | 5298 | e1000_release_hw_control(adapter); |
5293 | 5299 | ||
5294 | unregister_netdev(netdev); | ||
5295 | |||
5296 | if (!e1000_check_reset_block(&adapter->hw)) | ||
5297 | e1000_phy_hw_reset(&adapter->hw); | ||
5298 | |||
5299 | e1000e_reset_interrupt_capability(adapter); | 5300 | e1000e_reset_interrupt_capability(adapter); |
5300 | kfree(adapter->tx_ring); | 5301 | kfree(adapter->tx_ring); |
5301 | kfree(adapter->rx_ring); | 5302 | kfree(adapter->rx_ring); |
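The remove() hunk tightens teardown ordering: timers are stopped first (they are what queue the work items), each work item is cancelled explicitly, the PHY is powered down if the interface is not up, and only then is the net device unregistered. A skeletal sketch of that ordering with a hypothetical adapter type:

        #include <linux/timer.h>
        #include <linux/workqueue.h>
        #include <linux/netdevice.h>

        struct sketch_adapter {                 /* hypothetical, not e1000_adapter */
                struct timer_list watchdog_timer;
                struct work_struct reset_task;
                struct net_device *netdev;
        };

        static void sketch_remove(struct sketch_adapter *a)
        {
                del_timer_sync(&a->watchdog_timer);     /* no new work gets queued */
                cancel_work_sync(&a->reset_task);       /* wait for in-flight work  */
                unregister_netdev(a->netdev);           /* the netdev goes last     */
        }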
@@ -5321,7 +5322,7 @@ static struct pci_error_handlers e1000_err_handler = { | |||
5321 | .resume = e1000_io_resume, | 5322 | .resume = e1000_io_resume, |
5322 | }; | 5323 | }; |
5323 | 5324 | ||
5324 | static struct pci_device_id e1000_pci_tbl[] = { | 5325 | static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { |
5325 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, | 5326 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, |
5326 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, | 5327 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, |
5327 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, | 5328 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, |
@@ -5361,6 +5362,7 @@ static struct pci_device_id e1000_pci_tbl[] = { | |||
5361 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, | 5362 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, |
5362 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, | 5363 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, |
5363 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, | 5364 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, |
5365 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan }, | ||
5364 | 5366 | ||
5365 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, | 5367 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, |
5366 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, | 5368 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, |
@@ -5414,12 +5416,10 @@ static int __init e1000_init_module(void) | |||
5414 | int ret; | 5416 | int ret; |
5415 | printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", | 5417 | printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", |
5416 | e1000e_driver_name, e1000e_driver_version); | 5418 | e1000e_driver_name, e1000e_driver_version); |
5417 | printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", | 5419 | printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n", |
5418 | e1000e_driver_name); | 5420 | e1000e_driver_name); |
5419 | ret = pci_register_driver(&e1000_driver); | 5421 | ret = pci_register_driver(&e1000_driver); |
5420 | pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name, | 5422 | |
5421 | PM_QOS_DEFAULT_VALUE); | ||
5422 | |||
5423 | return ret; | 5423 | return ret; |
5424 | } | 5424 | } |
5425 | module_init(e1000_init_module); | 5425 | module_init(e1000_init_module); |
@@ -5433,7 +5433,6 @@ module_init(e1000_init_module); | |||
5433 | static void __exit e1000_exit_module(void) | 5433 | static void __exit e1000_exit_module(void) |
5434 | { | 5434 | { |
5435 | pci_unregister_driver(&e1000_driver); | 5435 | pci_unregister_driver(&e1000_driver); |
5436 | pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name); | ||
5437 | } | 5436 | } |
5438 | module_exit(e1000_exit_module); | 5437 | module_exit(e1000_exit_module); |
5439 | 5438 | ||
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c index 1342e0b1815c..2e399778cae5 100644 --- a/drivers/net/e1000e/param.c +++ b/drivers/net/e1000e/param.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index 85f955f70417..7f3ceb9dad6a 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -44,6 +44,8 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | |||
44 | /* Cable length tables */ | 44 | /* Cable length tables */ |
45 | static const u16 e1000_m88_cable_length_table[] = | 45 | static const u16 e1000_m88_cable_length_table[] = |
46 | { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; | 46 | { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; |
47 | #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ | ||
48 | ARRAY_SIZE(e1000_m88_cable_length_table) | ||
47 | 49 | ||
48 | static const u16 e1000_igp_2_cable_length_table[] = | 50 | static const u16 e1000_igp_2_cable_length_table[] = |
49 | { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, | 51 | { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, |
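Deriving M88E1000_CABLE_LENGTH_TABLE_SIZE with ARRAY_SIZE() keeps the bound tied to the initializer instead of a hand-counted constant. A trivial illustration of the same idiom with a made-up table:

        #include <linux/kernel.h>       /* ARRAY_SIZE() */
        #include <linux/types.h>

        static const u16 sketch_cable_table[] = { 0, 50, 80, 110, 140 };
        #define SKETCH_CABLE_TABLE_SIZE ARRAY_SIZE(sketch_cable_table)

        /* Bounds checks now track the initializer automatically. */
        static u16 sketch_lookup(unsigned int idx)
        {
                return (idx < SKETCH_CABLE_TABLE_SIZE) ? sketch_cable_table[idx] : 0;
        }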
@@ -130,7 +132,7 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw) | |||
130 | u16 phy_id; | 132 | u16 phy_id; |
131 | u16 retry_count = 0; | 133 | u16 retry_count = 0; |
132 | 134 | ||
133 | if (!(phy->ops.read_phy_reg)) | 135 | if (!(phy->ops.read_reg)) |
134 | goto out; | 136 | goto out; |
135 | 137 | ||
136 | while (retry_count < 2) { | 138 | while (retry_count < 2) { |
@@ -150,32 +152,9 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw) | |||
150 | if (phy->id != 0 && phy->id != PHY_REVISION_MASK) | 152 | if (phy->id != 0 && phy->id != PHY_REVISION_MASK) |
151 | goto out; | 153 | goto out; |
152 | 154 | ||
153 | /* | ||
154 | * If the PHY ID is still unknown, we may have an 82577i | ||
155 | * without link. We will try again after setting Slow | ||
156 | * MDIC mode. No harm in trying again in this case since | ||
157 | * the PHY ID is unknown at this point anyway | ||
158 | */ | ||
159 | ret_val = phy->ops.acquire_phy(hw); | ||
160 | if (ret_val) | ||
161 | goto out; | ||
162 | ret_val = e1000_set_mdio_slow_mode_hv(hw, true); | ||
163 | if (ret_val) | ||
164 | goto out; | ||
165 | phy->ops.release_phy(hw); | ||
166 | |||
167 | retry_count++; | 155 | retry_count++; |
168 | } | 156 | } |
169 | out: | 157 | out: |
170 | /* Revert to MDIO fast mode, if applicable */ | ||
171 | if (retry_count) { | ||
172 | ret_val = phy->ops.acquire_phy(hw); | ||
173 | if (ret_val) | ||
174 | return ret_val; | ||
175 | ret_val = e1000_set_mdio_slow_mode_hv(hw, false); | ||
176 | phy->ops.release_phy(hw); | ||
177 | } | ||
178 | |||
179 | return ret_val; | 158 | return ret_val; |
180 | } | 159 | } |
181 | 160 | ||
@@ -211,7 +190,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) | |||
211 | u32 i, mdic = 0; | 190 | u32 i, mdic = 0; |
212 | 191 | ||
213 | if (offset > MAX_PHY_REG_ADDRESS) { | 192 | if (offset > MAX_PHY_REG_ADDRESS) { |
214 | hw_dbg(hw, "PHY Address %d is out of range\n", offset); | 193 | e_dbg("PHY Address %d is out of range\n", offset); |
215 | return -E1000_ERR_PARAM; | 194 | return -E1000_ERR_PARAM; |
216 | } | 195 | } |
217 | 196 | ||
@@ -238,11 +217,11 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) | |||
238 | break; | 217 | break; |
239 | } | 218 | } |
240 | if (!(mdic & E1000_MDIC_READY)) { | 219 | if (!(mdic & E1000_MDIC_READY)) { |
241 | hw_dbg(hw, "MDI Read did not complete\n"); | 220 | e_dbg("MDI Read did not complete\n"); |
242 | return -E1000_ERR_PHY; | 221 | return -E1000_ERR_PHY; |
243 | } | 222 | } |
244 | if (mdic & E1000_MDIC_ERROR) { | 223 | if (mdic & E1000_MDIC_ERROR) { |
245 | hw_dbg(hw, "MDI Error\n"); | 224 | e_dbg("MDI Error\n"); |
246 | return -E1000_ERR_PHY; | 225 | return -E1000_ERR_PHY; |
247 | } | 226 | } |
248 | *data = (u16) mdic; | 227 | *data = (u16) mdic; |
@@ -264,7 +243,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) | |||
264 | u32 i, mdic = 0; | 243 | u32 i, mdic = 0; |
265 | 244 | ||
266 | if (offset > MAX_PHY_REG_ADDRESS) { | 245 | if (offset > MAX_PHY_REG_ADDRESS) { |
267 | hw_dbg(hw, "PHY Address %d is out of range\n", offset); | 246 | e_dbg("PHY Address %d is out of range\n", offset); |
268 | return -E1000_ERR_PARAM; | 247 | return -E1000_ERR_PARAM; |
269 | } | 248 | } |
270 | 249 | ||
@@ -292,11 +271,11 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) | |||
292 | break; | 271 | break; |
293 | } | 272 | } |
294 | if (!(mdic & E1000_MDIC_READY)) { | 273 | if (!(mdic & E1000_MDIC_READY)) { |
295 | hw_dbg(hw, "MDI Write did not complete\n"); | 274 | e_dbg("MDI Write did not complete\n"); |
296 | return -E1000_ERR_PHY; | 275 | return -E1000_ERR_PHY; |
297 | } | 276 | } |
298 | if (mdic & E1000_MDIC_ERROR) { | 277 | if (mdic & E1000_MDIC_ERROR) { |
299 | hw_dbg(hw, "MDI Error\n"); | 278 | e_dbg("MDI Error\n"); |
300 | return -E1000_ERR_PHY; | 279 | return -E1000_ERR_PHY; |
301 | } | 280 | } |
302 | 281 | ||
@@ -317,14 +296,14 @@ s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) | |||
317 | { | 296 | { |
318 | s32 ret_val; | 297 | s32 ret_val; |
319 | 298 | ||
320 | ret_val = hw->phy.ops.acquire_phy(hw); | 299 | ret_val = hw->phy.ops.acquire(hw); |
321 | if (ret_val) | 300 | if (ret_val) |
322 | return ret_val; | 301 | return ret_val; |
323 | 302 | ||
324 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 303 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
325 | data); | 304 | data); |
326 | 305 | ||
327 | hw->phy.ops.release_phy(hw); | 306 | hw->phy.ops.release(hw); |
328 | 307 | ||
329 | return ret_val; | 308 | return ret_val; |
330 | } | 309 | } |
@@ -342,14 +321,14 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) | |||
342 | { | 321 | { |
343 | s32 ret_val; | 322 | s32 ret_val; |
344 | 323 | ||
345 | ret_val = hw->phy.ops.acquire_phy(hw); | 324 | ret_val = hw->phy.ops.acquire(hw); |
346 | if (ret_val) | 325 | if (ret_val) |
347 | return ret_val; | 326 | return ret_val; |
348 | 327 | ||
349 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 328 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
350 | data); | 329 | data); |
351 | 330 | ||
352 | hw->phy.ops.release_phy(hw); | 331 | hw->phy.ops.release(hw); |
353 | 332 | ||
354 | return ret_val; | 333 | return ret_val; |
355 | } | 334 | } |
@@ -371,10 +350,10 @@ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, | |||
371 | s32 ret_val = 0; | 350 | s32 ret_val = 0; |
372 | 351 | ||
373 | if (!locked) { | 352 | if (!locked) { |
374 | if (!(hw->phy.ops.acquire_phy)) | 353 | if (!(hw->phy.ops.acquire)) |
375 | goto out; | 354 | goto out; |
376 | 355 | ||
377 | ret_val = hw->phy.ops.acquire_phy(hw); | 356 | ret_val = hw->phy.ops.acquire(hw); |
378 | if (ret_val) | 357 | if (ret_val) |
379 | goto out; | 358 | goto out; |
380 | } | 359 | } |
@@ -392,7 +371,7 @@ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, | |||
392 | 371 | ||
393 | release: | 372 | release: |
394 | if (!locked) | 373 | if (!locked) |
395 | hw->phy.ops.release_phy(hw); | 374 | hw->phy.ops.release(hw); |
396 | out: | 375 | out: |
397 | return ret_val; | 376 | return ret_val; |
398 | } | 377 | } |
@@ -442,10 +421,10 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, | |||
442 | s32 ret_val = 0; | 421 | s32 ret_val = 0; |
443 | 422 | ||
444 | if (!locked) { | 423 | if (!locked) { |
445 | if (!(hw->phy.ops.acquire_phy)) | 424 | if (!(hw->phy.ops.acquire)) |
446 | goto out; | 425 | goto out; |
447 | 426 | ||
448 | ret_val = hw->phy.ops.acquire_phy(hw); | 427 | ret_val = hw->phy.ops.acquire(hw); |
449 | if (ret_val) | 428 | if (ret_val) |
450 | goto out; | 429 | goto out; |
451 | } | 430 | } |
@@ -463,7 +442,7 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, | |||
463 | 442 | ||
464 | release: | 443 | release: |
465 | if (!locked) | 444 | if (!locked) |
466 | hw->phy.ops.release_phy(hw); | 445 | hw->phy.ops.release(hw); |
467 | 446 | ||
468 | out: | 447 | out: |
469 | return ret_val; | 448 | return ret_val; |
@@ -515,10 +494,10 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, | |||
515 | s32 ret_val = 0; | 494 | s32 ret_val = 0; |
516 | 495 | ||
517 | if (!locked) { | 496 | if (!locked) { |
518 | if (!(hw->phy.ops.acquire_phy)) | 497 | if (!(hw->phy.ops.acquire)) |
519 | goto out; | 498 | goto out; |
520 | 499 | ||
521 | ret_val = hw->phy.ops.acquire_phy(hw); | 500 | ret_val = hw->phy.ops.acquire(hw); |
522 | if (ret_val) | 501 | if (ret_val) |
523 | goto out; | 502 | goto out; |
524 | } | 503 | } |
@@ -533,7 +512,7 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, | |||
533 | *data = (u16)kmrnctrlsta; | 512 | *data = (u16)kmrnctrlsta; |
534 | 513 | ||
535 | if (!locked) | 514 | if (!locked) |
536 | hw->phy.ops.release_phy(hw); | 515 | hw->phy.ops.release(hw); |
537 | 516 | ||
538 | out: | 517 | out: |
539 | return ret_val; | 518 | return ret_val; |
@@ -587,10 +566,10 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, | |||
587 | s32 ret_val = 0; | 566 | s32 ret_val = 0; |
588 | 567 | ||
589 | if (!locked) { | 568 | if (!locked) { |
590 | if (!(hw->phy.ops.acquire_phy)) | 569 | if (!(hw->phy.ops.acquire)) |
591 | goto out; | 570 | goto out; |
592 | 571 | ||
593 | ret_val = hw->phy.ops.acquire_phy(hw); | 572 | ret_val = hw->phy.ops.acquire(hw); |
594 | if (ret_val) | 573 | if (ret_val) |
595 | goto out; | 574 | goto out; |
596 | } | 575 | } |
@@ -602,7 +581,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, | |||
602 | udelay(2); | 581 | udelay(2); |
603 | 582 | ||
604 | if (!locked) | 583 | if (!locked) |
605 | hw->phy.ops.release_phy(hw); | 584 | hw->phy.ops.release(hw); |
606 | 585 | ||
607 | out: | 586 | out: |
608 | return ret_val; | 587 | return ret_val; |
@@ -649,7 +628,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) | |||
649 | u16 phy_data; | 628 | u16 phy_data; |
650 | 629 | ||
651 | /* Enable CRS on TX. This must be set for half-duplex operation. */ | 630 | /* Enable CRS on TX. This must be set for half-duplex operation. */ |
652 | ret_val = phy->ops.read_phy_reg(hw, I82577_CFG_REG, &phy_data); | 631 | ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data); |
653 | if (ret_val) | 632 | if (ret_val) |
654 | goto out; | 633 | goto out; |
655 | 634 | ||
@@ -658,7 +637,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) | |||
658 | /* Enable downshift */ | 637 | /* Enable downshift */ |
659 | phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; | 638 | phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; |
660 | 639 | ||
661 | ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data); | 640 | ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data); |
662 | 641 | ||
663 | out: | 642 | out: |
664 | return ret_val; | 643 | return ret_val; |
@@ -776,12 +755,12 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) | |||
776 | /* Commit the changes. */ | 755 | /* Commit the changes. */ |
777 | ret_val = e1000e_commit_phy(hw); | 756 | ret_val = e1000e_commit_phy(hw); |
778 | if (ret_val) { | 757 | if (ret_val) { |
779 | hw_dbg(hw, "Error committing the PHY changes\n"); | 758 | e_dbg("Error committing the PHY changes\n"); |
780 | return ret_val; | 759 | return ret_val; |
781 | } | 760 | } |
782 | 761 | ||
783 | if (phy->type == e1000_phy_82578) { | 762 | if (phy->type == e1000_phy_82578) { |
784 | ret_val = phy->ops.read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, | 763 | ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, |
785 | &phy_data); | 764 | &phy_data); |
786 | if (ret_val) | 765 | if (ret_val) |
787 | return ret_val; | 766 | return ret_val; |
@@ -789,7 +768,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) | |||
789 | /* 82578 PHY - set the downshift count to 1x. */ | 768 | /* 82578 PHY - set the downshift count to 1x. */ |
790 | phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; | 769 | phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; |
791 | phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; | 770 | phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; |
792 | ret_val = phy->ops.write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, | 771 | ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, |
793 | phy_data); | 772 | phy_data); |
794 | if (ret_val) | 773 | if (ret_val) |
795 | return ret_val; | 774 | return ret_val; |
@@ -813,7 +792,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) | |||
813 | 792 | ||
814 | ret_val = e1000_phy_hw_reset(hw); | 793 | ret_val = e1000_phy_hw_reset(hw); |
815 | if (ret_val) { | 794 | if (ret_val) { |
816 | hw_dbg(hw, "Error resetting the PHY.\n"); | 795 | e_dbg("Error resetting the PHY.\n"); |
817 | return ret_val; | 796 | return ret_val; |
818 | } | 797 | } |
819 | 798 | ||
@@ -824,9 +803,9 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) | |||
824 | msleep(100); | 803 | msleep(100); |
825 | 804 | ||
826 | /* disable lplu d0 during driver init */ | 805 | /* disable lplu d0 during driver init */ |
827 | ret_val = e1000_set_d0_lplu_state(hw, 0); | 806 | ret_val = e1000_set_d0_lplu_state(hw, false); |
828 | if (ret_val) { | 807 | if (ret_val) { |
829 | hw_dbg(hw, "Error Disabling LPLU D0\n"); | 808 | e_dbg("Error Disabling LPLU D0\n"); |
830 | return ret_val; | 809 | return ret_val; |
831 | } | 810 | } |
832 | /* Configure mdi-mdix settings */ | 811 | /* Configure mdi-mdix settings */ |
@@ -962,39 +941,39 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
962 | NWAY_AR_10T_HD_CAPS); | 941 | NWAY_AR_10T_HD_CAPS); |
963 | mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); | 942 | mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); |
964 | 943 | ||
965 | hw_dbg(hw, "autoneg_advertised %x\n", phy->autoneg_advertised); | 944 | e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); |
966 | 945 | ||
967 | /* Do we want to advertise 10 Mb Half Duplex? */ | 946 | /* Do we want to advertise 10 Mb Half Duplex? */ |
968 | if (phy->autoneg_advertised & ADVERTISE_10_HALF) { | 947 | if (phy->autoneg_advertised & ADVERTISE_10_HALF) { |
969 | hw_dbg(hw, "Advertise 10mb Half duplex\n"); | 948 | e_dbg("Advertise 10mb Half duplex\n"); |
970 | mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; | 949 | mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; |
971 | } | 950 | } |
972 | 951 | ||
973 | /* Do we want to advertise 10 Mb Full Duplex? */ | 952 | /* Do we want to advertise 10 Mb Full Duplex? */ |
974 | if (phy->autoneg_advertised & ADVERTISE_10_FULL) { | 953 | if (phy->autoneg_advertised & ADVERTISE_10_FULL) { |
975 | hw_dbg(hw, "Advertise 10mb Full duplex\n"); | 954 | e_dbg("Advertise 10mb Full duplex\n"); |
976 | mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; | 955 | mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; |
977 | } | 956 | } |
978 | 957 | ||
979 | /* Do we want to advertise 100 Mb Half Duplex? */ | 958 | /* Do we want to advertise 100 Mb Half Duplex? */ |
980 | if (phy->autoneg_advertised & ADVERTISE_100_HALF) { | 959 | if (phy->autoneg_advertised & ADVERTISE_100_HALF) { |
981 | hw_dbg(hw, "Advertise 100mb Half duplex\n"); | 960 | e_dbg("Advertise 100mb Half duplex\n"); |
982 | mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; | 961 | mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; |
983 | } | 962 | } |
984 | 963 | ||
985 | /* Do we want to advertise 100 Mb Full Duplex? */ | 964 | /* Do we want to advertise 100 Mb Full Duplex? */ |
986 | if (phy->autoneg_advertised & ADVERTISE_100_FULL) { | 965 | if (phy->autoneg_advertised & ADVERTISE_100_FULL) { |
987 | hw_dbg(hw, "Advertise 100mb Full duplex\n"); | 966 | e_dbg("Advertise 100mb Full duplex\n"); |
988 | mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; | 967 | mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; |
989 | } | 968 | } |
990 | 969 | ||
991 | /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ | 970 | /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ |
992 | if (phy->autoneg_advertised & ADVERTISE_1000_HALF) | 971 | if (phy->autoneg_advertised & ADVERTISE_1000_HALF) |
993 | hw_dbg(hw, "Advertise 1000mb Half duplex request denied!\n"); | 972 | e_dbg("Advertise 1000mb Half duplex request denied!\n"); |
994 | 973 | ||
995 | /* Do we want to advertise 1000 Mb Full Duplex? */ | 974 | /* Do we want to advertise 1000 Mb Full Duplex? */ |
996 | if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { | 975 | if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { |
997 | hw_dbg(hw, "Advertise 1000mb Full duplex\n"); | 976 | e_dbg("Advertise 1000mb Full duplex\n"); |
998 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; | 977 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; |
999 | } | 978 | } |
1000 | 979 | ||
@@ -1053,7 +1032,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1053 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | 1032 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); |
1054 | break; | 1033 | break; |
1055 | default: | 1034 | default: |
1056 | hw_dbg(hw, "Flow control param set incorrectly\n"); | 1035 | e_dbg("Flow control param set incorrectly\n"); |
1057 | ret_val = -E1000_ERR_CONFIG; | 1036 | ret_val = -E1000_ERR_CONFIG; |
1058 | return ret_val; | 1037 | return ret_val; |
1059 | } | 1038 | } |
@@ -1062,7 +1041,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1062 | if (ret_val) | 1041 | if (ret_val) |
1063 | return ret_val; | 1042 | return ret_val; |
1064 | 1043 | ||
1065 | hw_dbg(hw, "Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); | 1044 | e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); |
1066 | 1045 | ||
1067 | if (phy->autoneg_mask & ADVERTISE_1000_FULL) { | 1046 | if (phy->autoneg_mask & ADVERTISE_1000_FULL) { |
1068 | ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); | 1047 | ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); |
@@ -1099,13 +1078,13 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
1099 | if (phy->autoneg_advertised == 0) | 1078 | if (phy->autoneg_advertised == 0) |
1100 | phy->autoneg_advertised = phy->autoneg_mask; | 1079 | phy->autoneg_advertised = phy->autoneg_mask; |
1101 | 1080 | ||
1102 | hw_dbg(hw, "Reconfiguring auto-neg advertisement params\n"); | 1081 | e_dbg("Reconfiguring auto-neg advertisement params\n"); |
1103 | ret_val = e1000_phy_setup_autoneg(hw); | 1082 | ret_val = e1000_phy_setup_autoneg(hw); |
1104 | if (ret_val) { | 1083 | if (ret_val) { |
1105 | hw_dbg(hw, "Error Setting up Auto-Negotiation\n"); | 1084 | e_dbg("Error Setting up Auto-Negotiation\n"); |
1106 | return ret_val; | 1085 | return ret_val; |
1107 | } | 1086 | } |
1108 | hw_dbg(hw, "Restarting Auto-Neg\n"); | 1087 | e_dbg("Restarting Auto-Neg\n"); |
1109 | 1088 | ||
1110 | /* | 1089 | /* |
1111 | * Restart auto-negotiation by setting the Auto Neg Enable bit and | 1090 | * Restart auto-negotiation by setting the Auto Neg Enable bit and |
@@ -1127,7 +1106,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
1127 | if (phy->autoneg_wait_to_complete) { | 1106 | if (phy->autoneg_wait_to_complete) { |
1128 | ret_val = e1000_wait_autoneg(hw); | 1107 | ret_val = e1000_wait_autoneg(hw); |
1129 | if (ret_val) { | 1108 | if (ret_val) { |
1130 | hw_dbg(hw, "Error while waiting for " | 1109 | e_dbg("Error while waiting for " |
1131 | "autoneg to complete\n"); | 1110 | "autoneg to complete\n"); |
1132 | return ret_val; | 1111 | return ret_val; |
1133 | } | 1112 | } |
@@ -1165,10 +1144,10 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw) | |||
1165 | * PHY will be set to 10H, 10F, 100H or 100F | 1144 | * PHY will be set to 10H, 10F, 100H or 100F |
1166 | * depending on user settings. | 1145 | * depending on user settings. |
1167 | */ | 1146 | */ |
1168 | hw_dbg(hw, "Forcing Speed and Duplex\n"); | 1147 | e_dbg("Forcing Speed and Duplex\n"); |
1169 | ret_val = e1000_phy_force_speed_duplex(hw); | 1148 | ret_val = e1000_phy_force_speed_duplex(hw); |
1170 | if (ret_val) { | 1149 | if (ret_val) { |
1171 | hw_dbg(hw, "Error Forcing Speed and Duplex\n"); | 1150 | e_dbg("Error Forcing Speed and Duplex\n"); |
1172 | return ret_val; | 1151 | return ret_val; |
1173 | } | 1152 | } |
1174 | } | 1153 | } |
@@ -1185,11 +1164,11 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw) | |||
1185 | return ret_val; | 1164 | return ret_val; |
1186 | 1165 | ||
1187 | if (link) { | 1166 | if (link) { |
1188 | hw_dbg(hw, "Valid link established!!!\n"); | 1167 | e_dbg("Valid link established!!!\n"); |
1189 | e1000e_config_collision_dist(hw); | 1168 | e1000e_config_collision_dist(hw); |
1190 | ret_val = e1000e_config_fc_after_link_up(hw); | 1169 | ret_val = e1000e_config_fc_after_link_up(hw); |
1191 | } else { | 1170 | } else { |
1192 | hw_dbg(hw, "Unable to establish link!!!\n"); | 1171 | e_dbg("Unable to establish link!!!\n"); |
1193 | } | 1172 | } |
1194 | 1173 | ||
1195 | return ret_val; | 1174 | return ret_val; |
@@ -1235,12 +1214,12 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) | |||
1235 | if (ret_val) | 1214 | if (ret_val) |
1236 | return ret_val; | 1215 | return ret_val; |
1237 | 1216 | ||
1238 | hw_dbg(hw, "IGP PSCR: %X\n", phy_data); | 1217 | e_dbg("IGP PSCR: %X\n", phy_data); |
1239 | 1218 | ||
1240 | udelay(1); | 1219 | udelay(1); |
1241 | 1220 | ||
1242 | if (phy->autoneg_wait_to_complete) { | 1221 | if (phy->autoneg_wait_to_complete) { |
1243 | hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n"); | 1222 | e_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); |
1244 | 1223 | ||
1245 | ret_val = e1000e_phy_has_link_generic(hw, | 1224 | ret_val = e1000e_phy_has_link_generic(hw, |
1246 | PHY_FORCE_LIMIT, | 1225 | PHY_FORCE_LIMIT, |
@@ -1250,7 +1229,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) | |||
1250 | return ret_val; | 1229 | return ret_val; |
1251 | 1230 | ||
1252 | if (!link) | 1231 | if (!link) |
1253 | hw_dbg(hw, "Link taking longer than expected.\n"); | 1232 | e_dbg("Link taking longer than expected.\n"); |
1254 | 1233 | ||
1255 | /* Try once more */ | 1234 | /* Try once more */ |
1256 | ret_val = e1000e_phy_has_link_generic(hw, | 1235 | ret_val = e1000e_phy_has_link_generic(hw, |
@@ -1294,7 +1273,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1294 | if (ret_val) | 1273 | if (ret_val) |
1295 | return ret_val; | 1274 | return ret_val; |
1296 | 1275 | ||
1297 | hw_dbg(hw, "M88E1000 PSCR: %X\n", phy_data); | 1276 | e_dbg("M88E1000 PSCR: %X\n", phy_data); |
1298 | 1277 | ||
1299 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); | 1278 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); |
1300 | if (ret_val) | 1279 | if (ret_val) |
@@ -1312,7 +1291,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1312 | return ret_val; | 1291 | return ret_val; |
1313 | 1292 | ||
1314 | if (phy->autoneg_wait_to_complete) { | 1293 | if (phy->autoneg_wait_to_complete) { |
1315 | hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n"); | 1294 | e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); |
1316 | 1295 | ||
1317 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | 1296 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, |
1318 | 100000, &link); | 1297 | 100000, &link); |
@@ -1320,17 +1299,22 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1320 | return ret_val; | 1299 | return ret_val; |
1321 | 1300 | ||
1322 | if (!link) { | 1301 | if (!link) { |
1323 | /* | 1302 | if (hw->phy.type != e1000_phy_m88) { |
1324 | * We didn't get link. | 1303 | e_dbg("Link taking longer than expected.\n"); |
1325 | * Reset the DSP and cross our fingers. | 1304 | } else { |
1326 | */ | 1305 | /* |
1327 | ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, | 1306 | * We didn't get link. |
1328 | 0x001d); | 1307 | * Reset the DSP and cross our fingers. |
1329 | if (ret_val) | 1308 | */ |
1330 | return ret_val; | 1309 | ret_val = e1e_wphy(hw, |
1331 | ret_val = e1000e_phy_reset_dsp(hw); | 1310 | M88E1000_PHY_PAGE_SELECT, |
1332 | if (ret_val) | 1311 | 0x001d); |
1333 | return ret_val; | 1312 | if (ret_val) |
1313 | return ret_val; | ||
1314 | ret_val = e1000e_phy_reset_dsp(hw); | ||
1315 | if (ret_val) | ||
1316 | return ret_val; | ||
1317 | } | ||
1334 | } | 1318 | } |
1335 | 1319 | ||
1336 | /* Try once more */ | 1320 | /* Try once more */ |
@@ -1340,6 +1324,9 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1340 | return ret_val; | 1324 | return ret_val; |
1341 | } | 1325 | } |
1342 | 1326 | ||
1327 | if (hw->phy.type != e1000_phy_m88) | ||
1328 | return 0; | ||
1329 | |||
1343 | ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); | 1330 | ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); |
1344 | if (ret_val) | 1331 | if (ret_val) |
1345 | return ret_val; | 1332 | return ret_val; |
@@ -1369,6 +1356,73 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1369 | } | 1356 | } |
1370 | 1357 | ||
1371 | /** | 1358 | /** |
1359 | * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex | ||
1360 | * @hw: pointer to the HW structure | ||
1361 | * | ||
1362 | * Forces the speed and duplex settings of the PHY. | ||
1363 | * This is a function pointer entry point only called by | ||
1364 | * PHY setup routines. | ||
1365 | **/ | ||
1366 | s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) | ||
1367 | { | ||
1368 | struct e1000_phy_info *phy = &hw->phy; | ||
1369 | s32 ret_val; | ||
1370 | u16 data; | ||
1371 | bool link; | ||
1372 | |||
1373 | ret_val = e1e_rphy(hw, PHY_CONTROL, &data); | ||
1374 | if (ret_val) | ||
1375 | goto out; | ||
1376 | |||
1377 | e1000e_phy_force_speed_duplex_setup(hw, &data); | ||
1378 | |||
1379 | ret_val = e1e_wphy(hw, PHY_CONTROL, data); | ||
1380 | if (ret_val) | ||
1381 | goto out; | ||
1382 | |||
1383 | /* Disable MDI-X support for 10/100 */ | ||
1384 | ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); | ||
1385 | if (ret_val) | ||
1386 | goto out; | ||
1387 | |||
1388 | data &= ~IFE_PMC_AUTO_MDIX; | ||
1389 | data &= ~IFE_PMC_FORCE_MDIX; | ||
1390 | |||
1391 | ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); | ||
1392 | if (ret_val) | ||
1393 | goto out; | ||
1394 | |||
1395 | e_dbg("IFE PMC: %X\n", data); | ||
1396 | |||
1397 | udelay(1); | ||
1398 | |||
1399 | if (phy->autoneg_wait_to_complete) { | ||
1400 | e_dbg("Waiting for forced speed/duplex link on IFE phy.\n"); | ||
1401 | |||
1402 | ret_val = e1000e_phy_has_link_generic(hw, | ||
1403 | PHY_FORCE_LIMIT, | ||
1404 | 100000, | ||
1405 | &link); | ||
1406 | if (ret_val) | ||
1407 | goto out; | ||
1408 | |||
1409 | if (!link) | ||
1410 | e_dbg("Link taking longer than expected.\n"); | ||
1411 | |||
1412 | /* Try once more */ | ||
1413 | ret_val = e1000e_phy_has_link_generic(hw, | ||
1414 | PHY_FORCE_LIMIT, | ||
1415 | 100000, | ||
1416 | &link); | ||
1417 | if (ret_val) | ||
1418 | goto out; | ||
1419 | } | ||
1420 | |||
1421 | out: | ||
1422 | return ret_val; | ||
1423 | } | ||
1424 | |||
1425 | /** | ||
1372 | * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex | 1426 | * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex |
1373 | * @hw: pointer to the HW structure | 1427 | * @hw: pointer to the HW structure |
1374 | * @phy_ctrl: pointer to current value of PHY_CONTROL | 1428 | * @phy_ctrl: pointer to current value of PHY_CONTROL |
@@ -1403,11 +1457,11 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) | |||
1403 | if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { | 1457 | if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { |
1404 | ctrl &= ~E1000_CTRL_FD; | 1458 | ctrl &= ~E1000_CTRL_FD; |
1405 | *phy_ctrl &= ~MII_CR_FULL_DUPLEX; | 1459 | *phy_ctrl &= ~MII_CR_FULL_DUPLEX; |
1406 | hw_dbg(hw, "Half Duplex\n"); | 1460 | e_dbg("Half Duplex\n"); |
1407 | } else { | 1461 | } else { |
1408 | ctrl |= E1000_CTRL_FD; | 1462 | ctrl |= E1000_CTRL_FD; |
1409 | *phy_ctrl |= MII_CR_FULL_DUPLEX; | 1463 | *phy_ctrl |= MII_CR_FULL_DUPLEX; |
1410 | hw_dbg(hw, "Full Duplex\n"); | 1464 | e_dbg("Full Duplex\n"); |
1411 | } | 1465 | } |
1412 | 1466 | ||
1413 | /* Forcing 10mb or 100mb? */ | 1467 | /* Forcing 10mb or 100mb? */ |
@@ -1415,12 +1469,12 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) | |||
1415 | ctrl |= E1000_CTRL_SPD_100; | 1469 | ctrl |= E1000_CTRL_SPD_100; |
1416 | *phy_ctrl |= MII_CR_SPEED_100; | 1470 | *phy_ctrl |= MII_CR_SPEED_100; |
1417 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); | 1471 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); |
1418 | hw_dbg(hw, "Forcing 100mb\n"); | 1472 | e_dbg("Forcing 100mb\n"); |
1419 | } else { | 1473 | } else { |
1420 | ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); | 1474 | ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); |
1421 | *phy_ctrl |= MII_CR_SPEED_10; | 1475 | *phy_ctrl |= MII_CR_SPEED_10; |
1422 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); | 1476 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); |
1423 | hw_dbg(hw, "Forcing 10mb\n"); | 1477 | e_dbg("Forcing 10mb\n"); |
1424 | } | 1478 | } |
1425 | 1479 | ||
1426 | e1000e_config_collision_dist(hw); | 1480 | e1000e_config_collision_dist(hw); |
@@ -1523,8 +1577,8 @@ s32 e1000e_check_downshift(struct e1000_hw *hw) | |||
1523 | switch (phy->type) { | 1577 | switch (phy->type) { |
1524 | case e1000_phy_m88: | 1578 | case e1000_phy_m88: |
1525 | case e1000_phy_gg82563: | 1579 | case e1000_phy_gg82563: |
1580 | case e1000_phy_bm: | ||
1526 | case e1000_phy_82578: | 1581 | case e1000_phy_82578: |
1527 | case e1000_phy_82577: | ||
1528 | offset = M88E1000_PHY_SPEC_STATUS; | 1582 | offset = M88E1000_PHY_SPEC_STATUS; |
1529 | mask = M88E1000_PSSR_DOWNSHIFT; | 1583 | mask = M88E1000_PSSR_DOWNSHIFT; |
1530 | break; | 1584 | break; |
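The downshift hunk above only shows the per-PHY switch that picks a status register and mask; the read and the flag update sit outside the diff context. The sketch below completes the pattern under stated assumptions: the register offsets, mask values, and read_reg() helper are hypothetical placeholders, not the driver's M88E1000_*/IGP01E1000_* definitions.

#include <stdbool.h>
#include <stdint.h>

enum phy_kind { PHY_M88, PHY_IGP2, PHY_OTHER };

struct fake_phy {
	enum phy_kind type;
	bool speed_downgraded;
};

/* Pretend register read; the real driver goes through MDIO. */
static int read_reg(uint16_t offset, uint16_t *val)
{
	*val = (offset == 0x11) ? 0x0020 : 0x0000;	/* fake downshift bit */
	return 0;
}

static int check_downshift(struct fake_phy *phy)
{
	uint16_t offset, mask, data;
	int ret;

	switch (phy->type) {
	case PHY_M88:		/* M88-style status register and bit */
		offset = 0x11;
		mask = 0x0020;
		break;
	case PHY_IGP2:		/* IGP-style link-health register */
		offset = 0x12;
		mask = 0x0001;
		break;
	default:		/* speed downshift not supported */
		phy->speed_downgraded = false;
		return 0;
	}

	ret = read_reg(offset, &data);
	if (!ret)
		phy->speed_downgraded = (data & mask) ? true : false;
	return ret;
}

int main(void)
{
	struct fake_phy phy = { .type = PHY_M88, .speed_downgraded = false };

	check_downshift(&phy);
	return phy.speed_downgraded ? 0 : 1;
}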
@@ -1535,7 +1589,7 @@ s32 e1000e_check_downshift(struct e1000_hw *hw) | |||
1535 | break; | 1589 | break; |
1536 | default: | 1590 | default: |
1537 | /* speed downshift not supported */ | 1591 | /* speed downshift not supported */ |
1538 | phy->speed_downgraded = 0; | 1592 | phy->speed_downgraded = false; |
1539 | return 0; | 1593 | return 0; |
1540 | } | 1594 | } |
1541 | 1595 | ||
@@ -1555,7 +1609,7 @@ s32 e1000e_check_downshift(struct e1000_hw *hw) | |||
1555 | * | 1609 | * |
1556 | * Polarity is determined based on the PHY specific status register. | 1610 | * Polarity is determined based on the PHY specific status register. |
1557 | **/ | 1611 | **/ |
1558 | static s32 e1000_check_polarity_m88(struct e1000_hw *hw) | 1612 | s32 e1000_check_polarity_m88(struct e1000_hw *hw) |
1559 | { | 1613 | { |
1560 | struct e1000_phy_info *phy = &hw->phy; | 1614 | struct e1000_phy_info *phy = &hw->phy; |
1561 | s32 ret_val; | 1615 | s32 ret_val; |
@@ -1580,7 +1634,7 @@ static s32 e1000_check_polarity_m88(struct e1000_hw *hw) | |||
1580 | * Polarity is determined based on the PHY port status register, and the | 1634 | * Polarity is determined based on the PHY port status register, and the |
1581 | * current speed (since there is no polarity at 100Mbps). | 1635 | * current speed (since there is no polarity at 100Mbps). |
1582 | **/ | 1636 | **/ |
1583 | static s32 e1000_check_polarity_igp(struct e1000_hw *hw) | 1637 | s32 e1000_check_polarity_igp(struct e1000_hw *hw) |
1584 | { | 1638 | { |
1585 | struct e1000_phy_info *phy = &hw->phy; | 1639 | struct e1000_phy_info *phy = &hw->phy; |
1586 | s32 ret_val; | 1640 | s32 ret_val; |
@@ -1618,6 +1672,39 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw) | |||
1618 | } | 1672 | } |
1619 | 1673 | ||
1620 | /** | 1674 | /** |
1675 | * e1000_check_polarity_ife - Check cable polarity for IFE PHY | ||
1676 | * @hw: pointer to the HW structure | ||
1677 | * | ||
1678 | * Polarity is determined based on the polarity reversal feature being enabled. | ||
1679 | **/ | ||
1680 | s32 e1000_check_polarity_ife(struct e1000_hw *hw) | ||
1681 | { | ||
1682 | struct e1000_phy_info *phy = &hw->phy; | ||
1683 | s32 ret_val; | ||
1684 | u16 phy_data, offset, mask; | ||
1685 | |||
1686 | /* | ||
1687 | * Polarity is determined based on the reversal feature being enabled. | ||
1688 | */ | ||
1689 | if (phy->polarity_correction) { | ||
1690 | offset = IFE_PHY_EXTENDED_STATUS_CONTROL; | ||
1691 | mask = IFE_PESC_POLARITY_REVERSED; | ||
1692 | } else { | ||
1693 | offset = IFE_PHY_SPECIAL_CONTROL; | ||
1694 | mask = IFE_PSC_FORCE_POLARITY; | ||
1695 | } | ||
1696 | |||
1697 | ret_val = e1e_rphy(hw, offset, &phy_data); | ||
1698 | |||
1699 | if (!ret_val) | ||
1700 | phy->cable_polarity = (phy_data & mask) | ||
1701 | ? e1000_rev_polarity_reversed | ||
1702 | : e1000_rev_polarity_normal; | ||
1703 | |||
1704 | return ret_val; | ||
1705 | } | ||
1706 | |||
1707 | /** | ||
1621 | * e1000_wait_autoneg - Wait for auto-neg completion | 1708 | * e1000_wait_autoneg - Wait for auto-neg completion |
1622 | * @hw: pointer to the HW structure | 1709 | * @hw: pointer to the HW structure |
1623 | * | 1710 | * |
@@ -1717,15 +1804,21 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) | |||
1717 | 1804 | ||
1718 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); | 1805 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); |
1719 | if (ret_val) | 1806 | if (ret_val) |
1720 | return ret_val; | 1807 | goto out; |
1721 | 1808 | ||
1722 | index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> | 1809 | index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> |
1723 | M88E1000_PSSR_CABLE_LENGTH_SHIFT; | 1810 | M88E1000_PSSR_CABLE_LENGTH_SHIFT; |
1811 | if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { | ||
1812 | ret_val = -E1000_ERR_PHY; | ||
1813 | goto out; | ||
1814 | } | ||
1815 | |||
1724 | phy->min_cable_length = e1000_m88_cable_length_table[index]; | 1816 | phy->min_cable_length = e1000_m88_cable_length_table[index]; |
1725 | phy->max_cable_length = e1000_m88_cable_length_table[index+1]; | 1817 | phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; |
1726 | 1818 | ||
1727 | phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; | 1819 | phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; |
1728 | 1820 | ||
1821 | out: | ||
1729 | return ret_val; | 1822 | return ret_val; |
1730 | } | 1823 | } |
1731 | 1824 | ||
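The added bounds check above exists because both index and index + 1 are read from the cable-length table, so the last usable index is the table size minus two. A minimal standalone sketch of that lookup follows; the table contents and names are hypothetical, only the bounds logic mirrors the hunk.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical length table; the real M88 table lives in the driver. */
static const uint16_t cable_len_tbl[] = { 0, 50, 80, 110, 140, 140 };
#define TBL_SIZE (sizeof(cable_len_tbl) / sizeof(cable_len_tbl[0]))

/* Return 0 and fill min/max/avg, or -1 if the raw index would overrun. */
static int lookup_cable_length(unsigned int index, uint16_t *min_len,
			       uint16_t *max_len, uint16_t *avg_len)
{
	/* index and index + 1 are both read, so anything at or beyond
	 * TBL_SIZE - 1 must be rejected. */
	if (index >= TBL_SIZE - 1)
		return -1;

	*min_len = cable_len_tbl[index];
	*max_len = cable_len_tbl[index + 1];
	*avg_len = (*min_len + *max_len) / 2;
	return 0;
}

int main(void)
{
	uint16_t lo, hi, avg;

	if (!lookup_cable_length(2, &lo, &hi, &avg))
		printf("cable length %u-%u m (avg %u)\n", lo, hi, avg);
	return 0;
}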
@@ -1736,7 +1829,7 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) | |||
1736 | * The automatic gain control (agc) normalizes the amplitude of the | 1829 | * The automatic gain control (agc) normalizes the amplitude of the |
1737 | * received signal, adjusting for the attenuation produced by the | 1830 | * received signal, adjusting for the attenuation produced by the |
1738 | * cable. By reading the AGC registers, which represent the | 1831 | * cable. By reading the AGC registers, which represent the |
1739 | * combination of course and fine gain value, the value can be put | 1832 | * combination of coarse and fine gain value, the value can be put |
1740 | * into a lookup table to obtain the approximate cable length | 1833 | * into a lookup table to obtain the approximate cable length |
1741 | * for each channel. | 1834 | * for each channel. |
1742 | **/ | 1835 | **/ |
@@ -1761,7 +1854,7 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) | |||
1761 | 1854 | ||
1762 | /* | 1855 | /* |
1763 | * Getting bits 15:9, which represent the combination of | 1856 | * Getting bits 15:9, which represent the combination of |
1764 | * course and fine gain values. The result is a number | 1857 | * coarse and fine gain values. The result is a number |
1765 | * that can be put into the lookup table to obtain the | 1858 | * that can be put into the lookup table to obtain the |
1766 | * approximate cable length. | 1859 | * approximate cable length. |
1767 | */ | 1860 | */ |
@@ -1815,8 +1908,8 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) | |||
1815 | u16 phy_data; | 1908 | u16 phy_data; |
1816 | bool link; | 1909 | bool link; |
1817 | 1910 | ||
1818 | if (hw->phy.media_type != e1000_media_type_copper) { | 1911 | if (phy->media_type != e1000_media_type_copper) { |
1819 | hw_dbg(hw, "Phy info is only valid for copper media\n"); | 1912 | e_dbg("Phy info is only valid for copper media\n"); |
1820 | return -E1000_ERR_CONFIG; | 1913 | return -E1000_ERR_CONFIG; |
1821 | } | 1914 | } |
1822 | 1915 | ||
@@ -1825,7 +1918,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) | |||
1825 | return ret_val; | 1918 | return ret_val; |
1826 | 1919 | ||
1827 | if (!link) { | 1920 | if (!link) { |
1828 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | 1921 | e_dbg("Phy info is only valid if link is up\n"); |
1829 | return -E1000_ERR_CONFIG; | 1922 | return -E1000_ERR_CONFIG; |
1830 | } | 1923 | } |
1831 | 1924 | ||
@@ -1893,11 +1986,11 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) | |||
1893 | return ret_val; | 1986 | return ret_val; |
1894 | 1987 | ||
1895 | if (!link) { | 1988 | if (!link) { |
1896 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | 1989 | e_dbg("Phy info is only valid if link is up\n"); |
1897 | return -E1000_ERR_CONFIG; | 1990 | return -E1000_ERR_CONFIG; |
1898 | } | 1991 | } |
1899 | 1992 | ||
1900 | phy->polarity_correction = 1; | 1993 | phy->polarity_correction = true; |
1901 | 1994 | ||
1902 | ret_val = e1000_check_polarity_igp(hw); | 1995 | ret_val = e1000_check_polarity_igp(hw); |
1903 | if (ret_val) | 1996 | if (ret_val) |
@@ -1936,6 +2029,61 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) | |||
1936 | } | 2029 | } |
1937 | 2030 | ||
1938 | /** | 2031 | /** |
2032 | * e1000_get_phy_info_ife - Retrieves various IFE PHY states | ||
2033 | * @hw: pointer to the HW structure | ||
2034 | * | ||
2035 | * Populates "phy" structure with various feature states. | ||
2036 | **/ | ||
2037 | s32 e1000_get_phy_info_ife(struct e1000_hw *hw) | ||
2038 | { | ||
2039 | struct e1000_phy_info *phy = &hw->phy; | ||
2040 | s32 ret_val; | ||
2041 | u16 data; | ||
2042 | bool link; | ||
2043 | |||
2044 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | ||
2045 | if (ret_val) | ||
2046 | goto out; | ||
2047 | |||
2048 | if (!link) { | ||
2049 | e_dbg("Phy info is only valid if link is up\n"); | ||
2050 | ret_val = -E1000_ERR_CONFIG; | ||
2051 | goto out; | ||
2052 | } | ||
2053 | |||
2054 | ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); | ||
2055 | if (ret_val) | ||
2056 | goto out; | ||
2057 | phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE) | ||
2058 | ? false : true; | ||
2059 | |||
2060 | if (phy->polarity_correction) { | ||
2061 | ret_val = e1000_check_polarity_ife(hw); | ||
2062 | if (ret_val) | ||
2063 | goto out; | ||
2064 | } else { | ||
2065 | /* Polarity is forced */ | ||
2066 | phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) | ||
2067 | ? e1000_rev_polarity_reversed | ||
2068 | : e1000_rev_polarity_normal; | ||
2069 | } | ||
2070 | |||
2071 | ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); | ||
2072 | if (ret_val) | ||
2073 | goto out; | ||
2074 | |||
2075 | phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false; | ||
2076 | |||
2077 | /* The following parameters are undefined for 10/100 operation. */ | ||
2078 | phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; | ||
2079 | phy->local_rx = e1000_1000t_rx_status_undefined; | ||
2080 | phy->remote_rx = e1000_1000t_rx_status_undefined; | ||
2081 | |||
2082 | out: | ||
2083 | return ret_val; | ||
2084 | } | ||
2085 | |||
2086 | /** | ||
1939 | * e1000e_phy_sw_reset - PHY software reset | 2087 | * e1000e_phy_sw_reset - PHY software reset |
1940 | * @hw: pointer to the HW structure | 2088 | * @hw: pointer to the HW structure |
1941 | * | 2089 | * |
@@ -1980,7 +2128,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) | |||
1980 | if (ret_val) | 2128 | if (ret_val) |
1981 | return 0; | 2129 | return 0; |
1982 | 2130 | ||
1983 | ret_val = phy->ops.acquire_phy(hw); | 2131 | ret_val = phy->ops.acquire(hw); |
1984 | if (ret_val) | 2132 | if (ret_val) |
1985 | return ret_val; | 2133 | return ret_val; |
1986 | 2134 | ||
@@ -1995,7 +2143,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) | |||
1995 | 2143 | ||
1996 | udelay(150); | 2144 | udelay(150); |
1997 | 2145 | ||
1998 | phy->ops.release_phy(hw); | 2146 | phy->ops.release(hw); |
1999 | 2147 | ||
2000 | return e1000_get_phy_cfg_done(hw); | 2148 | return e1000_get_phy_cfg_done(hw); |
2001 | } | 2149 | } |
@@ -2021,7 +2169,7 @@ s32 e1000e_get_cfg_done(struct e1000_hw *hw) | |||
2021 | **/ | 2169 | **/ |
2022 | s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) | 2170 | s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) |
2023 | { | 2171 | { |
2024 | hw_dbg(hw, "Running IGP 3 PHY init script\n"); | 2172 | e_dbg("Running IGP 3 PHY init script\n"); |
2025 | 2173 | ||
2026 | /* PHY init IGP 3 */ | 2174 | /* PHY init IGP 3 */ |
2027 | /* Enable rise/fall, 10-mode work in class-A */ | 2175 | /* Enable rise/fall, 10-mode work in class-A */ |
@@ -2189,28 +2337,34 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) | |||
2189 | s32 e1000e_determine_phy_address(struct e1000_hw *hw) | 2337 | s32 e1000e_determine_phy_address(struct e1000_hw *hw) |
2190 | { | 2338 | { |
2191 | s32 ret_val = -E1000_ERR_PHY_TYPE; | 2339 | s32 ret_val = -E1000_ERR_PHY_TYPE; |
2192 | u32 phy_addr= 0; | 2340 | u32 phy_addr = 0; |
2193 | u32 i = 0; | 2341 | u32 i; |
2194 | enum e1000_phy_type phy_type = e1000_phy_unknown; | 2342 | enum e1000_phy_type phy_type = e1000_phy_unknown; |
2195 | 2343 | ||
2196 | do { | 2344 | hw->phy.id = phy_type; |
2197 | for (phy_addr = 0; phy_addr < 4; phy_addr++) { | 2345 | |
2198 | hw->phy.addr = phy_addr; | 2346 | for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { |
2347 | hw->phy.addr = phy_addr; | ||
2348 | i = 0; | ||
2349 | |||
2350 | do { | ||
2199 | e1000e_get_phy_id(hw); | 2351 | e1000e_get_phy_id(hw); |
2200 | phy_type = e1000e_get_phy_type_from_id(hw->phy.id); | 2352 | phy_type = e1000e_get_phy_type_from_id(hw->phy.id); |
2201 | 2353 | ||
2202 | /* | 2354 | /* |
2203 | * If phy_type is valid, break - we found our | 2355 | * If phy_type is valid, break - we found our |
2204 | * PHY address | 2356 | * PHY address |
2205 | */ | 2357 | */ |
2206 | if (phy_type != e1000_phy_unknown) { | 2358 | if (phy_type != e1000_phy_unknown) { |
2207 | ret_val = 0; | 2359 | ret_val = 0; |
2208 | break; | 2360 | goto out; |
2209 | } | 2361 | } |
2210 | } | 2362 | msleep(1); |
2211 | i++; | 2363 | i++; |
2212 | } while ((ret_val != 0) && (i < 100)); | 2364 | } while (i < 10); |
2365 | } | ||
2213 | 2366 | ||
2367 | out: | ||
2214 | return ret_val; | 2368 | return ret_val; |
2215 | } | 2369 | } |
2216 | 2370 | ||
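The rework above inverts the loop structure: instead of sweeping all addresses once per retry, each candidate address is now polled up to ten times with a short sleep before moving to the next one. The sketch below shows only that probe-with-retry shape; probe_id(), the retry count, and the address limit are stand-ins, not the driver API.

#include <stdbool.h>
#include <stdio.h>

#define MAX_PHY_ADDR   4	/* stand-in for E1000_MAX_PHY_ADDR */
#define PROBE_RETRIES 10

/* Pretend to read a PHY ID at an address; returns true once "found". */
static bool probe_id(unsigned int addr, unsigned int attempt)
{
	return addr == 2 && attempt >= 3;	/* fake: address 2 answers late */
}

static int determine_phy_address(unsigned int *found_addr)
{
	unsigned int addr, i;

	for (addr = 0; addr < MAX_PHY_ADDR; addr++) {
		for (i = 0; i < PROBE_RETRIES; i++) {
			if (probe_id(addr, i)) {
				*found_addr = addr;
				return 0;
			}
			/* the driver sleeps about 1 ms between attempts */
		}
	}
	return -1;	/* no responding PHY found */
}

int main(void)
{
	unsigned int addr;

	if (!determine_phy_address(&addr))
		printf("PHY found at address %u\n", addr);
	return 0;
}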
@@ -2246,7 +2400,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) | |||
2246 | u32 page = offset >> IGP_PAGE_SHIFT; | 2400 | u32 page = offset >> IGP_PAGE_SHIFT; |
2247 | u32 page_shift = 0; | 2401 | u32 page_shift = 0; |
2248 | 2402 | ||
2249 | ret_val = hw->phy.ops.acquire_phy(hw); | 2403 | ret_val = hw->phy.ops.acquire(hw); |
2250 | if (ret_val) | 2404 | if (ret_val) |
2251 | return ret_val; | 2405 | return ret_val; |
2252 | 2406 | ||
@@ -2284,7 +2438,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) | |||
2284 | data); | 2438 | data); |
2285 | 2439 | ||
2286 | out: | 2440 | out: |
2287 | hw->phy.ops.release_phy(hw); | 2441 | hw->phy.ops.release(hw); |
2288 | return ret_val; | 2442 | return ret_val; |
2289 | } | 2443 | } |
2290 | 2444 | ||
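The acquire_phy/release_phy renames above do not change the underlying discipline: take the hardware semaphore, perform the paged MDIC access, and release the semaphore on every exit path. A minimal sketch of that bracket follows; acquire(), release(), and the shift/mask constants are plain stand-ins for the driver's ops table and IGP paging macros.

#include <stdint.h>

static int acquire(void)  { return 0; }	/* stand-in for phy->ops.acquire */
static void release(void) { }		/* stand-in for phy->ops.release */
static int select_page(uint32_t page) { (void)page; return 0; }
static int write_reg(uint32_t reg, uint16_t val) { (void)reg; (void)val; return 0; }

#define PAGE_SHIFT 5
#define REG_MASK   0x1f

static int write_paged_reg(uint32_t offset, uint16_t data)
{
	int ret;

	ret = acquire();	/* hold the semaphore for the whole access */
	if (ret)
		return ret;

	ret = select_page(offset >> PAGE_SHIFT);
	if (ret)
		goto out;

	ret = write_reg(offset & REG_MASK, data);

out:
	release();		/* every exit path drops the semaphore */
	return ret;
}

int main(void)
{
	return write_paged_reg(0x3a, 0x1234);
}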
@@ -2305,7 +2459,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2305 | u32 page = offset >> IGP_PAGE_SHIFT; | 2459 | u32 page = offset >> IGP_PAGE_SHIFT; |
2306 | u32 page_shift = 0; | 2460 | u32 page_shift = 0; |
2307 | 2461 | ||
2308 | ret_val = hw->phy.ops.acquire_phy(hw); | 2462 | ret_val = hw->phy.ops.acquire(hw); |
2309 | if (ret_val) | 2463 | if (ret_val) |
2310 | return ret_val; | 2464 | return ret_val; |
2311 | 2465 | ||
@@ -2342,7 +2496,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2342 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 2496 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
2343 | data); | 2497 | data); |
2344 | out: | 2498 | out: |
2345 | hw->phy.ops.release_phy(hw); | 2499 | hw->phy.ops.release(hw); |
2346 | return ret_val; | 2500 | return ret_val; |
2347 | } | 2501 | } |
2348 | 2502 | ||
@@ -2361,7 +2515,7 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2361 | s32 ret_val; | 2515 | s32 ret_val; |
2362 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); | 2516 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); |
2363 | 2517 | ||
2364 | ret_val = hw->phy.ops.acquire_phy(hw); | 2518 | ret_val = hw->phy.ops.acquire(hw); |
2365 | if (ret_val) | 2519 | if (ret_val) |
2366 | return ret_val; | 2520 | return ret_val; |
2367 | 2521 | ||
@@ -2387,7 +2541,7 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2387 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 2541 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
2388 | data); | 2542 | data); |
2389 | out: | 2543 | out: |
2390 | hw->phy.ops.release_phy(hw); | 2544 | hw->phy.ops.release(hw); |
2391 | return ret_val; | 2545 | return ret_val; |
2392 | } | 2546 | } |
2393 | 2547 | ||
@@ -2405,7 +2559,7 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) | |||
2405 | s32 ret_val; | 2559 | s32 ret_val; |
2406 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); | 2560 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); |
2407 | 2561 | ||
2408 | ret_val = hw->phy.ops.acquire_phy(hw); | 2562 | ret_val = hw->phy.ops.acquire(hw); |
2409 | if (ret_val) | 2563 | if (ret_val) |
2410 | return ret_val; | 2564 | return ret_val; |
2411 | 2565 | ||
@@ -2431,7 +2585,7 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) | |||
2431 | data); | 2585 | data); |
2432 | 2586 | ||
2433 | out: | 2587 | out: |
2434 | hw->phy.ops.release_phy(hw); | 2588 | hw->phy.ops.release(hw); |
2435 | return ret_val; | 2589 | return ret_val; |
2436 | } | 2590 | } |
2437 | 2591 | ||
@@ -2464,7 +2618,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | |||
2464 | /* Gig must be disabled for MDIO accesses to page 800 */ | 2618 | /* Gig must be disabled for MDIO accesses to page 800 */ |
2465 | if ((hw->mac.type == e1000_pchlan) && | 2619 | if ((hw->mac.type == e1000_pchlan) && |
2466 | (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) | 2620 | (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) |
2467 | hw_dbg(hw, "Attempting to access page 800 while gig enabled\n"); | 2621 | e_dbg("Attempting to access page 800 while gig enabled.\n"); |
2468 | 2622 | ||
2469 | /* All operations in this function are phy address 1 */ | 2623 | /* All operations in this function are phy address 1 */ |
2470 | hw->phy.addr = 1; | 2624 | hw->phy.addr = 1; |
@@ -2474,20 +2628,26 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | |||
2474 | (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); | 2628 | (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); |
2475 | 2629 | ||
2476 | ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg); | 2630 | ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg); |
2477 | if (ret_val) | 2631 | if (ret_val) { |
2632 | e_dbg("Could not read PHY page 769\n"); | ||
2478 | goto out; | 2633 | goto out; |
2634 | } | ||
2479 | 2635 | ||
2480 | /* First clear bit 4 to avoid a power state change */ | 2636 | /* First clear bit 4 to avoid a power state change */ |
2481 | phy_reg &= ~(BM_WUC_HOST_WU_BIT); | 2637 | phy_reg &= ~(BM_WUC_HOST_WU_BIT); |
2482 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); | 2638 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); |
2483 | if (ret_val) | 2639 | if (ret_val) { |
2640 | e_dbg("Could not clear PHY page 769 bit 4\n"); | ||
2484 | goto out; | 2641 | goto out; |
2642 | } | ||
2485 | 2643 | ||
2486 | /* Write bit 2 = 1, and clear bit 4 to 769_17 */ | 2644 | /* Write bit 2 = 1, and clear bit 4 to 769_17 */ |
2487 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, | 2645 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, |
2488 | phy_reg | BM_WUC_ENABLE_BIT); | 2646 | phy_reg | BM_WUC_ENABLE_BIT); |
2489 | if (ret_val) | 2647 | if (ret_val) { |
2648 | e_dbg("Could not write PHY page 769 bit 2\n"); | ||
2490 | goto out; | 2649 | goto out; |
2650 | } | ||
2491 | 2651 | ||
2492 | /* Select page 800 */ | 2652 | /* Select page 800 */ |
2493 | ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, | 2653 | ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, |
@@ -2495,21 +2655,25 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | |||
2495 | 2655 | ||
2496 | /* Write the page 800 offset value using opcode 0x11 */ | 2656 | /* Write the page 800 offset value using opcode 0x11 */ |
2497 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); | 2657 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); |
2498 | if (ret_val) | 2658 | if (ret_val) { |
2659 | e_dbg("Could not write address opcode to page 800\n"); | ||
2499 | goto out; | 2660 | goto out; |
2661 | } | ||
2500 | 2662 | ||
2501 | if (read) { | 2663 | if (read) { |
2502 | /* Read the page 800 value using opcode 0x12 */ | 2664 | /* Read the page 800 value using opcode 0x12 */ |
2503 | ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, | 2665 | ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, |
2504 | data); | 2666 | data); |
2505 | } else { | 2667 | } else { |
2506 | /* Read the page 800 value using opcode 0x12 */ | 2668 | /* Write the page 800 value using opcode 0x12 */ |
2507 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, | 2669 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, |
2508 | *data); | 2670 | *data); |
2509 | } | 2671 | } |
2510 | 2672 | ||
2511 | if (ret_val) | 2673 | if (ret_val) { |
2674 | e_dbg("Could not access data value from page 800\n"); | ||
2512 | goto out; | 2675 | goto out; |
2676 | } | ||
2513 | 2677 | ||
2514 | /* | 2678 | /* |
2515 | * Restore 769_17.2 to its original value | 2679 | * Restore 769_17.2 to its original value |
@@ -2520,12 +2684,53 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | |||
2520 | 2684 | ||
2521 | /* Clear 769_17.2 */ | 2685 | /* Clear 769_17.2 */ |
2522 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); | 2686 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); |
2687 | if (ret_val) { | ||
2688 | e_dbg("Could not clear PHY page 769 bit 2\n"); | ||
2689 | goto out; | ||
2690 | } | ||
2523 | 2691 | ||
2524 | out: | 2692 | out: |
2525 | return ret_val; | 2693 | return ret_val; |
2526 | } | 2694 | } |
2527 | 2695 | ||
2528 | /** | 2696 | /** |
2697 | * e1000_power_up_phy_copper - Restore copper link in case of PHY power down | ||
2698 | * @hw: pointer to the HW structure | ||
2699 | * | ||
2700 | * In the case of a PHY power down to save power, to turn off link during a | ||
2701 | * driver unload, or when wake on LAN is not enabled, restore the link to its | ||
2702 | * previous settings. | ||
2703 | **/ | ||
2704 | void e1000_power_up_phy_copper(struct e1000_hw *hw) | ||
2705 | { | ||
2706 | u16 mii_reg = 0; | ||
2707 | |||
2708 | /* The PHY will retain its settings across a power down/up cycle */ | ||
2709 | e1e_rphy(hw, PHY_CONTROL, &mii_reg); | ||
2710 | mii_reg &= ~MII_CR_POWER_DOWN; | ||
2711 | e1e_wphy(hw, PHY_CONTROL, mii_reg); | ||
2712 | } | ||
2713 | |||
2714 | /** | ||
2715 | * e1000_power_down_phy_copper - Power down copper PHY | ||
2716 | * @hw: pointer to the HW structure | ||
2717 | * | ||
2718 | * Power down the PHY to save power, to turn off link during a driver | ||
2719 | * unload, or when wake on LAN is not enabled. The PHY retains its settings | ||
2720 | * across a power down/up cycle. | ||
2721 | **/ | ||
2722 | void e1000_power_down_phy_copper(struct e1000_hw *hw) | ||
2723 | { | ||
2724 | u16 mii_reg = 0; | ||
2725 | |||
2726 | /* The PHY will retain its settings across a power down/up cycle */ | ||
2727 | e1e_rphy(hw, PHY_CONTROL, &mii_reg); | ||
2728 | mii_reg |= MII_CR_POWER_DOWN; | ||
2729 | e1e_wphy(hw, PHY_CONTROL, mii_reg); | ||
2730 | msleep(1); | ||
2731 | } | ||
2732 | |||
2733 | /** | ||
2529 | * e1000e_commit_phy - Soft PHY reset | 2734 | * e1000e_commit_phy - Soft PHY reset |
2530 | * @hw: pointer to the HW structure | 2735 | * @hw: pointer to the HW structure |
2531 | * | 2736 | * |
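The page-800 (wakeup register) helper touched in the hunks above is only partially visible in the diff. For orientation, the sketch below reconstructs the sequence from the fragments that are shown: select page 769, save the enable register, clear the host wake-up bit before setting the enable bit, select page 800, write the offset via the address opcode, move the value via the data opcode, then restore the enable register. All names and values here are placeholders mirroring the registers named in the hunks, not the real BM_* definitions, and the helpers are stubs.

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SELECT_REG  0x1f
#define WUC_ENABLE_PAGE  769
#define WUC_PAGE         800
#define WUC_ENABLE_REG   17
#define WUC_ENABLE_BIT   (1u << 2)
#define HOST_WU_BIT      (1u << 4)
#define ADDR_OPCODE      0x11
#define DATA_OPCODE      0x12
#define PAGE_SHIFT       5

static int mdic_read(uint32_t reg, uint16_t *val) { (void)reg; *val = 0; return 0; }
static int mdic_write(uint32_t reg, uint16_t val) { (void)reg; (void)val; return 0; }

static int access_wakeup_reg(uint16_t reg, uint16_t *data, bool read)
{
	uint16_t saved;
	int ret;

	/* 1. Select page 769 and save the WUC enable register. */
	mdic_write(PAGE_SELECT_REG, WUC_ENABLE_PAGE << PAGE_SHIFT);
	ret = mdic_read(WUC_ENABLE_REG, &saved);
	if (ret)
		return ret;

	/* 2. Clear the host wake-up bit first (avoids a power-state change),
	 *    then set the enable bit, 769_17.2. */
	saved &= ~HOST_WU_BIT;
	ret = mdic_write(WUC_ENABLE_REG, saved);
	if (ret)
		return ret;
	ret = mdic_write(WUC_ENABLE_REG, saved | WUC_ENABLE_BIT);
	if (ret)
		return ret;

	/* 3. Select page 800; write the offset via the address opcode, then
	 *    move the value through the data opcode. */
	mdic_write(PAGE_SELECT_REG, WUC_PAGE << PAGE_SHIFT);
	ret = mdic_write(ADDR_OPCODE, reg);
	if (ret)
		return ret;
	ret = read ? mdic_read(DATA_OPCODE, data) : mdic_write(DATA_OPCODE, *data);
	if (ret)
		return ret;

	/* 4. Restore 769_17.2 to its original (cleared) value. */
	return mdic_write(WUC_ENABLE_REG, saved);
}

int main(void)
{
	uint16_t v = 0;

	return access_wakeup_reg(3, &v, true);
}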
@@ -2534,8 +2739,8 @@ out: | |||
2534 | **/ | 2739 | **/ |
2535 | s32 e1000e_commit_phy(struct e1000_hw *hw) | 2740 | s32 e1000e_commit_phy(struct e1000_hw *hw) |
2536 | { | 2741 | { |
2537 | if (hw->phy.ops.commit_phy) | 2742 | if (hw->phy.ops.commit) |
2538 | return hw->phy.ops.commit_phy(hw); | 2743 | return hw->phy.ops.commit(hw); |
2539 | 2744 | ||
2540 | return 0; | 2745 | return 0; |
2541 | } | 2746 | } |
@@ -2563,38 +2768,6 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) | |||
2563 | } | 2768 | } |
2564 | 2769 | ||
2565 | /** | 2770 | /** |
2566 | * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode | ||
2567 | * @hw: pointer to the HW structure | ||
2568 | * @slow: true for slow mode, false for normal mode | ||
2569 | * | ||
2570 | * Assumes semaphore already acquired. | ||
2571 | **/ | ||
2572 | s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow) | ||
2573 | { | ||
2574 | s32 ret_val = 0; | ||
2575 | u16 data = 0; | ||
2576 | |||
2577 | /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */ | ||
2578 | hw->phy.addr = 1; | ||
2579 | ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, | ||
2580 | (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); | ||
2581 | if (ret_val) | ||
2582 | goto out; | ||
2583 | |||
2584 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1, | ||
2585 | (0x2180 | (slow << 10))); | ||
2586 | if (ret_val) | ||
2587 | goto out; | ||
2588 | |||
2589 | /* dummy read when reverting to fast mode - throw away result */ | ||
2590 | if (!slow) | ||
2591 | ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data); | ||
2592 | |||
2593 | out: | ||
2594 | return ret_val; | ||
2595 | } | ||
2596 | |||
2597 | /** | ||
2598 | * __e1000_read_phy_reg_hv - Read HV PHY register | 2771 | * __e1000_read_phy_reg_hv - Read HV PHY register |
2599 | * @hw: pointer to the HW structure | 2772 | * @hw: pointer to the HW structure |
2600 | * @offset: register offset to be read | 2773 | * @offset: register offset to be read |
@@ -2611,24 +2784,13 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, | |||
2611 | s32 ret_val; | 2784 | s32 ret_val; |
2612 | u16 page = BM_PHY_REG_PAGE(offset); | 2785 | u16 page = BM_PHY_REG_PAGE(offset); |
2613 | u16 reg = BM_PHY_REG_NUM(offset); | 2786 | u16 reg = BM_PHY_REG_NUM(offset); |
2614 | bool in_slow_mode = false; | ||
2615 | 2787 | ||
2616 | if (!locked) { | 2788 | if (!locked) { |
2617 | ret_val = hw->phy.ops.acquire_phy(hw); | 2789 | ret_val = hw->phy.ops.acquire(hw); |
2618 | if (ret_val) | 2790 | if (ret_val) |
2619 | return ret_val; | 2791 | return ret_val; |
2620 | } | 2792 | } |
2621 | 2793 | ||
2622 | /* Workaround failure in MDIO access while cable is disconnected */ | ||
2623 | if ((hw->phy.type == e1000_phy_82577) && | ||
2624 | !(er32(STATUS) & E1000_STATUS_LU)) { | ||
2625 | ret_val = e1000_set_mdio_slow_mode_hv(hw, true); | ||
2626 | if (ret_val) | ||
2627 | goto out; | ||
2628 | |||
2629 | in_slow_mode = true; | ||
2630 | } | ||
2631 | |||
2632 | /* Page 800 works differently than the rest so it has its own func */ | 2794 | /* Page 800 works differently than the rest so it has its own func */ |
2633 | if (page == BM_WUC_PAGE) { | 2795 | if (page == BM_WUC_PAGE) { |
2634 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, | 2796 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, |
@@ -2665,12 +2827,8 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, | |||
2665 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, | 2827 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, |
2666 | data); | 2828 | data); |
2667 | out: | 2829 | out: |
2668 | /* Revert to MDIO fast mode, if applicable */ | ||
2669 | if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) | ||
2670 | ret_val |= e1000_set_mdio_slow_mode_hv(hw, false); | ||
2671 | |||
2672 | if (!locked) | 2830 | if (!locked) |
2673 | hw->phy.ops.release_phy(hw); | 2831 | hw->phy.ops.release(hw); |
2674 | 2832 | ||
2675 | return ret_val; | 2833 | return ret_val; |
2676 | } | 2834 | } |
@@ -2720,24 +2878,13 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, | |||
2720 | s32 ret_val; | 2878 | s32 ret_val; |
2721 | u16 page = BM_PHY_REG_PAGE(offset); | 2879 | u16 page = BM_PHY_REG_PAGE(offset); |
2722 | u16 reg = BM_PHY_REG_NUM(offset); | 2880 | u16 reg = BM_PHY_REG_NUM(offset); |
2723 | bool in_slow_mode = false; | ||
2724 | 2881 | ||
2725 | if (!locked) { | 2882 | if (!locked) { |
2726 | ret_val = hw->phy.ops.acquire_phy(hw); | 2883 | ret_val = hw->phy.ops.acquire(hw); |
2727 | if (ret_val) | 2884 | if (ret_val) |
2728 | return ret_val; | 2885 | return ret_val; |
2729 | } | 2886 | } |
2730 | 2887 | ||
2731 | /* Workaround failure in MDIO access while cable is disconnected */ | ||
2732 | if ((hw->phy.type == e1000_phy_82577) && | ||
2733 | !(er32(STATUS) & E1000_STATUS_LU)) { | ||
2734 | ret_val = e1000_set_mdio_slow_mode_hv(hw, true); | ||
2735 | if (ret_val) | ||
2736 | goto out; | ||
2737 | |||
2738 | in_slow_mode = true; | ||
2739 | } | ||
2740 | |||
2741 | /* Page 800 works differently than the rest so it has its own func */ | 2888 | /* Page 800 works differently than the rest so it has its own func */ |
2742 | if (page == BM_WUC_PAGE) { | 2889 | if (page == BM_WUC_PAGE) { |
2743 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, | 2890 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, |
@@ -2791,12 +2938,8 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, | |||
2791 | data); | 2938 | data); |
2792 | 2939 | ||
2793 | out: | 2940 | out: |
2794 | /* Revert to MDIO fast mode, if applicable */ | ||
2795 | if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) | ||
2796 | ret_val |= e1000_set_mdio_slow_mode_hv(hw, false); | ||
2797 | |||
2798 | if (!locked) | 2941 | if (!locked) |
2799 | hw->phy.ops.release_phy(hw); | 2942 | hw->phy.ops.release(hw); |
2800 | 2943 | ||
2801 | return ret_val; | 2944 | return ret_val; |
2802 | } | 2945 | } |
@@ -2872,7 +3015,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | |||
2872 | /* masking with 0x3F to remove the page from offset */ | 3015 | /* masking with 0x3F to remove the page from offset */ |
2873 | ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); | 3016 | ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); |
2874 | if (ret_val) { | 3017 | if (ret_val) { |
2875 | hw_dbg(hw, "Could not write PHY the HV address register\n"); | 3018 | e_dbg("Could not write PHY the HV address register\n"); |
2876 | goto out; | 3019 | goto out; |
2877 | } | 3020 | } |
2878 | 3021 | ||
@@ -2883,7 +3026,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | |||
2883 | ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); | 3026 | ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); |
2884 | 3027 | ||
2885 | if (ret_val) { | 3028 | if (ret_val) { |
2886 | hw_dbg(hw, "Could not read data value from HV data register\n"); | 3029 | e_dbg("Could not read data value from HV data register\n"); |
2887 | goto out; | 3030 | goto out; |
2888 | } | 3031 | } |
2889 | 3032 | ||
@@ -2911,12 +3054,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) | |||
2911 | goto out; | 3054 | goto out; |
2912 | 3055 | ||
2913 | /* Do not apply workaround if in PHY loopback bit 14 set */ | 3056 | /* Do not apply workaround if in PHY loopback bit 14 set */ |
2914 | hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &data); | 3057 | hw->phy.ops.read_reg(hw, PHY_CONTROL, &data); |
2915 | if (data & PHY_CONTROL_LB) | 3058 | if (data & PHY_CONTROL_LB) |
2916 | goto out; | 3059 | goto out; |
2917 | 3060 | ||
2918 | /* check if link is up and at 1Gbps */ | 3061 | /* check if link is up and at 1Gbps */ |
2919 | ret_val = hw->phy.ops.read_phy_reg(hw, BM_CS_STATUS, &data); | 3062 | ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data); |
2920 | if (ret_val) | 3063 | if (ret_val) |
2921 | goto out; | 3064 | goto out; |
2922 | 3065 | ||
@@ -2932,13 +3075,13 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) | |||
2932 | mdelay(200); | 3075 | mdelay(200); |
2933 | 3076 | ||
2934 | /* flush the packets in the fifo buffer */ | 3077 | /* flush the packets in the fifo buffer */ |
2935 | ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL, | 3078 | ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, |
2936 | HV_MUX_DATA_CTRL_GEN_TO_MAC | | 3079 | HV_MUX_DATA_CTRL_GEN_TO_MAC | |
2937 | HV_MUX_DATA_CTRL_FORCE_SPEED); | 3080 | HV_MUX_DATA_CTRL_FORCE_SPEED); |
2938 | if (ret_val) | 3081 | if (ret_val) |
2939 | goto out; | 3082 | goto out; |
2940 | 3083 | ||
2941 | ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL, | 3084 | ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, |
2942 | HV_MUX_DATA_CTRL_GEN_TO_MAC); | 3085 | HV_MUX_DATA_CTRL_GEN_TO_MAC); |
2943 | 3086 | ||
2944 | out: | 3087 | out: |
@@ -2959,7 +3102,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw) | |||
2959 | s32 ret_val; | 3102 | s32 ret_val; |
2960 | u16 data; | 3103 | u16 data; |
2961 | 3104 | ||
2962 | ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data); | 3105 | ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); |
2963 | 3106 | ||
2964 | if (!ret_val) | 3107 | if (!ret_val) |
2965 | phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) | 3108 | phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) |
@@ -2984,13 +3127,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) | |||
2984 | u16 phy_data; | 3127 | u16 phy_data; |
2985 | bool link; | 3128 | bool link; |
2986 | 3129 | ||
2987 | ret_val = phy->ops.read_phy_reg(hw, PHY_CONTROL, &phy_data); | 3130 | ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); |
2988 | if (ret_val) | 3131 | if (ret_val) |
2989 | goto out; | 3132 | goto out; |
2990 | 3133 | ||
2991 | e1000e_phy_force_speed_duplex_setup(hw, &phy_data); | 3134 | e1000e_phy_force_speed_duplex_setup(hw, &phy_data); |
2992 | 3135 | ||
2993 | ret_val = phy->ops.write_phy_reg(hw, PHY_CONTROL, phy_data); | 3136 | ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); |
2994 | if (ret_val) | 3137 | if (ret_val) |
2995 | goto out; | 3138 | goto out; |
2996 | 3139 | ||
@@ -2998,23 +3141,23 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) | |||
2998 | * Clear Auto-Crossover to force MDI manually. 82577 requires MDI | 3141 | * Clear Auto-Crossover to force MDI manually. 82577 requires MDI |
2999 | * forced whenever speed and duplex are forced. | 3142 | * forced whenever speed and duplex are forced. |
3000 | */ | 3143 | */ |
3001 | ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_CTRL_2, &phy_data); | 3144 | ret_val = phy->ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data); |
3002 | if (ret_val) | 3145 | if (ret_val) |
3003 | goto out; | 3146 | goto out; |
3004 | 3147 | ||
3005 | phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX; | 3148 | phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX; |
3006 | phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX; | 3149 | phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX; |
3007 | 3150 | ||
3008 | ret_val = phy->ops.write_phy_reg(hw, I82577_PHY_CTRL_2, phy_data); | 3151 | ret_val = phy->ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data); |
3009 | if (ret_val) | 3152 | if (ret_val) |
3010 | goto out; | 3153 | goto out; |
3011 | 3154 | ||
3012 | hw_dbg(hw, "I82577_PHY_CTRL_2: %X\n", phy_data); | 3155 | e_dbg("I82577_PHY_CTRL_2: %X\n", phy_data); |
3013 | 3156 | ||
3014 | udelay(1); | 3157 | udelay(1); |
3015 | 3158 | ||
3016 | if (phy->autoneg_wait_to_complete) { | 3159 | if (phy->autoneg_wait_to_complete) { |
3017 | hw_dbg(hw, "Waiting for forced speed/duplex link on 82577 phy\n"); | 3160 | e_dbg("Waiting for forced speed/duplex link on 82577 phy\n"); |
3018 | 3161 | ||
3019 | ret_val = e1000e_phy_has_link_generic(hw, | 3162 | ret_val = e1000e_phy_has_link_generic(hw, |
3020 | PHY_FORCE_LIMIT, | 3163 | PHY_FORCE_LIMIT, |
@@ -3024,7 +3167,7 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) | |||
3024 | goto out; | 3167 | goto out; |
3025 | 3168 | ||
3026 | if (!link) | 3169 | if (!link) |
3027 | hw_dbg(hw, "Link taking longer than expected.\n"); | 3170 | e_dbg("Link taking longer than expected.\n"); |
3028 | 3171 | ||
3029 | /* Try once more */ | 3172 | /* Try once more */ |
3030 | ret_val = e1000e_phy_has_link_generic(hw, | 3173 | ret_val = e1000e_phy_has_link_generic(hw, |
@@ -3060,7 +3203,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) | |||
3060 | goto out; | 3203 | goto out; |
3061 | 3204 | ||
3062 | if (!link) { | 3205 | if (!link) { |
3063 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | 3206 | e_dbg("Phy info is only valid if link is up\n"); |
3064 | ret_val = -E1000_ERR_CONFIG; | 3207 | ret_val = -E1000_ERR_CONFIG; |
3065 | goto out; | 3208 | goto out; |
3066 | } | 3209 | } |
@@ -3071,7 +3214,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) | |||
3071 | if (ret_val) | 3214 | if (ret_val) |
3072 | goto out; | 3215 | goto out; |
3073 | 3216 | ||
3074 | ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data); | 3217 | ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); |
3075 | if (ret_val) | 3218 | if (ret_val) |
3076 | goto out; | 3219 | goto out; |
3077 | 3220 | ||
@@ -3083,7 +3226,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) | |||
3083 | if (ret_val) | 3226 | if (ret_val) |
3084 | goto out; | 3227 | goto out; |
3085 | 3228 | ||
3086 | ret_val = phy->ops.read_phy_reg(hw, PHY_1000T_STATUS, &data); | 3229 | ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); |
3087 | if (ret_val) | 3230 | if (ret_val) |
3088 | goto out; | 3231 | goto out; |
3089 | 3232 | ||
@@ -3117,7 +3260,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) | |||
3117 | s32 ret_val; | 3260 | s32 ret_val; |
3118 | u16 phy_data, length; | 3261 | u16 phy_data, length; |
3119 | 3262 | ||
3120 | ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); | 3263 | ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); |
3121 | if (ret_val) | 3264 | if (ret_val) |
3122 | goto out; | 3265 | goto out; |
3123 | 3266 | ||
@@ -3125,7 +3268,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) | |||
3125 | I82577_DSTATUS_CABLE_LENGTH_SHIFT; | 3268 | I82577_DSTATUS_CABLE_LENGTH_SHIFT; |
3126 | 3269 | ||
3127 | if (length == E1000_CABLE_LENGTH_UNDEFINED) | 3270 | if (length == E1000_CABLE_LENGTH_UNDEFINED) |
3128 | ret_val = E1000_ERR_PHY; | 3271 | ret_val = -E1000_ERR_PHY; |
3129 | 3272 | ||
3130 | phy->cable_length = length; | 3273 | phy->cable_length = length; |
3131 | 3274 | ||
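The last hunk is a one-character fix: the error constant is now returned negated, matching the -E1000_ERR_* convention used by the other helpers in this file. A tiny sketch of why the sign matters to callers, with placeholder names and values:

#include <stdio.h>

#define ERR_PHY 2			/* placeholder for a positive E1000_* constant */
#define CABLE_LENGTH_UNDEFINED 0xff

/* Returns 0 on success or a negative error code, like the driver helpers. */
static int get_cable_length(unsigned int raw)
{
	if (raw == CABLE_LENGTH_UNDEFINED)
		return -ERR_PHY;	/* negated, so callers that test for a
					 * negative return see the failure */
	return 0;
}

int main(void)
{
	printf("%d\n", get_cable_length(CABLE_LENGTH_UNDEFINED));
	return 0;
}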