Diffstat (limited to 'drivers/net/e1000e')

 drivers/net/e1000e/82571.c   | 323
 drivers/net/e1000e/defines.h |   2
 drivers/net/e1000e/e1000.h   |  49
 drivers/net/e1000e/es2lan.c  | 213
 drivers/net/e1000e/ethtool.c |  93
 drivers/net/e1000e/hw.h      |  52
 drivers/net/e1000e/ich8lan.c | 534
 drivers/net/e1000e/lib.c     | 261
 drivers/net/e1000e/netdev.c  | 520
 drivers/net/e1000e/param.c   |   2
 drivers/net/e1000e/phy.c     | 580

 11 files changed, 1407 insertions, 1222 deletions
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index d1e0563a67df..c1a42cfc80ba 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
 /*******************************************************************************

   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.

   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -43,10 +43,6 @@
  * 82583V Gigabit Network Connection
  */

-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-
 #include "e1000.h"

 #define ID_LED_RESERVED_F746 0xF746
@@ -69,15 +65,15 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
 static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
 static s32 e1000_setup_link_82571(struct e1000_hw *hw);
 static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
+static void e1000_clear_vfta_82571(struct e1000_hw *hw);
 static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
 static s32 e1000_led_on_82574(struct e1000_hw *hw);
 static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);

 /**
  * e1000_init_phy_params_82571 - Init PHY func ptrs.
  * @hw: pointer to the HW structure
- *
- * This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 {
@@ -93,6 +89,9 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 	phy->reset_delay_us = 100;

+	phy->ops.power_up = e1000_power_up_phy_copper;
+	phy->ops.power_down = e1000_power_down_phy_copper_82571;
+
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
@@ -140,8 +139,6 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 /**
  * e1000_init_nvm_params_82571 - Init NVM func ptrs.
  * @hw: pointer to the HW structure
- *
- * This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
 {
@@ -205,8 +202,6 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
 /**
  * e1000_init_mac_params_82571 - Init MAC func ptrs.
  * @hw: pointer to the HW structure
- *
- * This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 {
@@ -240,7 +235,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 	/* Set rar entry count */
 	mac->rar_entry_count = E1000_RAR_ENTRIES;
 	/* Set if manageability features are enabled. */
-	mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
+	mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
+	                           ? true : false;

 	/* check for link */
 	switch (hw->phy.media_type) {
@@ -313,7 +309,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 		 * indicates that the bootagent or EFI code has
 		 * improperly left this bit enabled
 		 */
-		hw_dbg(hw, "Please update your 82571 Bootagent\n");
+		e_dbg("Please update your 82571 Bootagent\n");
 	}
 	ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
 }
@@ -487,7 +483,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
 	}

 	if (i == sw_timeout) {
-		hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
+		e_dbg("Driver can't access device - SMBI bit is set.\n");
 		hw->dev_spec.e82571.smb_counter++;
 	}
 	/* Get the FW semaphore. */
@@ -505,7 +501,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
 	if (i == fw_timeout) {
 		/* Release semaphores */
 		e1000_put_hw_semaphore_82571(hw);
-		hw_dbg(hw, "Driver can't access the NVM\n");
+		e_dbg("Driver can't access the NVM\n");
 		return -E1000_ERR_NVM;
 	}

@@ -702,8 +698,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
 				  u16 words, u16 *data)
 {
 	struct e1000_nvm_info *nvm = &hw->nvm;
-	u32 i;
-	u32 eewr = 0;
+	u32 i, eewr = 0;
 	s32 ret_val = 0;

 	/*
@@ -712,7 +707,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
 	 */
 	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
 	    (words == 0)) {
-		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
+		e_dbg("nvm parameter(s) out of bounds\n");
 		return -E1000_ERR_NVM;
 	}

@@ -753,7 +748,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
 		timeout--;
 	}
 	if (!timeout) {
-		hw_dbg(hw, "MNG configuration cycle has not completed.\n");
+		e_dbg("MNG configuration cycle has not completed.\n");
 		return -E1000_ERR_RESET;
 	}

@@ -763,7 +758,7 @@
 /**
  * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
  * @hw: pointer to the HW structure
- * @active: TRUE to enable LPLU, FALSE to disable
+ * @active: true to enable LPLU, false to disable
  *
  * Sets the LPLU D0 state according to the active flag.  When activating LPLU
  * this function also disables smart speed and vice versa.  LPLU will not be
@@ -834,15 +829,11 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
  * e1000_reset_hw_82571 - Reset hardware
  * @hw: pointer to the HW structure
  *
- * This resets the hardware into a known state.  This is a
- * function pointer entry point called by the api module.
+ * This resets the hardware into a known state.
 **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-	u32 ctrl;
-	u32 extcnf_ctrl;
-	u32 ctrl_ext;
-	u32 icr;
+	u32 ctrl, extcnf_ctrl, ctrl_ext, icr;
 	s32 ret_val;
 	u16 i = 0;

@@ -852,9 +843,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 	 */
 	ret_val = e1000e_disable_pcie_master(hw);
 	if (ret_val)
-		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+		e_dbg("PCI-E Master disable polling has failed.\n");

-	hw_dbg(hw, "Masking off all interrupts\n");
+	e_dbg("Masking off all interrupts\n");
 	ew32(IMC, 0xffffffff);

 	ew32(RCTL, 0);
@@ -893,7 +884,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)

 	ctrl = er32(CTRL);

-	hw_dbg(hw, "Issuing a global reset to MAC\n");
+	e_dbg("Issuing a global reset to MAC\n");
 	ew32(CTRL, ctrl | E1000_CTRL_RST);

 	if (hw->nvm.type == e1000_nvm_flash_hw) {
@@ -951,21 +942,19 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 	struct e1000_mac_info *mac = &hw->mac;
 	u32 reg_data;
 	s32 ret_val;
-	u16 i;
-	u16 rar_count = mac->rar_entry_count;
+	u16 i, rar_count = mac->rar_entry_count;

 	e1000_initialize_hw_bits_82571(hw);

 	/* Initialize identification LED */
 	ret_val = e1000e_id_led_init(hw);
-	if (ret_val) {
-		hw_dbg(hw, "Error initializing identification LED\n");
-		return ret_val;
-	}
+	if (ret_val)
+		e_dbg("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */

 	/* Disabling VLAN filtering */
-	hw_dbg(hw, "Initializing the IEEE VLAN\n");
-	e1000e_clear_vfta(hw);
+	e_dbg("Initializing the IEEE VLAN\n");
+	mac->ops.clear_vfta(hw);

 	/* Setup the receive address. */
 	/*
@@ -978,7 +967,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 	e1000e_init_rx_addrs(hw, rar_count);

 	/* Zero out the Multicast HASH table */
-	hw_dbg(hw, "Zeroing the MTA\n");
+	e_dbg("Zeroing the MTA\n");
 	for (i = 0; i < mac->mta_reg_count; i++)
 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

@@ -1125,6 +1114,13 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
 	reg |= (1 << 22);
 	ew32(GCR, reg);

+	/*
+	 * Workaround for hardware errata.
+	 * apply workaround for hardware errata documented in errata
+	 * docs Fixes issue where some error prone or unreliable PCIe
+	 * completions are occurring, particularly with ASPM enabled.
+	 * Without fix, issue can cause tx timeouts.
+	 */
 	reg = er32(GCR2);
 	reg |= 1;
 	ew32(GCR2, reg);
@@ -1137,13 +1133,13 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
 }

 /**
- * e1000e_clear_vfta - Clear VLAN filter table
+ * e1000_clear_vfta_82571 - Clear VLAN filter table
  * @hw: pointer to the HW structure
  *
  * Clears the register array which contains the VLAN filter table by
  * setting all the values to 0.
 **/
-void e1000e_clear_vfta(struct e1000_hw *hw)
+static void e1000_clear_vfta_82571(struct e1000_hw *hw)
 {
 	u32 offset;
 	u32 vfta_value = 0;
@@ -1360,8 +1356,20 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
  * e1000_check_for_serdes_link_82571 - Check for link (Serdes)
  * @hw: pointer to the HW structure
  *
- * Checks for link up on the hardware.  If link is not up and we have
- * a signal, then we need to force link up.
+ * Reports the link state as up or down.
+ *
+ * If autonegotiation is supported by the link partner, the link state is
+ * determined by the result of autonegotiation. This is the most likely case.
+ * If autonegotiation is not supported by the link partner, and the link
+ * has a valid signal, force the link up.
+ *
+ * The link state is represented internally here by 4 states:
+ *
+ * 1) down
+ * 2) autoneg_progress
+ * 3) autoneg_complete (the link sucessfully autonegotiated)
+ * 4) forced_up (the link has been forced up, it did not autonegotiate)
+ *
 **/
 static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 {
@@ -1387,7 +1395,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 			 */
 			mac->serdes_link_state =
 			    e1000_serdes_link_autoneg_progress;
-			hw_dbg(hw, "AN_UP -> AN_PROG\n");
+			mac->serdes_has_link = false;
+			e_dbg("AN_UP -> AN_PROG\n");
 		}
 		break;

@@ -1401,79 +1410,86 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 			if (rxcw & E1000_RXCW_C) {
 				/* Enable autoneg, and unforce link up */
 				ew32(TXCW, mac->txcw);
-				ew32(CTRL,
-				     (ctrl & ~E1000_CTRL_SLU));
+				ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 				mac->serdes_link_state =
 				    e1000_serdes_link_autoneg_progress;
-				hw_dbg(hw, "FORCED_UP -> AN_PROG\n");
+				mac->serdes_has_link = false;
+				e_dbg("FORCED_UP -> AN_PROG\n");
 			}
 			break;

 		case e1000_serdes_link_autoneg_progress:
-			/*
-			 * If the LU bit is set in the STATUS register,
-			 * autoneg has completed sucessfully. If not,
-			 * try foring the link because the far end may be
-			 * available but not capable of autonegotiation.
-			 */
-			if (status & E1000_STATUS_LU) {
-				mac->serdes_link_state =
-				    e1000_serdes_link_autoneg_complete;
-				hw_dbg(hw, "AN_PROG -> AN_UP\n");
+			if (rxcw & E1000_RXCW_C) {
+				/*
+				 * We received /C/ ordered sets, meaning the
+				 * link partner has autonegotiated, and we can
+				 * trust the Link Up (LU) status bit.
+				 */
+				if (status & E1000_STATUS_LU) {
+					mac->serdes_link_state =
+					    e1000_serdes_link_autoneg_complete;
+					e_dbg("AN_PROG -> AN_UP\n");
+					mac->serdes_has_link = true;
+				} else {
+					/* Autoneg completed, but failed. */
+					mac->serdes_link_state =
+					    e1000_serdes_link_down;
+					e_dbg("AN_PROG -> DOWN\n");
+				}
 			} else {
 				/*
-				 * Disable autoneg, force link up and
-				 * full duplex, and change state to forced
+				 * The link partner did not autoneg.
+				 * Force link up and full duplex, and change
+				 * state to forced.
 				 */
-				ew32(TXCW,
-				     (mac->txcw & ~E1000_TXCW_ANE));
+				ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
 				ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
 				ew32(CTRL, ctrl);

 				/* Configure Flow Control after link up. */
-				ret_val =
-				    e1000e_config_fc_after_link_up(hw);
+				ret_val = e1000e_config_fc_after_link_up(hw);
 				if (ret_val) {
-					hw_dbg(hw, "Error config flow control\n");
+					e_dbg("Error config flow control\n");
 					break;
 				}
 				mac->serdes_link_state =
 				    e1000_serdes_link_forced_up;
-				hw_dbg(hw, "AN_PROG -> FORCED_UP\n");
+				mac->serdes_has_link = true;
+				e_dbg("AN_PROG -> FORCED_UP\n");
 			}
-			mac->serdes_has_link = true;
 			break;

 		case e1000_serdes_link_down:
 		default:
-			/* The link was down but the receiver has now gained
+			/*
+			 * The link was down but the receiver has now gained
 			 * valid sync, so lets see if we can bring the link
-			 * up. */
+			 * up.
+			 */
 			ew32(TXCW, mac->txcw);
-			ew32(CTRL,
-			     (ctrl & ~E1000_CTRL_SLU));
+			ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 			mac->serdes_link_state =
 			    e1000_serdes_link_autoneg_progress;
-			hw_dbg(hw, "DOWN -> AN_PROG\n");
+			e_dbg("DOWN -> AN_PROG\n");
 			break;
 		}
 	} else {
 		if (!(rxcw & E1000_RXCW_SYNCH)) {
 			mac->serdes_has_link = false;
 			mac->serdes_link_state = e1000_serdes_link_down;
-			hw_dbg(hw, "ANYSTATE -> DOWN\n");
+			e_dbg("ANYSTATE -> DOWN\n");
 		} else {
 			/*
-			 * We have sync, and can tolerate one
-			 * invalid (IV) codeword before declaring
-			 * link down, so reread to look again
+			 * We have sync, and can tolerate one invalid (IV)
+			 * codeword before declaring link down, so reread
+			 * to look again.
 			 */
 			udelay(10);
 			rxcw = er32(RXCW);
 			if (rxcw & E1000_RXCW_IV) {
 				mac->serdes_link_state = e1000_serdes_link_down;
 				mac->serdes_has_link = false;
-				hw_dbg(hw, "ANYSTATE -> DOWN\n");
+				e_dbg("ANYSTATE -> DOWN\n");
 			}
 		}
 	}
@@ -1495,7 +1511,7 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)

 	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
 	if (ret_val) {
-		hw_dbg(hw, "NVM Read Error\n");
+		e_dbg("NVM Read Error\n");
 		return ret_val;
 	}

@@ -1525,7 +1541,7 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
 bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
 {
 	if (hw->mac.type != e1000_82571)
-		return 0;
+		return false;

 	return hw->dev_spec.e82571.laa_is_present;
 }
@@ -1535,7 +1551,7 @@ bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
  * @hw: pointer to the HW structure
  * @state: enable/disable locally administered address
  *
- * Enable/Disable the current locally administers address state.
+ * Enable/Disable the current locally administered address state.
 **/
 void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
 {
@@ -1609,6 +1625,28 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
 }

 /**
+ * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_mac_info *mac = &hw->mac;
+
+	if (!(phy->ops.check_reset_block))
+		return;
+
+	/* If the management interface is not enabled, then power down */
+	if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
  * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
  * @hw: pointer to the HW structure
  *
@@ -1616,44 +1654,42 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
 **/
 static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
 {
-	u32 temp;
-
 	e1000e_clear_hw_cntrs_base(hw);

-	temp = er32(PRC64);
-	temp = er32(PRC127);
-	temp = er32(PRC255);
-	temp = er32(PRC511);
-	temp = er32(PRC1023);
-	temp = er32(PRC1522);
-	temp = er32(PTC64);
-	temp = er32(PTC127);
-	temp = er32(PTC255);
-	temp = er32(PTC511);
-	temp = er32(PTC1023);
-	temp = er32(PTC1522);
+	er32(PRC64);
+	er32(PRC127);
+	er32(PRC255);
+	er32(PRC511);
+	er32(PRC1023);
+	er32(PRC1522);
+	er32(PTC64);
+	er32(PTC127);
+	er32(PTC255);
+	er32(PTC511);
+	er32(PTC1023);
+	er32(PTC1522);

-	temp = er32(ALGNERRC);
-	temp = er32(RXERRC);
-	temp = er32(TNCRS);
-	temp = er32(CEXTERR);
-	temp = er32(TSCTC);
-	temp = er32(TSCTFC);
+	er32(ALGNERRC);
+	er32(RXERRC);
+	er32(TNCRS);
+	er32(CEXTERR);
+	er32(TSCTC);
+	er32(TSCTFC);

-	temp = er32(MGTPRC);
-	temp = er32(MGTPDC);
-	temp = er32(MGTPTC);
+	er32(MGTPRC);
+	er32(MGTPDC);
+	er32(MGTPTC);

-	temp = er32(IAC);
-	temp = er32(ICRXOC);
+	er32(IAC);
+	er32(ICRXOC);

-	temp = er32(ICRXPTC);
-	temp = er32(ICRXATC);
-	temp = er32(ICTXPTC);
-	temp = er32(ICTXATC);
-	temp = er32(ICTXQEC);
-	temp = er32(ICTXQMTC);
-	temp = er32(ICRXDMTC);
+	er32(ICRXPTC);
+	er32(ICRXATC);
+	er32(ICTXPTC);
+	er32(ICTXATC);
+	er32(ICTXQEC);
+	er32(ICTXQMTC);
+	er32(ICRXDMTC);
 }

 static struct e1000_mac_operations e82571_mac_ops = {
@@ -1667,6 +1703,8 @@ static struct e1000_mac_operations e82571_mac_ops = {
 	/* .led_on: mac type dependent */
 	.led_off = e1000e_led_off_generic,
 	.update_mc_addr_list = e1000_update_mc_addr_list_82571,
+	.write_vfta = e1000_write_vfta_generic,
+	.clear_vfta = e1000_clear_vfta_82571,
 	.reset_hw = e1000_reset_hw_82571,
 	.init_hw = e1000_init_hw_82571,
 	.setup_link = e1000_setup_link_82571,
@@ -1675,64 +1713,67 @@ static struct e1000_mac_operations e82571_mac_ops = {
 };

 static struct e1000_phy_operations e82_phy_ops_igp = {
-	.acquire_phy = e1000_get_hw_semaphore_82571,
+	.acquire = e1000_get_hw_semaphore_82571,
+	.check_polarity = e1000_check_polarity_igp,
 	.check_reset_block = e1000e_check_reset_block_generic,
-	.commit_phy = NULL,
+	.commit = NULL,
 	.force_speed_duplex = e1000e_phy_force_speed_duplex_igp,
 	.get_cfg_done = e1000_get_cfg_done_82571,
 	.get_cable_length = e1000e_get_cable_length_igp_2,
-	.get_phy_info = e1000e_get_phy_info_igp,
-	.read_phy_reg = e1000e_read_phy_reg_igp,
-	.release_phy = e1000_put_hw_semaphore_82571,
-	.reset_phy = e1000e_phy_hw_reset_generic,
+	.get_info = e1000e_get_phy_info_igp,
+	.read_reg = e1000e_read_phy_reg_igp,
+	.release = e1000_put_hw_semaphore_82571,
+	.reset = e1000e_phy_hw_reset_generic,
 	.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
 	.set_d3_lplu_state = e1000e_set_d3_lplu_state,
-	.write_phy_reg = e1000e_write_phy_reg_igp,
+	.write_reg = e1000e_write_phy_reg_igp,
 	.cfg_on_link_up = NULL,
 };

 static struct e1000_phy_operations e82_phy_ops_m88 = {
-	.acquire_phy = e1000_get_hw_semaphore_82571,
+	.acquire = e1000_get_hw_semaphore_82571,
+	.check_polarity = e1000_check_polarity_m88,
 	.check_reset_block = e1000e_check_reset_block_generic,
-	.commit_phy = e1000e_phy_sw_reset,
+	.commit = e1000e_phy_sw_reset,
 	.force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
 	.get_cfg_done = e1000e_get_cfg_done,
 	.get_cable_length = e1000e_get_cable_length_m88,
-	.get_phy_info = e1000e_get_phy_info_m88,
-	.read_phy_reg = e1000e_read_phy_reg_m88,
-	.release_phy = e1000_put_hw_semaphore_82571,
-	.reset_phy = e1000e_phy_hw_reset_generic,
+	.get_info = e1000e_get_phy_info_m88,
+	.read_reg = e1000e_read_phy_reg_m88,
+	.release = e1000_put_hw_semaphore_82571,
+	.reset = e1000e_phy_hw_reset_generic,
 	.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
 	.set_d3_lplu_state = e1000e_set_d3_lplu_state,
-	.write_phy_reg = e1000e_write_phy_reg_m88,
+	.write_reg = e1000e_write_phy_reg_m88,
 	.cfg_on_link_up = NULL,
 };

 static struct e1000_phy_operations e82_phy_ops_bm = {
-	.acquire_phy = e1000_get_hw_semaphore_82571,
+	.acquire = e1000_get_hw_semaphore_82571,
+	.check_polarity = e1000_check_polarity_m88,
 	.check_reset_block = e1000e_check_reset_block_generic,
-	.commit_phy = e1000e_phy_sw_reset,
+	.commit = e1000e_phy_sw_reset,
 	.force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
 	.get_cfg_done = e1000e_get_cfg_done,
 	.get_cable_length = e1000e_get_cable_length_m88,
-	.get_phy_info = e1000e_get_phy_info_m88,
-	.read_phy_reg = e1000e_read_phy_reg_bm2,
-	.release_phy = e1000_put_hw_semaphore_82571,
-	.reset_phy = e1000e_phy_hw_reset_generic,
+	.get_info = e1000e_get_phy_info_m88,
+	.read_reg = e1000e_read_phy_reg_bm2,
+	.release = e1000_put_hw_semaphore_82571,
+	.reset = e1000e_phy_hw_reset_generic,
 	.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
 	.set_d3_lplu_state = e1000e_set_d3_lplu_state,
-	.write_phy_reg = e1000e_write_phy_reg_bm2,
+	.write_reg = e1000e_write_phy_reg_bm2,
 	.cfg_on_link_up = NULL,
 };

 static struct e1000_nvm_operations e82571_nvm_ops = {
-	.acquire_nvm = e1000_acquire_nvm_82571,
-	.read_nvm = e1000e_read_nvm_eerd,
-	.release_nvm = e1000_release_nvm_82571,
-	.update_nvm = e1000_update_nvm_checksum_82571,
+	.acquire = e1000_acquire_nvm_82571,
+	.read = e1000e_read_nvm_eerd,
+	.release = e1000_release_nvm_82571,
+	.update = e1000_update_nvm_checksum_82571,
 	.valid_led_default = e1000_valid_led_default_82571,
-	.validate_nvm = e1000_validate_nvm_checksum_82571,
-	.write_nvm = e1000_write_nvm_82571,
+	.validate = e1000_validate_nvm_checksum_82571,
+	.write = e1000_write_nvm_82571,
 };

 struct e1000_info e1000_82571_info = {
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 1190167a8b3d..86d2809763c3 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
 /*******************************************************************************

   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.

   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 189dfa2d6c76..cebbd9079d53 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
 /*******************************************************************************

   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.

   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -36,6 +36,7 @@
 #include <linux/workqueue.h>
 #include <linux/io.h>
 #include <linux/netdevice.h>
+#include <linux/pci.h>

 #include "hw.h"

@@ -47,9 +48,9 @@ struct e1000_info;

 #ifdef DEBUG
 #define e_dbg(format, arg...) \
-	e_printk(KERN_DEBUG , adapter, format, ## arg)
+	e_printk(KERN_DEBUG , hw->adapter, format, ## arg)
 #else
-#define e_dbg(format, arg...) do { (void)(adapter); } while (0)
+#define e_dbg(format, arg...) do { (void)(hw); } while (0)
 #endif

 #define e_err(format, arg...) \
@@ -141,6 +142,8 @@ struct e1000_info;
 #define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */
 #define HV_TNCRS_LOWER PHY_REG(778, 30)

+#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
+
 /* BM PHY Copper Specific Status */
 #define BM_CS_STATUS 17
 #define BM_CS_STATUS_LINK_UP 0x0400
@@ -191,12 +194,15 @@ struct e1000_buffer {
 			unsigned long time_stamp;
 			u16 length;
 			u16 next_to_watch;
+			u16 mapped_as_page;
 		};
 		/* Rx */
-		/* arrays of page information for packet split */
-		struct e1000_ps_page *ps_pages;
+		struct {
+			/* arrays of page information for packet split */
+			struct e1000_ps_page *ps_pages;
+			struct page *page;
+		};
 	};
-	struct page *page;
 };

 struct e1000_ring {
@@ -329,7 +335,6 @@ struct e1000_adapter {
 	/* OS defined structs */
 	struct net_device *netdev;
 	struct pci_dev *pdev;
-	struct net_device_stats net_stats;

 	/* structs defined in e1000_hw.h */
 	struct e1000_hw hw;
@@ -364,6 +369,7 @@ struct e1000_adapter {
 	struct work_struct downshift_task;
 	struct work_struct update_phy_task;
 	struct work_struct led_blink_task;
+	struct work_struct print_hang_task;
 };

 struct e1000_info {
@@ -486,6 +492,7 @@ extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
 extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
 extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
 extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
+extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);

 extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
 extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
@@ -505,7 +512,7 @@ extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
 extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
 extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
 extern s32 e1000e_setup_link(struct e1000_hw *hw);
-extern void e1000e_clear_vfta(struct e1000_hw *hw);
+extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
 extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
 extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
 					       u8 *mc_addr_list,
@@ -521,7 +528,7 @@ extern void e1000e_config_collision_dist(struct e1000_hw *hw);
 extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
 extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
 extern s32 e1000e_blink_led(struct e1000_hw *hw);
-extern void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
 extern void e1000e_reset_adaptive(struct e1000_hw *hw);
 extern void e1000e_update_adaptive(struct e1000_hw *hw);

@@ -564,6 +571,8 @@ extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
 extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
 				       u32 usec_interval, bool *success);
 extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
+extern void e1000_power_up_phy_copper(struct e1000_hw *hw);
+extern void e1000_power_down_phy_copper(struct e1000_hw *hw);
 extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000e_check_downshift(struct e1000_hw *hw);
@@ -581,9 +590,15 @@ extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
 extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
 extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);

+extern s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+
 static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
 {
-	return hw->phy.ops.reset_phy(hw);
+	return hw->phy.ops.reset(hw);
 }

 static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
@@ -593,12 +608,12 @@ static inline s32 e1000_check_reset_block(struct e1000_hw *hw)

 static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
 {
-	return hw->phy.ops.read_phy_reg(hw, offset, data);
+	return hw->phy.ops.read_reg(hw, offset, data);
 }

 static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
 {
-	return hw->phy.ops.write_phy_reg(hw, offset, data);
+	return hw->phy.ops.write_reg(hw, offset, data);
 }

 static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
@@ -618,27 +633,27 @@ extern s32 e1000e_read_mac_addr(struct e1000_hw *hw);

 static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
 {
-	return hw->nvm.ops.validate_nvm(hw);
+	return hw->nvm.ops.validate(hw);
 }

 static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
 {
-	return hw->nvm.ops.update_nvm(hw);
+	return hw->nvm.ops.update(hw);
 }

 static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 {
-	return hw->nvm.ops.read_nvm(hw, offset, words, data);
+	return hw->nvm.ops.read(hw, offset, words, data);
 }

 static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 {
-	return hw->nvm.ops.write_nvm(hw, offset, words, data);
+	return hw->nvm.ops.write(hw, offset, words, data);
 }

 static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
 {
-	return hw->phy.ops.get_phy_info(hw);
+	return hw->phy.ops.get_info(hw);
 }

 static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index ae5d73689353..d2a104794609 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
 /*******************************************************************************

   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.

   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -31,11 +31,6 @@
  * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
  */

-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-
 #include "e1000.h"

 #define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
@@ -104,6 +99,8 @@
  */
 static const u16 e1000_gg82563_cable_length_table[] =
 	{ 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
+#define GG82563_CABLE_LENGTH_TABLE_SIZE \
+	ARRAY_SIZE(e1000_gg82563_cable_length_table)

 static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
 static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
@@ -117,12 +114,11 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
 					   u16 *data);
 static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
 					    u16 data);
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);

 /**
  * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
  * @hw: pointer to the HW structure
- *
- * This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
 {
@@ -132,6 +128,9 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
 	if (hw->phy.media_type != e1000_media_type_copper) {
 		phy->type = e1000_phy_none;
 		return 0;
+	} else {
+		phy->ops.power_up = e1000_power_up_phy_copper;
+		phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
 	}

 	phy->addr = 1;
@@ -152,8 +151,6 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
 /**
  * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
  * @hw: pointer to the HW structure
- *
- * This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
 {
@@ -200,8 +197,6 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
 /**
  * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
  * @hw: pointer to the HW structure
- *
- * This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
 {
@@ -224,7 +219,8 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
 	/* Set rar entry count */
 	mac->rar_entry_count = E1000_RAR_ENTRIES;
 	/* Set if manageability features are enabled. */
-	mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
+	mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
+	                           ? true : false;

 	/* check for link */
 	switch (hw->phy.media_type) {
@@ -272,8 +268,7 @@ static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
  * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
  * @hw: pointer to the HW structure
  *
- * A wrapper to acquire access rights to the correct PHY. This is a
- * function pointer entry point called by the api module.
+ * A wrapper to acquire access rights to the correct PHY.
 **/
 static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
 {
@@ -287,8 +282,7 @@ static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
  * e1000_release_phy_80003es2lan - Release rights to access PHY
  * @hw: pointer to the HW structure
  *
- * A wrapper to release access rights to the correct PHY. This is a
- * function pointer entry point called by the api module.
+ * A wrapper to release access rights to the correct PHY.
 **/
 static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
 {
@@ -333,8 +327,7 @@ static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
  * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
  * @hw: pointer to the HW structure
  *
- * Acquire the semaphore to access the EEPROM. This is a function
- * pointer entry point called by the api module.
+ * Acquire the semaphore to access the EEPROM.
 **/
 static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
 {
@@ -356,8 +349,7 @@ static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
  * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
  * @hw: pointer to the HW structure
  *
- * Release the semaphore used to access the EEPROM. This is a
- * function pointer entry point called by the api module.
+ * Release the semaphore used to access the EEPROM.
 **/
 static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
 {
@@ -399,8 +391,7 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
 	}

 	if (i == timeout) {
-		hw_dbg(hw,
-		       "Driver can't access resource, SW_FW_SYNC timeout.\n");
+		e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
 		return -E1000_ERR_SWFW_SYNC;
 	}

@@ -440,8 +431,7 @@ static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
  * @offset: offset of the register to read
  * @data: pointer to the data returned from the operation
  *
- * Read the GG82563 PHY register. This is a function pointer entry
- * point called by the api module.
+ * Read the GG82563 PHY register.
 **/
 static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 						  u32 offset, u16 *data)
@@ -505,8 +495,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
  * @offset: offset of the register to read
  * @data: value to write to the register
  *
- * Write to the GG82563 PHY register. This is a function pointer entry
- * point called by the api module.
+ * Write to the GG82563 PHY register.
 **/
 static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 						   u32 offset, u16 data)
@@ -571,8 +560,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
  * @words: number of words to write
  * @data: buffer of data to write to the NVM
  *
- * Write "words" of data to the ESB2 NVM. This is a function
- * pointer entry point called by the api module.
+ * Write "words" of data to the ESB2 NVM.
 **/
 static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
 				       u16 words, u16 *data)
@@ -602,7 +590,7 @@ static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
 		timeout--;
 	}
 	if (!timeout) {
-		hw_dbg(hw, "MNG configuration cycle has not completed.\n");
+		e_dbg("MNG configuration cycle has not completed.\n");
 		return -E1000_ERR_RESET;
 	}

@@ -635,7 +623,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;

-	hw_dbg(hw, "GG82563 PSCR: %X\n", phy_data);
+	e_dbg("GG82563 PSCR: %X\n", phy_data);

 	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
 	if (ret_val)
@@ -653,7 +641,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 	udelay(1);

 	if (hw->phy.autoneg_wait_to_complete) {
-		hw_dbg(hw, "Waiting for forced speed/duplex link "
+		e_dbg("Waiting for forced speed/duplex link "
 		       "on GG82563 phy.\n");

 		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
@@ -712,21 +700,27 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
-	s32 ret_val;
-	u16 phy_data;
-	u16 index;
+	s32 ret_val = 0;
+	u16 phy_data, index;

 	ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
720 | if (ret_val) | 707 | if (ret_val) |
721 | return ret_val; | 708 | goto out; |
722 | 709 | ||
723 | index = phy_data & GG82563_DSPD_CABLE_LENGTH; | 710 | index = phy_data & GG82563_DSPD_CABLE_LENGTH; |
711 | |||
712 | if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) { | ||
713 | ret_val = -E1000_ERR_PHY; | ||
714 | goto out; | ||
715 | } | ||
716 | |||
724 | phy->min_cable_length = e1000_gg82563_cable_length_table[index]; | 717 | phy->min_cable_length = e1000_gg82563_cable_length_table[index]; |
725 | phy->max_cable_length = e1000_gg82563_cable_length_table[index+5]; | 718 | phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; |
726 | 719 | ||
727 | phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; | 720 | phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; |
728 | 721 | ||
729 | return 0; | 722 | out: |
723 | return ret_val; | ||
730 | } | 724 | } |
731 | 725 | ||
732 | /** | 726 | /** |
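The hunk above adds a range check before the table lookup: an index derived from the GG82563_PHY_DSP_DISTANCE register must leave room for the index + 5 access that pairs the minimum and maximum cable lengths. A minimal standalone sketch of the same guard, using a hypothetical table, mask, and sizes rather than the driver's real values:

#include <stdio.h>

/* Hypothetical cable-length table; the +5 offset mirrors the driver's
 * pairing where entry i is the minimum and entry i + 5 the maximum. */
static const unsigned short cable_length_table[] = {
	0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 115
};
#define TABLE_SIZE \
	(sizeof(cable_length_table) / sizeof(cable_length_table[0]))

/* Return 0 on success, -1 if the raw register value would index past
 * the end of the table (the error path the hunk introduces). */
static int lookup_cable_length(unsigned int raw, unsigned short *min_len,
			       unsigned short *max_len)
{
	unsigned int index = raw & 0x000F;	/* hypothetical distance mask */

	if (index >= TABLE_SIZE - 5)
		return -1;		/* would read table[index + 5] OOB */

	*min_len = cable_length_table[index];
	*max_len = cable_length_table[index + 5];
	return 0;
}

int main(void)
{
	unsigned short min, max;

	if (lookup_cable_length(0x3, &min, &max) == 0)
		printf("cable length between %u and %u meters\n",
		       (unsigned)min, (unsigned)max);
	if (lookup_cable_length(0xF, &min, &max) != 0)
		printf("out-of-range index rejected\n");
	return 0;
}

Refusing the lookup mirrors the -E1000_ERR_PHY return in the hunk: better to fail the call than to read past the end of the table.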
@@ -736,7 +730,6 @@ static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) | |||
736 | * @duplex: pointer to duplex buffer | 730 | * @duplex: pointer to duplex buffer |
737 | * | 731 | * |
738 | * Retrieve the current speed and duplex configuration. | 732 | * Retrieve the current speed and duplex configuration. |
739 | * This is a function pointer entry point called by the api module. | ||
740 | **/ | 733 | **/ |
741 | static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, | 734 | static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, |
742 | u16 *duplex) | 735 | u16 *duplex) |
@@ -762,12 +755,10 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, | |||
762 | * @hw: pointer to the HW structure | 755 | * @hw: pointer to the HW structure |
763 | * | 756 | * |
764 | * Perform a global reset to the ESB2 controller. | 757 | * Perform a global reset to the ESB2 controller. |
765 | * This is a function pointer entry point called by the api module. | ||
766 | **/ | 758 | **/ |
767 | static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | 759 | static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) |
768 | { | 760 | { |
769 | u32 ctrl; | 761 | u32 ctrl, icr; |
770 | u32 icr; | ||
771 | s32 ret_val; | 762 | s32 ret_val; |
772 | 763 | ||
773 | /* | 764 | /* |
@@ -776,9 +767,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | |||
776 | */ | 767 | */ |
777 | ret_val = e1000e_disable_pcie_master(hw); | 768 | ret_val = e1000e_disable_pcie_master(hw); |
778 | if (ret_val) | 769 | if (ret_val) |
779 | hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); | 770 | e_dbg("PCI-E Master disable polling has failed.\n"); |
780 | 771 | ||
781 | hw_dbg(hw, "Masking off all interrupts\n"); | 772 | e_dbg("Masking off all interrupts\n"); |
782 | ew32(IMC, 0xffffffff); | 773 | ew32(IMC, 0xffffffff); |
783 | 774 | ||
784 | ew32(RCTL, 0); | 775 | ew32(RCTL, 0); |
@@ -790,7 +781,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | |||
790 | ctrl = er32(CTRL); | 781 | ctrl = er32(CTRL); |
791 | 782 | ||
792 | ret_val = e1000_acquire_phy_80003es2lan(hw); | 783 | ret_val = e1000_acquire_phy_80003es2lan(hw); |
793 | hw_dbg(hw, "Issuing a global reset to MAC\n"); | 784 | e_dbg("Issuing a global reset to MAC\n"); |
794 | ew32(CTRL, ctrl | E1000_CTRL_RST); | 785 | ew32(CTRL, ctrl | E1000_CTRL_RST); |
795 | e1000_release_phy_80003es2lan(hw); | 786 | e1000_release_phy_80003es2lan(hw); |
796 | 787 | ||
@@ -811,7 +802,6 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | |||
811 | * @hw: pointer to the HW structure | 802 | * @hw: pointer to the HW structure |
812 | * | 803 | * |
813 | * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. | 804 | * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. |
814 | * This is a function pointer entry point called by the api module. | ||
815 | **/ | 805 | **/ |
816 | static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) | 806 | static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) |
817 | { | 807 | { |
@@ -824,20 +814,19 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) | |||
824 | 814 | ||
825 | /* Initialize identification LED */ | 815 | /* Initialize identification LED */ |
826 | ret_val = e1000e_id_led_init(hw); | 816 | ret_val = e1000e_id_led_init(hw); |
827 | if (ret_val) { | 817 | if (ret_val) |
828 | hw_dbg(hw, "Error initializing identification LED\n"); | 818 | e_dbg("Error initializing identification LED\n"); |
829 | return ret_val; | 819 | /* This is not fatal and we should not stop init due to this */ |
830 | } | ||
831 | 820 | ||
832 | /* Disabling VLAN filtering */ | 821 | /* Disabling VLAN filtering */ |
833 | hw_dbg(hw, "Initializing the IEEE VLAN\n"); | 822 | e_dbg("Initializing the IEEE VLAN\n"); |
834 | e1000e_clear_vfta(hw); | 823 | mac->ops.clear_vfta(hw); |
835 | 824 | ||
836 | /* Setup the receive address. */ | 825 | /* Setup the receive address. */ |
837 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); | 826 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); |
838 | 827 | ||
839 | /* Zero out the Multicast HASH table */ | 828 | /* Zero out the Multicast HASH table */ |
840 | hw_dbg(hw, "Zeroing the MTA\n"); | 829 | e_dbg("Zeroing the MTA\n"); |
841 | for (i = 0; i < mac->mta_reg_count; i++) | 830 | for (i = 0; i < mac->mta_reg_count; i++) |
842 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); | 831 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); |
843 | 832 | ||
@@ -994,7 +983,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) | |||
994 | /* SW Reset the PHY so all changes take effect */ | 983 | /* SW Reset the PHY so all changes take effect */ |
995 | ret_val = e1000e_commit_phy(hw); | 984 | ret_val = e1000e_commit_phy(hw); |
996 | if (ret_val) { | 985 | if (ret_val) { |
997 | hw_dbg(hw, "Error Resetting the PHY\n"); | 986 | e_dbg("Error Resetting the PHY\n"); |
998 | return ret_val; | 987 | return ret_val; |
999 | } | 988 | } |
1000 | 989 | ||
@@ -1318,6 +1307,23 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | |||
1318 | } | 1307 | } |
1319 | 1308 | ||
1320 | /** | 1309 | /** |
1310 | * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down | ||
1311 | * @hw: pointer to the HW structure | ||
1312 | * | ||
1313 | * In the case of a PHY power down to save power, to turn off link during a | ||
1314 | * driver unload, or when wake on LAN is not enabled, remove the link. | ||
1315 | **/ | ||
1316 | static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw) | ||
1317 | { | ||
1318 | /* If the management interface is not enabled, then power down */ | ||
1319 | if (!(hw->mac.ops.check_mng_mode(hw) || | ||
1320 | hw->phy.ops.check_reset_block(hw))) | ||
1321 | e1000_power_down_phy_copper(hw); | ||
1322 | |||
1323 | return; | ||
1324 | } | ||
1325 | |||
1326 | /** | ||
1321 | * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters | 1327 | * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters |
1322 | * @hw: pointer to the HW structure | 1328 | * @hw: pointer to the HW structure |
1323 | * | 1329 | * |
@@ -1325,44 +1331,42 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | |||
1325 | **/ | 1331 | **/ |
1326 | static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) | 1332 | static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) |
1327 | { | 1333 | { |
1328 | u32 temp; | ||
1329 | |||
1330 | e1000e_clear_hw_cntrs_base(hw); | 1334 | e1000e_clear_hw_cntrs_base(hw); |
1331 | 1335 | ||
1332 | temp = er32(PRC64); | 1336 | er32(PRC64); |
1333 | temp = er32(PRC127); | 1337 | er32(PRC127); |
1334 | temp = er32(PRC255); | 1338 | er32(PRC255); |
1335 | temp = er32(PRC511); | 1339 | er32(PRC511); |
1336 | temp = er32(PRC1023); | 1340 | er32(PRC1023); |
1337 | temp = er32(PRC1522); | 1341 | er32(PRC1522); |
1338 | temp = er32(PTC64); | 1342 | er32(PTC64); |
1339 | temp = er32(PTC127); | 1343 | er32(PTC127); |
1340 | temp = er32(PTC255); | 1344 | er32(PTC255); |
1341 | temp = er32(PTC511); | 1345 | er32(PTC511); |
1342 | temp = er32(PTC1023); | 1346 | er32(PTC1023); |
1343 | temp = er32(PTC1522); | 1347 | er32(PTC1522); |
1344 | 1348 | ||
1345 | temp = er32(ALGNERRC); | 1349 | er32(ALGNERRC); |
1346 | temp = er32(RXERRC); | 1350 | er32(RXERRC); |
1347 | temp = er32(TNCRS); | 1351 | er32(TNCRS); |
1348 | temp = er32(CEXTERR); | 1352 | er32(CEXTERR); |
1349 | temp = er32(TSCTC); | 1353 | er32(TSCTC); |
1350 | temp = er32(TSCTFC); | 1354 | er32(TSCTFC); |
1351 | 1355 | ||
1352 | temp = er32(MGTPRC); | 1356 | er32(MGTPRC); |
1353 | temp = er32(MGTPDC); | 1357 | er32(MGTPDC); |
1354 | temp = er32(MGTPTC); | 1358 | er32(MGTPTC); |
1355 | 1359 | ||
1356 | temp = er32(IAC); | 1360 | er32(IAC); |
1357 | temp = er32(ICRXOC); | 1361 | er32(ICRXOC); |
1358 | 1362 | ||
1359 | temp = er32(ICRXPTC); | 1363 | er32(ICRXPTC); |
1360 | temp = er32(ICRXATC); | 1364 | er32(ICRXATC); |
1361 | temp = er32(ICTXPTC); | 1365 | er32(ICTXPTC); |
1362 | temp = er32(ICTXATC); | 1366 | er32(ICTXATC); |
1363 | temp = er32(ICTXQEC); | 1367 | er32(ICTXQEC); |
1364 | temp = er32(ICTXQMTC); | 1368 | er32(ICTXQMTC); |
1365 | temp = er32(ICRXDMTC); | 1369 | er32(ICRXDMTC); |
1366 | } | 1370 | } |
1367 | 1371 | ||
1368 | static struct e1000_mac_operations es2_mac_ops = { | 1372 | static struct e1000_mac_operations es2_mac_ops = { |
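The counter-clearing hunk above drops the write-only temp variable: those statistics registers are read-to-clear, so the read itself is the useful side effect and the returned value can be thrown away. A small standalone sketch of the idea, modelling a read-to-clear counter with an ordinary volatile location (hypothetical, not the driver's register accessors):

#include <stdio.h>

/* Stand-in for a read-to-clear hardware counter: reading it returns the
 * current count and resets it, much like the PRCnnn/PTCnnn statistics
 * registers read in the hunk above. */
static unsigned int fake_counter = 42;

static unsigned int read_to_clear(volatile unsigned int *reg)
{
	unsigned int val = *reg;	/* the read is the side effect */
	*reg = 0;			/* model the hardware auto-clear */
	return val;
}

int main(void)
{
	/* Discarding the return value still performs the read (and the
	 * clear); keeping a never-used "temp" only provokes
	 * set-but-not-used warnings, which is why the hunk removes it. */
	read_to_clear(&fake_counter);

	printf("counter after clearing read: %u\n", fake_counter);
	return 0;
}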
@@ -1376,6 +1380,8 @@ static struct e1000_mac_operations es2_mac_ops = { | |||
1376 | .led_on = e1000e_led_on_generic, | 1380 | .led_on = e1000e_led_on_generic, |
1377 | .led_off = e1000e_led_off_generic, | 1381 | .led_off = e1000e_led_off_generic, |
1378 | .update_mc_addr_list = e1000e_update_mc_addr_list_generic, | 1382 | .update_mc_addr_list = e1000e_update_mc_addr_list_generic, |
1383 | .write_vfta = e1000_write_vfta_generic, | ||
1384 | .clear_vfta = e1000_clear_vfta_generic, | ||
1379 | .reset_hw = e1000_reset_hw_80003es2lan, | 1385 | .reset_hw = e1000_reset_hw_80003es2lan, |
1380 | .init_hw = e1000_init_hw_80003es2lan, | 1386 | .init_hw = e1000_init_hw_80003es2lan, |
1381 | .setup_link = e1000e_setup_link, | 1387 | .setup_link = e1000e_setup_link, |
@@ -1384,30 +1390,31 @@ static struct e1000_mac_operations es2_mac_ops = { | |||
1384 | }; | 1390 | }; |
1385 | 1391 | ||
1386 | static struct e1000_phy_operations es2_phy_ops = { | 1392 | static struct e1000_phy_operations es2_phy_ops = { |
1387 | .acquire_phy = e1000_acquire_phy_80003es2lan, | 1393 | .acquire = e1000_acquire_phy_80003es2lan, |
1394 | .check_polarity = e1000_check_polarity_m88, | ||
1388 | .check_reset_block = e1000e_check_reset_block_generic, | 1395 | .check_reset_block = e1000e_check_reset_block_generic, |
1389 | .commit_phy = e1000e_phy_sw_reset, | 1396 | .commit = e1000e_phy_sw_reset, |
1390 | .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, | 1397 | .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, |
1391 | .get_cfg_done = e1000_get_cfg_done_80003es2lan, | 1398 | .get_cfg_done = e1000_get_cfg_done_80003es2lan, |
1392 | .get_cable_length = e1000_get_cable_length_80003es2lan, | 1399 | .get_cable_length = e1000_get_cable_length_80003es2lan, |
1393 | .get_phy_info = e1000e_get_phy_info_m88, | 1400 | .get_info = e1000e_get_phy_info_m88, |
1394 | .read_phy_reg = e1000_read_phy_reg_gg82563_80003es2lan, | 1401 | .read_reg = e1000_read_phy_reg_gg82563_80003es2lan, |
1395 | .release_phy = e1000_release_phy_80003es2lan, | 1402 | .release = e1000_release_phy_80003es2lan, |
1396 | .reset_phy = e1000e_phy_hw_reset_generic, | 1403 | .reset = e1000e_phy_hw_reset_generic, |
1397 | .set_d0_lplu_state = NULL, | 1404 | .set_d0_lplu_state = NULL, |
1398 | .set_d3_lplu_state = e1000e_set_d3_lplu_state, | 1405 | .set_d3_lplu_state = e1000e_set_d3_lplu_state, |
1399 | .write_phy_reg = e1000_write_phy_reg_gg82563_80003es2lan, | 1406 | .write_reg = e1000_write_phy_reg_gg82563_80003es2lan, |
1400 | .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, | 1407 | .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, |
1401 | }; | 1408 | }; |
1402 | 1409 | ||
1403 | static struct e1000_nvm_operations es2_nvm_ops = { | 1410 | static struct e1000_nvm_operations es2_nvm_ops = { |
1404 | .acquire_nvm = e1000_acquire_nvm_80003es2lan, | 1411 | .acquire = e1000_acquire_nvm_80003es2lan, |
1405 | .read_nvm = e1000e_read_nvm_eerd, | 1412 | .read = e1000e_read_nvm_eerd, |
1406 | .release_nvm = e1000_release_nvm_80003es2lan, | 1413 | .release = e1000_release_nvm_80003es2lan, |
1407 | .update_nvm = e1000e_update_nvm_checksum_generic, | 1414 | .update = e1000e_update_nvm_checksum_generic, |
1408 | .valid_led_default = e1000e_valid_led_default, | 1415 | .valid_led_default = e1000e_valid_led_default, |
1409 | .validate_nvm = e1000e_validate_nvm_checksum_generic, | 1416 | .validate = e1000e_validate_nvm_checksum_generic, |
1410 | .write_nvm = e1000_write_nvm_80003es2lan, | 1417 | .write = e1000_write_nvm_80003es2lan, |
1411 | }; | 1418 | }; |
1412 | 1419 | ||
1413 | struct e1000_info e1000_es2_info = { | 1420 | struct e1000_info e1000_es2_info = { |
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index 1bf4d2a5d34f..0aa50c229c79 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -35,14 +35,22 @@ | |||
35 | 35 | ||
36 | #include "e1000.h" | 36 | #include "e1000.h" |
37 | 37 | ||
38 | enum {NETDEV_STATS, E1000_STATS}; | ||
39 | |||
38 | struct e1000_stats { | 40 | struct e1000_stats { |
39 | char stat_string[ETH_GSTRING_LEN]; | 41 | char stat_string[ETH_GSTRING_LEN]; |
42 | int type; | ||
40 | int sizeof_stat; | 43 | int sizeof_stat; |
41 | int stat_offset; | 44 | int stat_offset; |
42 | }; | 45 | }; |
43 | 46 | ||
44 | #define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \ | 47 | #define E1000_STAT(m) E1000_STATS, \ |
45 | offsetof(struct e1000_adapter, m) | 48 | sizeof(((struct e1000_adapter *)0)->m), \ |
49 | offsetof(struct e1000_adapter, m) | ||
50 | #define E1000_NETDEV_STAT(m) NETDEV_STATS, \ | ||
51 | sizeof(((struct net_device *)0)->m), \ | ||
52 | offsetof(struct net_device, m) | ||
53 | |||
46 | static const struct e1000_stats e1000_gstrings_stats[] = { | 54 | static const struct e1000_stats e1000_gstrings_stats[] = { |
47 | { "rx_packets", E1000_STAT(stats.gprc) }, | 55 | { "rx_packets", E1000_STAT(stats.gprc) }, |
48 | { "tx_packets", E1000_STAT(stats.gptc) }, | 56 | { "tx_packets", E1000_STAT(stats.gptc) }, |
@@ -52,21 +60,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = { | |||
52 | { "tx_broadcast", E1000_STAT(stats.bptc) }, | 60 | { "tx_broadcast", E1000_STAT(stats.bptc) }, |
53 | { "rx_multicast", E1000_STAT(stats.mprc) }, | 61 | { "rx_multicast", E1000_STAT(stats.mprc) }, |
54 | { "tx_multicast", E1000_STAT(stats.mptc) }, | 62 | { "tx_multicast", E1000_STAT(stats.mptc) }, |
55 | { "rx_errors", E1000_STAT(net_stats.rx_errors) }, | 63 | { "rx_errors", E1000_NETDEV_STAT(stats.rx_errors) }, |
56 | { "tx_errors", E1000_STAT(net_stats.tx_errors) }, | 64 | { "tx_errors", E1000_NETDEV_STAT(stats.tx_errors) }, |
57 | { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, | 65 | { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) }, |
58 | { "multicast", E1000_STAT(stats.mprc) }, | 66 | { "multicast", E1000_STAT(stats.mprc) }, |
59 | { "collisions", E1000_STAT(stats.colc) }, | 67 | { "collisions", E1000_STAT(stats.colc) }, |
60 | { "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) }, | 68 | { "rx_length_errors", E1000_NETDEV_STAT(stats.rx_length_errors) }, |
61 | { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, | 69 | { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) }, |
62 | { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, | 70 | { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, |
63 | { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, | 71 | { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) }, |
64 | { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, | 72 | { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, |
65 | { "rx_missed_errors", E1000_STAT(stats.mpc) }, | 73 | { "rx_missed_errors", E1000_STAT(stats.mpc) }, |
66 | { "tx_aborted_errors", E1000_STAT(stats.ecol) }, | 74 | { "tx_aborted_errors", E1000_STAT(stats.ecol) }, |
67 | { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, | 75 | { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, |
68 | { "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) }, | 76 | { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) }, |
69 | { "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) }, | 77 | { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) }, |
70 | { "tx_window_errors", E1000_STAT(stats.latecol) }, | 78 | { "tx_window_errors", E1000_STAT(stats.latecol) }, |
71 | { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, | 79 | { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, |
72 | { "tx_deferred_ok", E1000_STAT(stats.dc) }, | 80 | { "tx_deferred_ok", E1000_STAT(stats.dc) }, |
@@ -182,6 +190,17 @@ static int e1000_get_settings(struct net_device *netdev, | |||
182 | static u32 e1000_get_link(struct net_device *netdev) | 190 | static u32 e1000_get_link(struct net_device *netdev) |
183 | { | 191 | { |
184 | struct e1000_adapter *adapter = netdev_priv(netdev); | 192 | struct e1000_adapter *adapter = netdev_priv(netdev); |
193 | struct e1000_mac_info *mac = &adapter->hw.mac; | ||
194 | |||
195 | /* | ||
196 | * If the link is not reported up to netdev, interrupts are disabled, | ||
197 | * and so the physical link state may have changed since we last | ||
198 | * looked. Set get_link_status to make sure that the true link | ||
199 | * state is interrogated, rather than pulling a cached and possibly | ||
200 | * stale link state from the driver. | ||
201 | */ | ||
202 | if (!netif_carrier_ok(netdev)) | ||
203 | mac->get_link_status = 1; | ||
185 | 204 | ||
186 | return e1000_has_link(adapter); | 205 | return e1000_has_link(adapter); |
187 | } | 206 | } |
@@ -327,10 +346,18 @@ static int e1000_set_pauseparam(struct net_device *netdev, | |||
327 | 346 | ||
328 | hw->fc.current_mode = hw->fc.requested_mode; | 347 | hw->fc.current_mode = hw->fc.requested_mode; |
329 | 348 | ||
330 | retval = ((hw->phy.media_type == e1000_media_type_fiber) ? | 349 | if (hw->phy.media_type == e1000_media_type_fiber) { |
331 | hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw)); | 350 | retval = hw->mac.ops.setup_link(hw); |
351 | /* implicit goto out */ | ||
352 | } else { | ||
353 | retval = e1000e_force_mac_fc(hw); | ||
354 | if (retval) | ||
355 | goto out; | ||
356 | e1000e_set_fc_watermarks(hw); | ||
357 | } | ||
332 | } | 358 | } |
333 | 359 | ||
360 | out: | ||
334 | clear_bit(__E1000_RESETTING, &adapter->state); | 361 | clear_bit(__E1000_RESETTING, &adapter->state); |
335 | return retval; | 362 | return retval; |
336 | } | 363 | } |
@@ -508,7 +535,8 @@ static int e1000_get_eeprom(struct net_device *netdev, | |||
508 | 535 | ||
509 | if (ret_val) { | 536 | if (ret_val) { |
510 | /* a read error occurred, throw away the result */ | 537 | /* a read error occurred, throw away the result */ |
511 | memset(eeprom_buff, 0xff, sizeof(eeprom_buff)); | 538 | memset(eeprom_buff, 0xff, sizeof(u16) * |
539 | (last_word - first_word + 1)); | ||
512 | } else { | 540 | } else { |
513 | /* Device's eeprom is always little-endian, word addressable */ | 541 | /* Device's eeprom is always little-endian, word addressable */ |
514 | for (i = 0; i < last_word - first_word + 1; i++) | 542 | for (i = 0; i < last_word - first_word + 1; i++) |
@@ -588,7 +616,9 @@ static int e1000_set_eeprom(struct net_device *netdev, | |||
588 | * and flush shadow RAM for applicable controllers | 616 | * and flush shadow RAM for applicable controllers |
589 | */ | 617 | */ |
590 | if ((first_word <= NVM_CHECKSUM_REG) || | 618 | if ((first_word <= NVM_CHECKSUM_REG) || |
591 | (hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82573)) | 619 | (hw->mac.type == e1000_82583) || |
620 | (hw->mac.type == e1000_82574) || | ||
621 | (hw->mac.type == e1000_82573)) | ||
592 | ret_val = e1000e_update_nvm_checksum(hw); | 622 | ret_val = e1000e_update_nvm_checksum(hw); |
593 | 623 | ||
594 | out: | 624 | out: |
@@ -921,10 +951,10 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
921 | e1000e_set_interrupt_capability(adapter); | 951 | e1000e_set_interrupt_capability(adapter); |
922 | } | 952 | } |
923 | /* Hook up test interrupt handler just for this test */ | 953 | /* Hook up test interrupt handler just for this test */ |
924 | if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, | 954 | if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, |
925 | netdev)) { | 955 | netdev)) { |
926 | shared_int = 0; | 956 | shared_int = 0; |
927 | } else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, | 957 | } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, |
928 | netdev->name, netdev)) { | 958 | netdev->name, netdev)) { |
929 | *data = 1; | 959 | *data = 1; |
930 | ret_val = -1; | 960 | ret_val = -1; |
@@ -1231,6 +1261,10 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1231 | 1261 | ||
1232 | hw->mac.autoneg = 0; | 1262 | hw->mac.autoneg = 0; |
1233 | 1263 | ||
1264 | /* Workaround: K1 must be disabled for stable 1Gbps operation */ | ||
1265 | if (hw->mac.type == e1000_pchlan) | ||
1266 | e1000_configure_k1_ich8lan(hw, false); | ||
1267 | |||
1234 | if (hw->phy.type == e1000_phy_m88) { | 1268 | if (hw->phy.type == e1000_phy_m88) { |
1235 | /* Auto-MDI/MDIX Off */ | 1269 | /* Auto-MDI/MDIX Off */ |
1236 | e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); | 1270 | e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); |
@@ -1761,12 +1795,11 @@ static int e1000_set_wol(struct net_device *netdev, | |||
1761 | { | 1795 | { |
1762 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1796 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1763 | 1797 | ||
1764 | if (wol->wolopts & WAKE_MAGICSECURE) | ||
1765 | return -EOPNOTSUPP; | ||
1766 | |||
1767 | if (!(adapter->flags & FLAG_HAS_WOL) || | 1798 | if (!(adapter->flags & FLAG_HAS_WOL) || |
1768 | !device_can_wakeup(&adapter->pdev->dev)) | 1799 | !device_can_wakeup(&adapter->pdev->dev) || |
1769 | return wol->wolopts ? -EOPNOTSUPP : 0; | 1800 | (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | |
1801 | WAKE_MAGIC | WAKE_PHY | WAKE_ARP))) | ||
1802 | return -EOPNOTSUPP; | ||
1770 | 1803 | ||
1771 | /* these settings will always override what we currently have */ | 1804 | /* these settings will always override what we currently have */ |
1772 | adapter->wol = 0; | 1805 | adapter->wol = 0; |
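The set_wol change above collapses the per-option checks into a single mask test: any requested bit outside the supported WAKE_* set makes the whole request unsupported, which also covers the WAKE_MAGICSECURE rejection that used to be a separate branch. A standalone sketch of that mask-based validation, using invented flag values rather than the kernel's WAKE_* definitions:

#include <stdio.h>

/* Hypothetical wake-on-LAN option bits (values invented for the sketch). */
#define WAKE_UCAST		0x01
#define WAKE_MCAST		0x02
#define WAKE_BCAST		0x04
#define WAKE_MAGIC		0x08
#define WAKE_PHY		0x10
#define WAKE_ARP		0x20
#define WAKE_MAGICSECURE	0x40	/* not supported by this device */

#define SUPPORTED_WAKE	(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | \
			 WAKE_MAGIC | WAKE_PHY | WAKE_ARP)

/* Return 0 if every requested bit is supported, -1 otherwise -- the same
 * "reject anything outside the mask" test the hunk adds. */
static int validate_wolopts(unsigned int wolopts)
{
	return (wolopts & ~SUPPORTED_WAKE) ? -1 : 0;
}

int main(void)
{
	printf("%d\n", validate_wolopts(WAKE_MAGIC | WAKE_PHY));	/* 0 */
	printf("%d\n", validate_wolopts(WAKE_MAGICSECURE));		/* -1 */
	return 0;
}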
@@ -1824,6 +1857,7 @@ static int e1000_phys_id(struct net_device *netdev, u32 data) | |||
1824 | 1857 | ||
1825 | if ((hw->phy.type == e1000_phy_ife) || | 1858 | if ((hw->phy.type == e1000_phy_ife) || |
1826 | (hw->mac.type == e1000_pchlan) || | 1859 | (hw->mac.type == e1000_pchlan) || |
1860 | (hw->mac.type == e1000_82583) || | ||
1827 | (hw->mac.type == e1000_82574)) { | 1861 | (hw->mac.type == e1000_82574)) { |
1828 | INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task); | 1862 | INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task); |
1829 | if (!adapter->blink_timer.function) { | 1863 | if (!adapter->blink_timer.function) { |
@@ -1904,10 +1938,21 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, | |||
1904 | { | 1938 | { |
1905 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1939 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1906 | int i; | 1940 | int i; |
1941 | char *p = NULL; | ||
1907 | 1942 | ||
1908 | e1000e_update_stats(adapter); | 1943 | e1000e_update_stats(adapter); |
1909 | for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { | 1944 | for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { |
1910 | char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; | 1945 | switch (e1000_gstrings_stats[i].type) { |
1946 | case NETDEV_STATS: | ||
1947 | p = (char *) netdev + | ||
1948 | e1000_gstrings_stats[i].stat_offset; | ||
1949 | break; | ||
1950 | case E1000_STATS: | ||
1951 | p = (char *) adapter + | ||
1952 | e1000_gstrings_stats[i].stat_offset; | ||
1953 | break; | ||
1954 | } | ||
1955 | |||
1911 | data[i] = (e1000_gstrings_stats[i].sizeof_stat == | 1956 | data[i] = (e1000_gstrings_stats[i].sizeof_stat == |
1912 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; | 1957 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; |
1913 | } | 1958 | } |
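Taken together, the E1000_NETDEV_STAT macro and the new switch in e1000_get_ethtool_stats form a type-tagged table: each entry records which base structure its offset is relative to, and the reader picks the matching base pointer before dereferencing. A compact standalone sketch of the pattern, with invented structures standing in for struct e1000_adapter and struct net_device:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Two unrelated structures that both contribute statistics. */
struct adapter_stats { uint64_t good_rx; uint64_t good_tx; };
struct netdev_stats  { uint32_t rx_errors; uint32_t tx_dropped; };

enum stat_type { NETDEV_STATS, ADAPTER_STATS };

struct stat_desc {
	const char *name;
	enum stat_type type;	/* which base pointer the offset applies to */
	size_t size;		/* sizeof the field, to pick u32 vs u64 */
	size_t offset;		/* offsetof within that base structure */
};

#define ADAPTER_STAT(m) ADAPTER_STATS, \
	sizeof(((struct adapter_stats *)0)->m), \
	offsetof(struct adapter_stats, m)
#define NETDEV_STAT(m) NETDEV_STATS, \
	sizeof(((struct netdev_stats *)0)->m), \
	offsetof(struct netdev_stats, m)

static const struct stat_desc stats[] = {
	{ "rx_packets", ADAPTER_STAT(good_rx) },
	{ "tx_packets", ADAPTER_STAT(good_tx) },
	{ "rx_errors",  NETDEV_STAT(rx_errors) },
	{ "tx_dropped", NETDEV_STAT(tx_dropped) },
};

int main(void)
{
	struct adapter_stats adapter = { 1234, 5678 };
	struct netdev_stats netdev = { 3, 1 };
	size_t i;

	for (i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		const char *base = (stats[i].type == NETDEV_STATS) ?
			(const char *)&netdev : (const char *)&adapter;
		const char *p = base + stats[i].offset;
		uint64_t val = (stats[i].size == sizeof(uint64_t)) ?
			*(const uint64_t *)p : *(const uint32_t *)p;

		printf("%-10s = %llu\n", stats[i].name,
		       (unsigned long long)val);
	}
	return 0;
}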
@@ -1967,6 +2012,8 @@ static const struct ethtool_ops e1000_ethtool_ops = { | |||
1967 | .get_sset_count = e1000e_get_sset_count, | 2012 | .get_sset_count = e1000e_get_sset_count, |
1968 | .get_coalesce = e1000_get_coalesce, | 2013 | .get_coalesce = e1000_get_coalesce, |
1969 | .set_coalesce = e1000_set_coalesce, | 2014 | .set_coalesce = e1000_set_coalesce, |
2015 | .get_flags = ethtool_op_get_flags, | ||
2016 | .set_flags = ethtool_op_set_flags, | ||
1970 | }; | 2017 | }; |
1971 | 2018 | ||
1972 | void e1000e_set_ethtool_ops(struct net_device *netdev) | 2019 | void e1000e_set_ethtool_ops(struct net_device *netdev) |
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index aaea41ef794d..a7d08dae79c4 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -219,7 +219,7 @@ enum e1e_registers { | |||
219 | E1000_HICR = 0x08F00, /* Host Interface Control */ | 219 | E1000_HICR = 0x08F00, /* Host Interface Control */ |
220 | }; | 220 | }; |
221 | 221 | ||
222 | /* RSS registers */ | 222 | #define E1000_MAX_PHY_ADDR 4 |
223 | 223 | ||
224 | /* IGP01E1000 Specific Registers */ | 224 | /* IGP01E1000 Specific Registers */ |
225 | #define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ | 225 | #define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ |
@@ -356,6 +356,7 @@ enum e1e_registers { | |||
356 | #define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA | 356 | #define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA |
357 | #define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB | 357 | #define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB |
358 | 358 | ||
359 | #define E1000_DEV_ID_ICH8_82567V_3 0x1501 | ||
359 | #define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 | 360 | #define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 |
360 | #define E1000_DEV_ID_ICH8_IGP_AMT 0x104A | 361 | #define E1000_DEV_ID_ICH8_IGP_AMT 0x104A |
361 | #define E1000_DEV_ID_ICH8_IGP_C 0x104B | 362 | #define E1000_DEV_ID_ICH8_IGP_C 0x104B |
@@ -741,6 +742,7 @@ struct e1000_mac_operations { | |||
741 | s32 (*check_for_link)(struct e1000_hw *); | 742 | s32 (*check_for_link)(struct e1000_hw *); |
742 | s32 (*cleanup_led)(struct e1000_hw *); | 743 | s32 (*cleanup_led)(struct e1000_hw *); |
743 | void (*clear_hw_cntrs)(struct e1000_hw *); | 744 | void (*clear_hw_cntrs)(struct e1000_hw *); |
745 | void (*clear_vfta)(struct e1000_hw *); | ||
744 | s32 (*get_bus_info)(struct e1000_hw *); | 746 | s32 (*get_bus_info)(struct e1000_hw *); |
745 | s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); | 747 | s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); |
746 | s32 (*led_on)(struct e1000_hw *); | 748 | s32 (*led_on)(struct e1000_hw *); |
@@ -751,38 +753,41 @@ struct e1000_mac_operations { | |||
751 | s32 (*setup_link)(struct e1000_hw *); | 753 | s32 (*setup_link)(struct e1000_hw *); |
752 | s32 (*setup_physical_interface)(struct e1000_hw *); | 754 | s32 (*setup_physical_interface)(struct e1000_hw *); |
753 | s32 (*setup_led)(struct e1000_hw *); | 755 | s32 (*setup_led)(struct e1000_hw *); |
756 | void (*write_vfta)(struct e1000_hw *, u32, u32); | ||
754 | }; | 757 | }; |
755 | 758 | ||
756 | /* Function pointers for the PHY. */ | 759 | /* Function pointers for the PHY. */ |
757 | struct e1000_phy_operations { | 760 | struct e1000_phy_operations { |
758 | s32 (*acquire_phy)(struct e1000_hw *); | 761 | s32 (*acquire)(struct e1000_hw *); |
762 | s32 (*cfg_on_link_up)(struct e1000_hw *); | ||
759 | s32 (*check_polarity)(struct e1000_hw *); | 763 | s32 (*check_polarity)(struct e1000_hw *); |
760 | s32 (*check_reset_block)(struct e1000_hw *); | 764 | s32 (*check_reset_block)(struct e1000_hw *); |
761 | s32 (*commit_phy)(struct e1000_hw *); | 765 | s32 (*commit)(struct e1000_hw *); |
762 | s32 (*force_speed_duplex)(struct e1000_hw *); | 766 | s32 (*force_speed_duplex)(struct e1000_hw *); |
763 | s32 (*get_cfg_done)(struct e1000_hw *hw); | 767 | s32 (*get_cfg_done)(struct e1000_hw *hw); |
764 | s32 (*get_cable_length)(struct e1000_hw *); | 768 | s32 (*get_cable_length)(struct e1000_hw *); |
765 | s32 (*get_phy_info)(struct e1000_hw *); | 769 | s32 (*get_info)(struct e1000_hw *); |
766 | s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *); | 770 | s32 (*read_reg)(struct e1000_hw *, u32, u16 *); |
767 | s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *); | 771 | s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); |
768 | void (*release_phy)(struct e1000_hw *); | 772 | void (*release)(struct e1000_hw *); |
769 | s32 (*reset_phy)(struct e1000_hw *); | 773 | s32 (*reset)(struct e1000_hw *); |
770 | s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); | 774 | s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); |
771 | s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); | 775 | s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); |
772 | s32 (*write_phy_reg)(struct e1000_hw *, u32, u16); | 776 | s32 (*write_reg)(struct e1000_hw *, u32, u16); |
773 | s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16); | 777 | s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); |
774 | s32 (*cfg_on_link_up)(struct e1000_hw *); | 778 | void (*power_up)(struct e1000_hw *); |
779 | void (*power_down)(struct e1000_hw *); | ||
775 | }; | 780 | }; |
776 | 781 | ||
777 | /* Function pointers for the NVM. */ | 782 | /* Function pointers for the NVM. */ |
778 | struct e1000_nvm_operations { | 783 | struct e1000_nvm_operations { |
779 | s32 (*acquire_nvm)(struct e1000_hw *); | 784 | s32 (*acquire)(struct e1000_hw *); |
780 | s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *); | 785 | s32 (*read)(struct e1000_hw *, u16, u16, u16 *); |
781 | void (*release_nvm)(struct e1000_hw *); | 786 | void (*release)(struct e1000_hw *); |
782 | s32 (*update_nvm)(struct e1000_hw *); | 787 | s32 (*update)(struct e1000_hw *); |
783 | s32 (*valid_led_default)(struct e1000_hw *, u16 *); | 788 | s32 (*valid_led_default)(struct e1000_hw *, u16 *); |
784 | s32 (*validate_nvm)(struct e1000_hw *); | 789 | s32 (*validate)(struct e1000_hw *); |
785 | s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *); | 790 | s32 (*write)(struct e1000_hw *, u16, u16, u16 *); |
786 | }; | 791 | }; |
787 | 792 | ||
788 | struct e1000_mac_info { | 793 | struct e1000_mac_info { |
@@ -925,15 +930,4 @@ struct e1000_hw { | |||
925 | } dev_spec; | 930 | } dev_spec; |
926 | }; | 931 | }; |
927 | 932 | ||
928 | #ifdef DEBUG | ||
929 | #define hw_dbg(hw, format, arg...) \ | ||
930 | printk(KERN_DEBUG "%s: " format, e1000e_get_hw_dev_name(hw), ##arg) | ||
931 | #else | ||
932 | static inline int __attribute__ ((format (printf, 2, 3))) | ||
933 | hw_dbg(struct e1000_hw *hw, const char *format, ...) | ||
934 | { | ||
935 | return 0; | ||
936 | } | ||
937 | #endif | ||
938 | |||
939 | #endif | 933 | #endif |
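Most of the churn in hw.h (and in the es2lan ops tables earlier) is a rename: acquire_phy, read_phy_reg, and release_phy become acquire, read_reg, and release, since the members already live inside e1000_phy_operations. A minimal standalone sketch of this function-pointer ops-table style, with invented types and canned implementations rather than the driver's real prototypes:

#include <stdio.h>

struct hw;	/* forward declaration, stands in for struct e1000_hw */

/* Per-PHY operations; the member names no longer repeat "phy" because
 * the structure name already scopes them. */
struct phy_ops {
	int  (*acquire)(struct hw *);
	int  (*read_reg)(struct hw *, unsigned offset, unsigned short *data);
	void (*release)(struct hw *);
};

struct hw {
	struct phy_ops phy;
};

/* One concrete implementation wired into the table (hypothetical). */
static int fake_acquire(struct hw *hw)  { (void)hw; return 0; }
static void fake_release(struct hw *hw) { (void)hw; }
static int fake_read_reg(struct hw *hw, unsigned offset, unsigned short *data)
{
	(void)hw;
	*data = (unsigned short)(0x1000 + offset);	/* canned value */
	return 0;
}

/* Generic code only dispatches through the table: acquire, do the access,
 * release, exactly the shape of the locked read/write paths in the diff. */
static int read_phy_reg(struct hw *hw, unsigned offset, unsigned short *data)
{
	int ret = hw->phy.acquire(hw);
	if (ret)
		return ret;
	ret = hw->phy.read_reg(hw, offset, data);
	hw->phy.release(hw);
	return ret;
}

int main(void)
{
	struct hw hw = {
		.phy = {
			.acquire  = fake_acquire,
			.read_reg = fake_read_reg,
			.release  = fake_release,
		},
	};
	unsigned short val;

	if (!read_phy_reg(&hw, 2, &val))
		printf("PHY reg 2 = 0x%04x\n", (unsigned)val);
	return 0;
}

Because callers only see the table, supporting a different PHY or NVM means filling the struct with different functions, which is why es2_phy_ops, es2_nvm_ops, and the ich8lan variants can share the same generic code paths.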
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 51ddb04ab195..7b33be98a2ca 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -54,11 +54,6 @@ | |||
54 | * 82578DC Gigabit Network Connection | 54 | * 82578DC Gigabit Network Connection |
55 | */ | 55 | */ |
56 | 56 | ||
57 | #include <linux/netdevice.h> | ||
58 | #include <linux/ethtool.h> | ||
59 | #include <linux/delay.h> | ||
60 | #include <linux/pci.h> | ||
61 | |||
62 | #include "e1000.h" | 57 | #include "e1000.h" |
63 | 58 | ||
64 | #define ICH_FLASH_GFPREG 0x0000 | 59 | #define ICH_FLASH_GFPREG 0x0000 |
@@ -200,7 +195,6 @@ union ich8_flash_protected_range { | |||
200 | static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); | 195 | static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); |
201 | static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); | 196 | static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); |
202 | static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); | 197 | static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); |
203 | static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw); | ||
204 | static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); | 198 | static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); |
205 | static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, | 199 | static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, |
206 | u32 offset, u8 byte); | 200 | u32 offset, u8 byte); |
@@ -222,9 +216,9 @@ static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); | |||
222 | static s32 e1000_led_on_pchlan(struct e1000_hw *hw); | 216 | static s32 e1000_led_on_pchlan(struct e1000_hw *hw); |
223 | static s32 e1000_led_off_pchlan(struct e1000_hw *hw); | 217 | static s32 e1000_led_off_pchlan(struct e1000_hw *hw); |
224 | static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); | 218 | static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); |
219 | static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); | ||
225 | static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); | 220 | static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); |
226 | static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); | 221 | static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); |
227 | static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); | ||
228 | 222 | ||
229 | static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) | 223 | static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) |
230 | { | 224 | { |
@@ -265,26 +259,37 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) | |||
265 | phy->addr = 1; | 259 | phy->addr = 1; |
266 | phy->reset_delay_us = 100; | 260 | phy->reset_delay_us = 100; |
267 | 261 | ||
268 | phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan; | 262 | phy->ops.read_reg = e1000_read_phy_reg_hv; |
269 | phy->ops.read_phy_reg = e1000_read_phy_reg_hv; | 263 | phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; |
270 | phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked; | ||
271 | phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; | 264 | phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; |
272 | phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; | 265 | phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; |
273 | phy->ops.write_phy_reg = e1000_write_phy_reg_hv; | 266 | phy->ops.write_reg = e1000_write_phy_reg_hv; |
274 | phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked; | 267 | phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; |
268 | phy->ops.power_up = e1000_power_up_phy_copper; | ||
269 | phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; | ||
275 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | 270 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; |
276 | 271 | ||
277 | phy->id = e1000_phy_unknown; | 272 | phy->id = e1000_phy_unknown; |
278 | e1000e_get_phy_id(hw); | 273 | e1000e_get_phy_id(hw); |
279 | phy->type = e1000e_get_phy_type_from_id(phy->id); | 274 | phy->type = e1000e_get_phy_type_from_id(phy->id); |
280 | 275 | ||
281 | if (phy->type == e1000_phy_82577) { | 276 | switch (phy->type) { |
277 | case e1000_phy_82577: | ||
282 | phy->ops.check_polarity = e1000_check_polarity_82577; | 278 | phy->ops.check_polarity = e1000_check_polarity_82577; |
283 | phy->ops.force_speed_duplex = | 279 | phy->ops.force_speed_duplex = |
284 | e1000_phy_force_speed_duplex_82577; | 280 | e1000_phy_force_speed_duplex_82577; |
285 | phy->ops.get_cable_length = e1000_get_cable_length_82577; | 281 | phy->ops.get_cable_length = e1000_get_cable_length_82577; |
286 | phy->ops.get_phy_info = e1000_get_phy_info_82577; | 282 | phy->ops.get_info = e1000_get_phy_info_82577; |
287 | phy->ops.commit_phy = e1000e_phy_sw_reset; | 283 | phy->ops.commit = e1000e_phy_sw_reset; |
284 | case e1000_phy_82578: | ||
285 | phy->ops.check_polarity = e1000_check_polarity_m88; | ||
286 | phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; | ||
287 | phy->ops.get_cable_length = e1000e_get_cable_length_m88; | ||
288 | phy->ops.get_info = e1000e_get_phy_info_m88; | ||
289 | break; | ||
290 | default: | ||
291 | ret_val = -E1000_ERR_PHY; | ||
292 | break; | ||
288 | } | 293 | } |
289 | 294 | ||
290 | return ret_val; | 295 | return ret_val; |
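In the pchlan init hunk above, the e1000_phy_82577 case appears to fall through into the e1000_phy_82578 case, so several of the 82577-specific pointers would be overwritten by the m88 variants; an explicit break keeps the two configurations separate. A hedged standalone sketch of that intended shape, with invented PHY handlers in place of the real ops assignments:

#include <stdio.h>

enum phy_type { PHY_UNKNOWN, PHY_82577, PHY_82578 };

struct phy {
	enum phy_type type;
	/* Invented hook standing in for the ops pointers the hunk assigns
	 * (check_polarity, get_cable_length, get_info, ...). */
	const char *(*describe)(void);
};

static const char *describe_82577(void) { return "82577 handlers"; }
static const char *describe_82578(void) { return "82578 (m88) handlers"; }

/* Returns 0 on success, -1 for an unknown PHY.  Each case ends in an
 * explicit break so one PHY's setup cannot bleed into the next. */
static int init_phy_ops(struct phy *phy)
{
	int ret = 0;

	switch (phy->type) {
	case PHY_82577:
		phy->describe = describe_82577;
		break;
	case PHY_82578:
		phy->describe = describe_82578;
		break;
	default:
		ret = -1;
		break;
	}
	return ret;
}

int main(void)
{
	struct phy phy = { .type = PHY_82577 };

	if (!init_phy_ops(&phy))
		printf("%s\n", phy.describe());
	return 0;
}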
@@ -305,17 +310,22 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) | |||
305 | phy->addr = 1; | 310 | phy->addr = 1; |
306 | phy->reset_delay_us = 100; | 311 | phy->reset_delay_us = 100; |
307 | 312 | ||
313 | phy->ops.power_up = e1000_power_up_phy_copper; | ||
314 | phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; | ||
315 | |||
308 | /* | 316 | /* |
309 | * We may need to do this twice - once for IGP and if that fails, | 317 | * We may need to do this twice - once for IGP and if that fails, |
310 | * we'll set BM func pointers and try again | 318 | * we'll set BM func pointers and try again |
311 | */ | 319 | */ |
312 | ret_val = e1000e_determine_phy_address(hw); | 320 | ret_val = e1000e_determine_phy_address(hw); |
313 | if (ret_val) { | 321 | if (ret_val) { |
314 | hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; | 322 | phy->ops.write_reg = e1000e_write_phy_reg_bm; |
315 | hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; | 323 | phy->ops.read_reg = e1000e_read_phy_reg_bm; |
316 | ret_val = e1000e_determine_phy_address(hw); | 324 | ret_val = e1000e_determine_phy_address(hw); |
317 | if (ret_val) | 325 | if (ret_val) { |
326 | e_dbg("Cannot determine PHY addr. Erroring out\n"); | ||
318 | return ret_val; | 327 | return ret_val; |
328 | } | ||
319 | } | 329 | } |
320 | 330 | ||
321 | phy->id = 0; | 331 | phy->id = 0; |
@@ -332,29 +342,36 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) | |||
332 | case IGP03E1000_E_PHY_ID: | 342 | case IGP03E1000_E_PHY_ID: |
333 | phy->type = e1000_phy_igp_3; | 343 | phy->type = e1000_phy_igp_3; |
334 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | 344 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; |
335 | phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked; | 345 | phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked; |
336 | phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked; | 346 | phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked; |
347 | phy->ops.get_info = e1000e_get_phy_info_igp; | ||
348 | phy->ops.check_polarity = e1000_check_polarity_igp; | ||
349 | phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp; | ||
337 | break; | 350 | break; |
338 | case IFE_E_PHY_ID: | 351 | case IFE_E_PHY_ID: |
339 | case IFE_PLUS_E_PHY_ID: | 352 | case IFE_PLUS_E_PHY_ID: |
340 | case IFE_C_E_PHY_ID: | 353 | case IFE_C_E_PHY_ID: |
341 | phy->type = e1000_phy_ife; | 354 | phy->type = e1000_phy_ife; |
342 | phy->autoneg_mask = E1000_ALL_NOT_GIG; | 355 | phy->autoneg_mask = E1000_ALL_NOT_GIG; |
356 | phy->ops.get_info = e1000_get_phy_info_ife; | ||
357 | phy->ops.check_polarity = e1000_check_polarity_ife; | ||
358 | phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; | ||
343 | break; | 359 | break; |
344 | case BME1000_E_PHY_ID: | 360 | case BME1000_E_PHY_ID: |
345 | phy->type = e1000_phy_bm; | 361 | phy->type = e1000_phy_bm; |
346 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | 362 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; |
347 | hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; | 363 | phy->ops.read_reg = e1000e_read_phy_reg_bm; |
348 | hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; | 364 | phy->ops.write_reg = e1000e_write_phy_reg_bm; |
349 | hw->phy.ops.commit_phy = e1000e_phy_sw_reset; | 365 | phy->ops.commit = e1000e_phy_sw_reset; |
366 | phy->ops.get_info = e1000e_get_phy_info_m88; | ||
367 | phy->ops.check_polarity = e1000_check_polarity_m88; | ||
368 | phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; | ||
350 | break; | 369 | break; |
351 | default: | 370 | default: |
352 | return -E1000_ERR_PHY; | 371 | return -E1000_ERR_PHY; |
353 | break; | 372 | break; |
354 | } | 373 | } |
355 | 374 | ||
356 | phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan; | ||
357 | |||
358 | return 0; | 375 | return 0; |
359 | } | 376 | } |
360 | 377 | ||
@@ -374,7 +391,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) | |||
374 | 391 | ||
375 | /* Can't read flash registers if the register set isn't mapped. */ | 392 | /* Can't read flash registers if the register set isn't mapped. */ |
376 | if (!hw->flash_address) { | 393 | if (!hw->flash_address) { |
377 | hw_dbg(hw, "ERROR: Flash registers not mapped\n"); | 394 | e_dbg("ERROR: Flash registers not mapped\n"); |
378 | return -E1000_ERR_CONFIG; | 395 | return -E1000_ERR_CONFIG; |
379 | } | 396 | } |
380 | 397 | ||
@@ -407,7 +424,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) | |||
407 | 424 | ||
408 | /* Clear shadow ram */ | 425 | /* Clear shadow ram */ |
409 | for (i = 0; i < nvm->word_size; i++) { | 426 | for (i = 0; i < nvm->word_size; i++) { |
410 | dev_spec->shadow_ram[i].modified = 0; | 427 | dev_spec->shadow_ram[i].modified = false; |
411 | dev_spec->shadow_ram[i].value = 0xFFFF; | 428 | dev_spec->shadow_ram[i].value = 0xFFFF; |
412 | } | 429 | } |
413 | 430 | ||
@@ -436,7 +453,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) | |||
436 | if (mac->type == e1000_ich8lan) | 453 | if (mac->type == e1000_ich8lan) |
437 | mac->rar_entry_count--; | 454 | mac->rar_entry_count--; |
438 | /* Set if manageability features are enabled. */ | 455 | /* Set if manageability features are enabled. */ |
439 | mac->arc_subsystem_valid = 1; | 456 | mac->arc_subsystem_valid = true; |
440 | 457 | ||
441 | /* LED operations */ | 458 | /* LED operations */ |
442 | switch (mac->type) { | 459 | switch (mac->type) { |
@@ -470,7 +487,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) | |||
470 | 487 | ||
471 | /* Enable PCS Lock-loss workaround for ICH8 */ | 488 | /* Enable PCS Lock-loss workaround for ICH8 */ |
472 | if (mac->type == e1000_ich8lan) | 489 | if (mac->type == e1000_ich8lan) |
473 | e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, 1); | 490 | e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); |
474 | 491 | ||
475 | return 0; | 492 | return 0; |
476 | } | 493 | } |
@@ -556,7 +573,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
556 | */ | 573 | */ |
557 | ret_val = e1000e_config_fc_after_link_up(hw); | 574 | ret_val = e1000e_config_fc_after_link_up(hw); |
558 | if (ret_val) | 575 | if (ret_val) |
559 | hw_dbg(hw, "Error configuring flow control\n"); | 576 | e_dbg("Error configuring flow control\n"); |
560 | 577 | ||
561 | out: | 578 | out: |
562 | return ret_val; | 579 | return ret_val; |
@@ -636,8 +653,6 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | |||
636 | u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; | 653 | u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; |
637 | s32 ret_val = 0; | 654 | s32 ret_val = 0; |
638 | 655 | ||
639 | might_sleep(); | ||
640 | |||
641 | mutex_lock(&swflag_mutex); | 656 | mutex_lock(&swflag_mutex); |
642 | 657 | ||
643 | while (timeout) { | 658 | while (timeout) { |
@@ -650,7 +665,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | |||
650 | } | 665 | } |
651 | 666 | ||
652 | if (!timeout) { | 667 | if (!timeout) { |
653 | hw_dbg(hw, "SW/FW/HW has locked the resource for too long.\n"); | 668 | e_dbg("SW/FW/HW has locked the resource for too long.\n"); |
654 | ret_val = -E1000_ERR_CONFIG; | 669 | ret_val = -E1000_ERR_CONFIG; |
655 | goto out; | 670 | goto out; |
656 | } | 671 | } |
@@ -670,7 +685,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | |||
670 | } | 685 | } |
671 | 686 | ||
672 | if (!timeout) { | 687 | if (!timeout) { |
673 | hw_dbg(hw, "Failed to acquire the semaphore.\n"); | 688 | e_dbg("Failed to acquire the semaphore.\n"); |
674 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; | 689 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; |
675 | ew32(EXTCNF_CTRL, extcnf_ctrl); | 690 | ew32(EXTCNF_CTRL, extcnf_ctrl); |
676 | ret_val = -E1000_ERR_CONFIG; | 691 | ret_val = -E1000_ERR_CONFIG; |
@@ -714,7 +729,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) | |||
714 | **/ | 729 | **/ |
715 | static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) | 730 | static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) |
716 | { | 731 | { |
717 | u32 fwsm = er32(FWSM); | 732 | u32 fwsm; |
733 | |||
734 | fwsm = er32(FWSM); | ||
718 | 735 | ||
719 | return (fwsm & E1000_FWSM_MODE_MASK) == | 736 | return (fwsm & E1000_FWSM_MODE_MASK) == |
720 | (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); | 737 | (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); |
@@ -738,77 +755,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) | |||
738 | } | 755 | } |
739 | 756 | ||
740 | /** | 757 | /** |
741 | * e1000_phy_force_speed_duplex_ich8lan - Force PHY speed & duplex | ||
742 | * @hw: pointer to the HW structure | ||
743 | * | ||
744 | * Forces the speed and duplex settings of the PHY. | ||
745 | * This is a function pointer entry point only called by | ||
746 | * PHY setup routines. | ||
747 | **/ | ||
748 | static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw) | ||
749 | { | ||
750 | struct e1000_phy_info *phy = &hw->phy; | ||
751 | s32 ret_val; | ||
752 | u16 data; | ||
753 | bool link; | ||
754 | |||
755 | if (phy->type != e1000_phy_ife) { | ||
756 | ret_val = e1000e_phy_force_speed_duplex_igp(hw); | ||
757 | return ret_val; | ||
758 | } | ||
759 | |||
760 | ret_val = e1e_rphy(hw, PHY_CONTROL, &data); | ||
761 | if (ret_val) | ||
762 | return ret_val; | ||
763 | |||
764 | e1000e_phy_force_speed_duplex_setup(hw, &data); | ||
765 | |||
766 | ret_val = e1e_wphy(hw, PHY_CONTROL, data); | ||
767 | if (ret_val) | ||
768 | return ret_val; | ||
769 | |||
770 | /* Disable MDI-X support for 10/100 */ | ||
771 | ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); | ||
772 | if (ret_val) | ||
773 | return ret_val; | ||
774 | |||
775 | data &= ~IFE_PMC_AUTO_MDIX; | ||
776 | data &= ~IFE_PMC_FORCE_MDIX; | ||
777 | |||
778 | ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); | ||
779 | if (ret_val) | ||
780 | return ret_val; | ||
781 | |||
782 | hw_dbg(hw, "IFE PMC: %X\n", data); | ||
783 | |||
784 | udelay(1); | ||
785 | |||
786 | if (phy->autoneg_wait_to_complete) { | ||
787 | hw_dbg(hw, "Waiting for forced speed/duplex link on IFE phy.\n"); | ||
788 | |||
789 | ret_val = e1000e_phy_has_link_generic(hw, | ||
790 | PHY_FORCE_LIMIT, | ||
791 | 100000, | ||
792 | &link); | ||
793 | if (ret_val) | ||
794 | return ret_val; | ||
795 | |||
796 | if (!link) | ||
797 | hw_dbg(hw, "Link taking longer than expected.\n"); | ||
798 | |||
799 | /* Try once more */ | ||
800 | ret_val = e1000e_phy_has_link_generic(hw, | ||
801 | PHY_FORCE_LIMIT, | ||
802 | 100000, | ||
803 | &link); | ||
804 | if (ret_val) | ||
805 | return ret_val; | ||
806 | } | ||
807 | |||
808 | return 0; | ||
809 | } | ||
810 | |||
811 | /** | ||
812 | * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration | 758 | * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration |
813 | * @hw: pointer to the HW structure | 759 | * @hw: pointer to the HW structure |
814 | * | 760 | * |
@@ -822,7 +768,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) | |||
822 | s32 ret_val; | 768 | s32 ret_val; |
823 | u16 word_addr, reg_data, reg_addr, phy_page = 0; | 769 | u16 word_addr, reg_data, reg_addr, phy_page = 0; |
824 | 770 | ||
825 | ret_val = hw->phy.ops.acquire_phy(hw); | 771 | ret_val = hw->phy.ops.acquire(hw); |
826 | if (ret_val) | 772 | if (ret_val) |
827 | return ret_val; | 773 | return ret_val; |
828 | 774 | ||
@@ -918,7 +864,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) | |||
918 | reg_addr &= PHY_REG_MASK; | 864 | reg_addr &= PHY_REG_MASK; |
919 | reg_addr |= phy_page; | 865 | reg_addr |= phy_page; |
920 | 866 | ||
921 | ret_val = phy->ops.write_phy_reg_locked(hw, | 867 | ret_val = phy->ops.write_reg_locked(hw, |
922 | (u32)reg_addr, | 868 | (u32)reg_addr, |
923 | reg_data); | 869 | reg_data); |
924 | if (ret_val) | 870 | if (ret_val) |
@@ -927,7 +873,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) | |||
927 | } | 873 | } |
928 | 874 | ||
929 | out: | 875 | out: |
930 | hw->phy.ops.release_phy(hw); | 876 | hw->phy.ops.release(hw); |
931 | return ret_val; | 877 | return ret_val; |
932 | } | 878 | } |
933 | 879 | ||
@@ -951,15 +897,14 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) | |||
951 | goto out; | 897 | goto out; |
952 | 898 | ||
953 | /* Wrap the whole flow with the sw flag */ | 899 | /* Wrap the whole flow with the sw flag */ |
954 | ret_val = hw->phy.ops.acquire_phy(hw); | 900 | ret_val = hw->phy.ops.acquire(hw); |
955 | if (ret_val) | 901 | if (ret_val) |
956 | goto out; | 902 | goto out; |
957 | 903 | ||
958 | /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ | 904 | /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ |
959 | if (link) { | 905 | if (link) { |
960 | if (hw->phy.type == e1000_phy_82578) { | 906 | if (hw->phy.type == e1000_phy_82578) { |
961 | ret_val = hw->phy.ops.read_phy_reg_locked(hw, | 907 | ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, |
962 | BM_CS_STATUS, | ||
963 | &status_reg); | 908 | &status_reg); |
964 | if (ret_val) | 909 | if (ret_val) |
965 | goto release; | 910 | goto release; |
@@ -975,8 +920,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) | |||
975 | } | 920 | } |
976 | 921 | ||
977 | if (hw->phy.type == e1000_phy_82577) { | 922 | if (hw->phy.type == e1000_phy_82577) { |
978 | ret_val = hw->phy.ops.read_phy_reg_locked(hw, | 923 | ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, |
979 | HV_M_STATUS, | ||
980 | &status_reg); | 924 | &status_reg); |
981 | if (ret_val) | 925 | if (ret_val) |
982 | goto release; | 926 | goto release; |
@@ -992,14 +936,14 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) | |||
992 | } | 936 | } |
993 | 937 | ||
994 | /* Link stall fix for link up */ | 938 | /* Link stall fix for link up */ |
995 | ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19), | 939 | ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), |
996 | 0x0100); | 940 | 0x0100); |
997 | if (ret_val) | 941 | if (ret_val) |
998 | goto release; | 942 | goto release; |
999 | 943 | ||
1000 | } else { | 944 | } else { |
1001 | /* Link stall fix for link down */ | 945 | /* Link stall fix for link down */ |
1002 | ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19), | 946 | ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), |
1003 | 0x4100); | 947 | 0x4100); |
1004 | if (ret_val) | 948 | if (ret_val) |
1005 | goto release; | 949 | goto release; |
@@ -1008,7 +952,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) | |||
1008 | ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); | 952 | ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); |
1009 | 953 | ||
1010 | release: | 954 | release: |
1011 | hw->phy.ops.release_phy(hw); | 955 | hw->phy.ops.release(hw); |
1012 | out: | 956 | out: |
1013 | return ret_val; | 957 | return ret_val; |
1014 | } | 958 | } |
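The hunks above all exercise the serialization pattern that the rename makes explicit: take the software flag with phy.ops.acquire(), use only the *_locked register accessors while it is held, then drop it with phy.ops.release(). A minimal sketch of that pattern, using register names that appear in this diff (the helper name and the condensed error handling are illustrative only):

        /* Sketch: locked PHY register access under the swflag */
        static s32 example_locked_phy_access(struct e1000_hw *hw)
        {
                u16 data;
                s32 ret_val;

                /* serialize against firmware/ME access to the PHY */
                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val)
                        return ret_val;

                /* only the *_locked accessors may be used while held */
                ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, &data);
                if (!ret_val)
                        ret_val = hw->phy.ops.write_reg_locked(hw,
                                                               PHY_REG(770, 19),
                                                               0x0100);

                hw->phy.ops.release(hw);
                return ret_val;
        }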
@@ -1023,7 +967,7 @@ out: | |||
1023 | * | 967 | * |
1024 | * Success returns 0, Failure returns -E1000_ERR_PHY (-2) | 968 | * Success returns 0, Failure returns -E1000_ERR_PHY (-2) |
1025 | **/ | 969 | **/ |
1026 | static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) | 970 | s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) |
1027 | { | 971 | { |
1028 | s32 ret_val = 0; | 972 | s32 ret_val = 0; |
1029 | u32 ctrl_reg = 0; | 973 | u32 ctrl_reg = 0; |
@@ -1084,7 +1028,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) | |||
1084 | if (hw->mac.type != e1000_pchlan) | 1028 | if (hw->mac.type != e1000_pchlan) |
1085 | return ret_val; | 1029 | return ret_val; |
1086 | 1030 | ||
1087 | ret_val = hw->phy.ops.acquire_phy(hw); | 1031 | ret_val = hw->phy.ops.acquire(hw); |
1088 | if (ret_val) | 1032 | if (ret_val) |
1089 | return ret_val; | 1033 | return ret_val; |
1090 | 1034 | ||
@@ -1098,7 +1042,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) | |||
1098 | 1042 | ||
1099 | mac_reg = er32(PHY_CTRL); | 1043 | mac_reg = er32(PHY_CTRL); |
1100 | 1044 | ||
1101 | ret_val = hw->phy.ops.read_phy_reg_locked(hw, HV_OEM_BITS, &oem_reg); | 1045 | ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); |
1102 | if (ret_val) | 1046 | if (ret_val) |
1103 | goto out; | 1047 | goto out; |
1104 | 1048 | ||
@@ -1118,11 +1062,12 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) | |||
1118 | oem_reg |= HV_OEM_BITS_LPLU; | 1062 | oem_reg |= HV_OEM_BITS_LPLU; |
1119 | } | 1063 | } |
1120 | /* Restart auto-neg to activate the bits */ | 1064 | /* Restart auto-neg to activate the bits */ |
1121 | oem_reg |= HV_OEM_BITS_RESTART_AN; | 1065 | if (!e1000_check_reset_block(hw)) |
1122 | ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg); | 1066 | oem_reg |= HV_OEM_BITS_RESTART_AN; |
1067 | ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); | ||
1123 | 1068 | ||
1124 | out: | 1069 | out: |
1125 | hw->phy.ops.release_phy(hw); | 1070 | hw->phy.ops.release(hw); |
1126 | 1071 | ||
1127 | return ret_val; | 1072 | return ret_val; |
1128 | } | 1073 | } |
@@ -1165,7 +1110,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) | |||
1165 | } | 1110 | } |
1166 | 1111 | ||
1167 | /* Select page 0 */ | 1112 | /* Select page 0 */ |
1168 | ret_val = hw->phy.ops.acquire_phy(hw); | 1113 | ret_val = hw->phy.ops.acquire(hw); |
1169 | if (ret_val) | 1114 | if (ret_val) |
1170 | return ret_val; | 1115 | return ret_val; |
1171 | 1116 | ||
@@ -1173,7 +1118,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) | |||
1173 | ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); | 1118 | ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); |
1174 | if (ret_val) | 1119 | if (ret_val) |
1175 | goto out; | 1120 | goto out; |
1176 | hw->phy.ops.release_phy(hw); | 1121 | hw->phy.ops.release(hw); |
1177 | 1122 | ||
1178 | /* | 1123 | /* |
1179 | * Configure the K1 Si workaround during phy reset assuming there is | 1124 | * Configure the K1 Si workaround during phy reset assuming there is |
@@ -1209,7 +1154,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) | |||
1209 | * leave the PHY in a bad state possibly resulting in no link. | 1154 | * leave the PHY in a bad state possibly resulting in no link. |
1210 | */ | 1155 | */ |
1211 | if (loop == 0) | 1156 | if (loop == 0) |
1212 | hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n"); | 1157 | e_dbg("LAN_INIT_DONE not set, increase timeout\n"); |
1213 | 1158 | ||
1214 | /* Clear the Init Done bit for the next init event */ | 1159 | /* Clear the Init Done bit for the next init event */ |
1215 | data = er32(STATUS); | 1160 | data = er32(STATUS); |
@@ -1261,122 +1206,6 @@ out: | |||
1261 | } | 1206 | } |
1262 | 1207 | ||
1263 | /** | 1208 | /** |
1264 | * e1000_get_phy_info_ife_ich8lan - Retrieves various IFE PHY states | ||
1265 | * @hw: pointer to the HW structure | ||
1266 | * | ||
1267 | * Populates "phy" structure with various feature states. | ||
1268 | * This function is only called by other family-specific | ||
1269 | * routines. | ||
1270 | **/ | ||
1271 | static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw) | ||
1272 | { | ||
1273 | struct e1000_phy_info *phy = &hw->phy; | ||
1274 | s32 ret_val; | ||
1275 | u16 data; | ||
1276 | bool link; | ||
1277 | |||
1278 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | ||
1279 | if (ret_val) | ||
1280 | return ret_val; | ||
1281 | |||
1282 | if (!link) { | ||
1283 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | ||
1284 | return -E1000_ERR_CONFIG; | ||
1285 | } | ||
1286 | |||
1287 | ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); | ||
1288 | if (ret_val) | ||
1289 | return ret_val; | ||
1290 | phy->polarity_correction = (!(data & IFE_PSC_AUTO_POLARITY_DISABLE)); | ||
1291 | |||
1292 | if (phy->polarity_correction) { | ||
1293 | ret_val = phy->ops.check_polarity(hw); | ||
1294 | if (ret_val) | ||
1295 | return ret_val; | ||
1296 | } else { | ||
1297 | /* Polarity is forced */ | ||
1298 | phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) | ||
1299 | ? e1000_rev_polarity_reversed | ||
1300 | : e1000_rev_polarity_normal; | ||
1301 | } | ||
1302 | |||
1303 | ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); | ||
1304 | if (ret_val) | ||
1305 | return ret_val; | ||
1306 | |||
1307 | phy->is_mdix = (data & IFE_PMC_MDIX_STATUS); | ||
1308 | |||
1309 | /* The following parameters are undefined for 10/100 operation. */ | ||
1310 | phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; | ||
1311 | phy->local_rx = e1000_1000t_rx_status_undefined; | ||
1312 | phy->remote_rx = e1000_1000t_rx_status_undefined; | ||
1313 | |||
1314 | return 0; | ||
1315 | } | ||
1316 | |||
1317 | /** | ||
1318 | * e1000_get_phy_info_ich8lan - Calls appropriate PHY type get_phy_info | ||
1319 | * @hw: pointer to the HW structure | ||
1320 | * | ||
1321 | * Wrapper for calling the get_phy_info routines for the appropriate phy type. | ||
1322 | * This is a function pointer entry point called by drivers | ||
1323 | * or other shared routines. | ||
1324 | **/ | ||
1325 | static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw) | ||
1326 | { | ||
1327 | switch (hw->phy.type) { | ||
1328 | case e1000_phy_ife: | ||
1329 | return e1000_get_phy_info_ife_ich8lan(hw); | ||
1330 | break; | ||
1331 | case e1000_phy_igp_3: | ||
1332 | case e1000_phy_bm: | ||
1333 | case e1000_phy_82578: | ||
1334 | case e1000_phy_82577: | ||
1335 | return e1000e_get_phy_info_igp(hw); | ||
1336 | break; | ||
1337 | default: | ||
1338 | break; | ||
1339 | } | ||
1340 | |||
1341 | return -E1000_ERR_PHY_TYPE; | ||
1342 | } | ||
1343 | |||
1344 | /** | ||
1345 | * e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY | ||
1346 | * @hw: pointer to the HW structure | ||
1347 | * | ||
1348 | * Polarity is determined on the polarity reversal feature being enabled. | ||
1349 | * This function is only called by other family-specific | ||
1350 | * routines. | ||
1351 | **/ | ||
1352 | static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw) | ||
1353 | { | ||
1354 | struct e1000_phy_info *phy = &hw->phy; | ||
1355 | s32 ret_val; | ||
1356 | u16 phy_data, offset, mask; | ||
1357 | |||
1358 | /* | ||
1359 | * Polarity is determined based on the reversal feature being enabled. | ||
1360 | */ | ||
1361 | if (phy->polarity_correction) { | ||
1362 | offset = IFE_PHY_EXTENDED_STATUS_CONTROL; | ||
1363 | mask = IFE_PESC_POLARITY_REVERSED; | ||
1364 | } else { | ||
1365 | offset = IFE_PHY_SPECIAL_CONTROL; | ||
1366 | mask = IFE_PSC_FORCE_POLARITY; | ||
1367 | } | ||
1368 | |||
1369 | ret_val = e1e_rphy(hw, offset, &phy_data); | ||
1370 | |||
1371 | if (!ret_val) | ||
1372 | phy->cable_polarity = (phy_data & mask) | ||
1373 | ? e1000_rev_polarity_reversed | ||
1374 | : e1000_rev_polarity_normal; | ||
1375 | |||
1376 | return ret_val; | ||
1377 | } | ||
1378 | |||
1379 | /** | ||
1380 | * e1000_set_lplu_state_pchlan - Set Low Power Link Up state | 1209 | * e1000_set_lplu_state_pchlan - Set Low Power Link Up state |
1381 | * @hw: pointer to the HW structure | 1210 | * @hw: pointer to the HW structure |
1382 | * @active: true to enable LPLU, false to disable | 1211 | * @active: true to enable LPLU, false to disable |
@@ -1411,7 +1240,7 @@ out: | |||
1411 | /** | 1240 | /** |
1412 | * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state | 1241 | * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state |
1413 | * @hw: pointer to the HW structure | 1242 | * @hw: pointer to the HW structure |
1414 | * @active: TRUE to enable LPLU, FALSE to disable | 1243 | * @active: true to enable LPLU, false to disable |
1415 | * | 1244 | * |
1416 | * Sets the LPLU D0 state according to the active flag. When | 1245 | * Sets the LPLU D0 state according to the active flag. When |
1417 | * activating LPLU this function also disables smart speed | 1246 | * activating LPLU this function also disables smart speed |
@@ -1497,7 +1326,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) | |||
1497 | /** | 1326 | /** |
1498 | * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state | 1327 | * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state |
1499 | * @hw: pointer to the HW structure | 1328 | * @hw: pointer to the HW structure |
1500 | * @active: TRUE to enable LPLU, FALSE to disable | 1329 | * @active: true to enable LPLU, false to disable |
1501 | * | 1330 | * |
1502 | * Sets the LPLU D3 state according to the active flag. When | 1331 | * Sets the LPLU D3 state according to the active flag. When |
1503 | * activating LPLU this function also disables smart speed | 1332 | * activating LPLU this function also disables smart speed |
@@ -1610,7 +1439,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) | |||
1610 | 1439 | ||
1611 | return 0; | 1440 | return 0; |
1612 | } | 1441 | } |
1613 | hw_dbg(hw, "Unable to determine valid NVM bank via EEC - " | 1442 | e_dbg("Unable to determine valid NVM bank via EEC - " |
1614 | "reading flash signature\n"); | 1443 | "reading flash signature\n"); |
1615 | /* fall-thru */ | 1444 | /* fall-thru */ |
1616 | default: | 1445 | default: |
@@ -1640,7 +1469,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) | |||
1640 | return 0; | 1469 | return 0; |
1641 | } | 1470 | } |
1642 | 1471 | ||
1643 | hw_dbg(hw, "ERROR: No valid NVM bank present\n"); | 1472 | e_dbg("ERROR: No valid NVM bank present\n"); |
1644 | return -E1000_ERR_NVM; | 1473 | return -E1000_ERR_NVM; |
1645 | } | 1474 | } |
1646 | 1475 | ||
@@ -1668,16 +1497,16 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
1668 | 1497 | ||
1669 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || | 1498 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || |
1670 | (words == 0)) { | 1499 | (words == 0)) { |
1671 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1500 | e_dbg("nvm parameter(s) out of bounds\n"); |
1672 | ret_val = -E1000_ERR_NVM; | 1501 | ret_val = -E1000_ERR_NVM; |
1673 | goto out; | 1502 | goto out; |
1674 | } | 1503 | } |
1675 | 1504 | ||
1676 | nvm->ops.acquire_nvm(hw); | 1505 | nvm->ops.acquire(hw); |
1677 | 1506 | ||
1678 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); | 1507 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); |
1679 | if (ret_val) { | 1508 | if (ret_val) { |
1680 | hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n"); | 1509 | e_dbg("Could not detect valid bank, assuming bank 0\n"); |
1681 | bank = 0; | 1510 | bank = 0; |
1682 | } | 1511 | } |
1683 | 1512 | ||
@@ -1699,11 +1528,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
1699 | } | 1528 | } |
1700 | } | 1529 | } |
1701 | 1530 | ||
1702 | nvm->ops.release_nvm(hw); | 1531 | nvm->ops.release(hw); |
1703 | 1532 | ||
1704 | out: | 1533 | out: |
1705 | if (ret_val) | 1534 | if (ret_val) |
1706 | hw_dbg(hw, "NVM read error: %d\n", ret_val); | 1535 | e_dbg("NVM read error: %d\n", ret_val); |
1707 | 1536 | ||
1708 | return ret_val; | 1537 | return ret_val; |
1709 | } | 1538 | } |
@@ -1725,7 +1554,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
1725 | 1554 | ||
1726 | /* Check if the flash descriptor is valid */ | 1555 | /* Check if the flash descriptor is valid */ |
1727 | if (hsfsts.hsf_status.fldesvalid == 0) { | 1556 | if (hsfsts.hsf_status.fldesvalid == 0) { |
1728 | hw_dbg(hw, "Flash descriptor invalid. " | 1557 | e_dbg("Flash descriptor invalid. " |
1729 | "SW Sequencing must be used."); | 1558 | "SW Sequencing must be used."); |
1730 | return -E1000_ERR_NVM; | 1559 | return -E1000_ERR_NVM; |
1731 | } | 1560 | } |
@@ -1748,7 +1577,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
1748 | if (hsfsts.hsf_status.flcinprog == 0) { | 1577 | if (hsfsts.hsf_status.flcinprog == 0) { |
1749 | /* | 1578 | /* |
1750 | * There is no cycle running at present, | 1579 | * There is no cycle running at present, |
1751 | * so we can start a cycle | 1580 | * so we can start a cycle. |
1752 | * Begin by setting Flash Cycle Done. | 1581 | * Begin by setting Flash Cycle Done. |
1753 | */ | 1582 | */ |
1754 | hsfsts.hsf_status.flcdone = 1; | 1583 | hsfsts.hsf_status.flcdone = 1; |
@@ -1756,7 +1585,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
1756 | ret_val = 0; | 1585 | ret_val = 0; |
1757 | } else { | 1586 | } else { |
1758 | /* | 1587 | /* |
1759 | * otherwise poll for some time so the current | 1588 | * Otherwise poll for some time so the current |
1760 | * cycle has a chance to end before giving up. | 1589 | * cycle has a chance to end before giving up. |
1761 | */ | 1590 | */ |
1762 | for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { | 1591 | for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { |
@@ -1775,7 +1604,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
1775 | hsfsts.hsf_status.flcdone = 1; | 1604 | hsfsts.hsf_status.flcdone = 1; |
1776 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | 1605 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); |
1777 | } else { | 1606 | } else { |
1778 | hw_dbg(hw, "Flash controller busy, cannot get access"); | 1607 | e_dbg("Flash controller busy, cannot get access"); |
1779 | } | 1608 | } |
1780 | } | 1609 | } |
1781 | 1610 | ||
@@ -1925,7 +1754,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |||
1925 | /* Repeat for some time before giving up. */ | 1754 | /* Repeat for some time before giving up. */ |
1926 | continue; | 1755 | continue; |
1927 | } else if (hsfsts.hsf_status.flcdone == 0) { | 1756 | } else if (hsfsts.hsf_status.flcdone == 0) { |
1928 | hw_dbg(hw, "Timeout error - flash cycle " | 1757 | e_dbg("Timeout error - flash cycle " |
1929 | "did not complete."); | 1758 | "did not complete."); |
1930 | break; | 1759 | break; |
1931 | } | 1760 | } |
@@ -1953,18 +1782,18 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
1953 | 1782 | ||
1954 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || | 1783 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || |
1955 | (words == 0)) { | 1784 | (words == 0)) { |
1956 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1785 | e_dbg("nvm parameter(s) out of bounds\n"); |
1957 | return -E1000_ERR_NVM; | 1786 | return -E1000_ERR_NVM; |
1958 | } | 1787 | } |
1959 | 1788 | ||
1960 | nvm->ops.acquire_nvm(hw); | 1789 | nvm->ops.acquire(hw); |
1961 | 1790 | ||
1962 | for (i = 0; i < words; i++) { | 1791 | for (i = 0; i < words; i++) { |
1963 | dev_spec->shadow_ram[offset+i].modified = 1; | 1792 | dev_spec->shadow_ram[offset+i].modified = true; |
1964 | dev_spec->shadow_ram[offset+i].value = data[i]; | 1793 | dev_spec->shadow_ram[offset+i].value = data[i]; |
1965 | } | 1794 | } |
1966 | 1795 | ||
1967 | nvm->ops.release_nvm(hw); | 1796 | nvm->ops.release(hw); |
1968 | 1797 | ||
1969 | return 0; | 1798 | return 0; |
1970 | } | 1799 | } |
@@ -1995,7 +1824,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
1995 | if (nvm->type != e1000_nvm_flash_sw) | 1824 | if (nvm->type != e1000_nvm_flash_sw) |
1996 | goto out; | 1825 | goto out; |
1997 | 1826 | ||
1998 | nvm->ops.acquire_nvm(hw); | 1827 | nvm->ops.acquire(hw); |
1999 | 1828 | ||
2000 | /* | 1829 | /* |
2001 | * We're writing to the opposite bank so if we're on bank 1, | 1830 | * We're writing to the opposite bank so if we're on bank 1, |
@@ -2004,7 +1833,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2004 | */ | 1833 | */ |
2005 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); | 1834 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); |
2006 | if (ret_val) { | 1835 | if (ret_val) { |
2007 | hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n"); | 1836 | e_dbg("Could not detect valid bank, assuming bank 0\n"); |
2008 | bank = 0; | 1837 | bank = 0; |
2009 | } | 1838 | } |
2010 | 1839 | ||
@@ -2013,7 +1842,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2013 | old_bank_offset = 0; | 1842 | old_bank_offset = 0; |
2014 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); | 1843 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); |
2015 | if (ret_val) { | 1844 | if (ret_val) { |
2016 | nvm->ops.release_nvm(hw); | 1845 | nvm->ops.release(hw); |
2017 | goto out; | 1846 | goto out; |
2018 | } | 1847 | } |
2019 | } else { | 1848 | } else { |
@@ -2021,7 +1850,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2021 | new_bank_offset = 0; | 1850 | new_bank_offset = 0; |
2022 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); | 1851 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); |
2023 | if (ret_val) { | 1852 | if (ret_val) { |
2024 | nvm->ops.release_nvm(hw); | 1853 | nvm->ops.release(hw); |
2025 | goto out; | 1854 | goto out; |
2026 | } | 1855 | } |
2027 | } | 1856 | } |
@@ -2078,8 +1907,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2078 | */ | 1907 | */ |
2079 | if (ret_val) { | 1908 | if (ret_val) { |
2080 | /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ | 1909 | /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ |
2081 | hw_dbg(hw, "Flash commit failed.\n"); | 1910 | e_dbg("Flash commit failed.\n"); |
2082 | nvm->ops.release_nvm(hw); | 1911 | nvm->ops.release(hw); |
2083 | goto out; | 1912 | goto out; |
2084 | } | 1913 | } |
2085 | 1914 | ||
@@ -2092,7 +1921,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2092 | act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; | 1921 | act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; |
2093 | ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); | 1922 | ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); |
2094 | if (ret_val) { | 1923 | if (ret_val) { |
2095 | nvm->ops.release_nvm(hw); | 1924 | nvm->ops.release(hw); |
2096 | goto out; | 1925 | goto out; |
2097 | } | 1926 | } |
2098 | data &= 0xBFFF; | 1927 | data &= 0xBFFF; |
@@ -2100,7 +1929,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2100 | act_offset * 2 + 1, | 1929 | act_offset * 2 + 1, |
2101 | (u8)(data >> 8)); | 1930 | (u8)(data >> 8)); |
2102 | if (ret_val) { | 1931 | if (ret_val) { |
2103 | nvm->ops.release_nvm(hw); | 1932 | nvm->ops.release(hw); |
2104 | goto out; | 1933 | goto out; |
2105 | } | 1934 | } |
2106 | 1935 | ||
@@ -2113,17 +1942,17 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2113 | act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; | 1942 | act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; |
2114 | ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); | 1943 | ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); |
2115 | if (ret_val) { | 1944 | if (ret_val) { |
2116 | nvm->ops.release_nvm(hw); | 1945 | nvm->ops.release(hw); |
2117 | goto out; | 1946 | goto out; |
2118 | } | 1947 | } |
2119 | 1948 | ||
2120 | /* Great! Everything worked, we can now clear the cached entries. */ | 1949 | /* Great! Everything worked, we can now clear the cached entries. */ |
2121 | for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { | 1950 | for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { |
2122 | dev_spec->shadow_ram[i].modified = 0; | 1951 | dev_spec->shadow_ram[i].modified = false; |
2123 | dev_spec->shadow_ram[i].value = 0xFFFF; | 1952 | dev_spec->shadow_ram[i].value = 0xFFFF; |
2124 | } | 1953 | } |
2125 | 1954 | ||
2126 | nvm->ops.release_nvm(hw); | 1955 | nvm->ops.release(hw); |
2127 | 1956 | ||
2128 | /* | 1957 | /* |
2129 | * Reload the EEPROM, or else modifications will not appear | 1958 | * Reload the EEPROM, or else modifications will not appear |
@@ -2134,7 +1963,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2134 | 1963 | ||
2135 | out: | 1964 | out: |
2136 | if (ret_val) | 1965 | if (ret_val) |
2137 | hw_dbg(hw, "NVM update error: %d\n", ret_val); | 1966 | e_dbg("NVM update error: %d\n", ret_val); |
2138 | 1967 | ||
2139 | return ret_val; | 1968 | return ret_val; |
2140 | } | 1969 | } |
@@ -2192,7 +2021,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) | |||
2192 | union ich8_hws_flash_status hsfsts; | 2021 | union ich8_hws_flash_status hsfsts; |
2193 | u32 gfpreg; | 2022 | u32 gfpreg; |
2194 | 2023 | ||
2195 | nvm->ops.acquire_nvm(hw); | 2024 | nvm->ops.acquire(hw); |
2196 | 2025 | ||
2197 | gfpreg = er32flash(ICH_FLASH_GFPREG); | 2026 | gfpreg = er32flash(ICH_FLASH_GFPREG); |
2198 | 2027 | ||
@@ -2213,7 +2042,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) | |||
2213 | hsfsts.hsf_status.flockdn = true; | 2042 | hsfsts.hsf_status.flockdn = true; |
2214 | ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); | 2043 | ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); |
2215 | 2044 | ||
2216 | nvm->ops.release_nvm(hw); | 2045 | nvm->ops.release(hw); |
2217 | } | 2046 | } |
2218 | 2047 | ||
2219 | /** | 2048 | /** |
@@ -2284,7 +2113,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |||
2284 | /* Repeat for some time before giving up. */ | 2113 | /* Repeat for some time before giving up. */ |
2285 | continue; | 2114 | continue; |
2286 | if (hsfsts.hsf_status.flcdone == 0) { | 2115 | if (hsfsts.hsf_status.flcdone == 0) { |
2287 | hw_dbg(hw, "Timeout error - flash cycle " | 2116 | e_dbg("Timeout error - flash cycle " |
2288 | "did not complete."); | 2117 | "did not complete."); |
2289 | break; | 2118 | break; |
2290 | } | 2119 | } |
@@ -2329,7 +2158,7 @@ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, | |||
2329 | return ret_val; | 2158 | return ret_val; |
2330 | 2159 | ||
2331 | for (program_retries = 0; program_retries < 100; program_retries++) { | 2160 | for (program_retries = 0; program_retries < 100; program_retries++) { |
2332 | hw_dbg(hw, "Retrying Byte %2.2X at offset %u\n", byte, offset); | 2161 | e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); |
2333 | udelay(100); | 2162 | udelay(100); |
2334 | ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); | 2163 | ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); |
2335 | if (!ret_val) | 2164 | if (!ret_val) |
@@ -2359,9 +2188,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) | |||
2359 | u32 flash_bank_size = nvm->flash_bank_size * 2; | 2188 | u32 flash_bank_size = nvm->flash_bank_size * 2; |
2360 | s32 ret_val; | 2189 | s32 ret_val; |
2361 | s32 count = 0; | 2190 | s32 count = 0; |
2362 | s32 iteration; | 2191 | s32 j, iteration, sector_size; |
2363 | s32 sector_size; | ||
2364 | s32 j; | ||
2365 | 2192 | ||
2366 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | 2193 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); |
2367 | 2194 | ||
@@ -2464,7 +2291,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) | |||
2464 | 2291 | ||
2465 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); | 2292 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); |
2466 | if (ret_val) { | 2293 | if (ret_val) { |
2467 | hw_dbg(hw, "NVM Read Error\n"); | 2294 | e_dbg("NVM Read Error\n"); |
2468 | return ret_val; | 2295 | return ret_val; |
2469 | } | 2296 | } |
2470 | 2297 | ||
@@ -2594,10 +2421,10 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
2594 | */ | 2421 | */ |
2595 | ret_val = e1000e_disable_pcie_master(hw); | 2422 | ret_val = e1000e_disable_pcie_master(hw); |
2596 | if (ret_val) { | 2423 | if (ret_val) { |
2597 | hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); | 2424 | e_dbg("PCI-E Master disable polling has failed.\n"); |
2598 | } | 2425 | } |
2599 | 2426 | ||
2600 | hw_dbg(hw, "Masking off all interrupts\n"); | 2427 | e_dbg("Masking off all interrupts\n"); |
2601 | ew32(IMC, 0xffffffff); | 2428 | ew32(IMC, 0xffffffff); |
2602 | 2429 | ||
2603 | /* | 2430 | /* |
@@ -2648,8 +2475,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
2648 | ctrl |= E1000_CTRL_PHY_RST; | 2475 | ctrl |= E1000_CTRL_PHY_RST; |
2649 | } | 2476 | } |
2650 | ret_val = e1000_acquire_swflag_ich8lan(hw); | 2477 | ret_val = e1000_acquire_swflag_ich8lan(hw); |
2651 | /* Whether or not the swflag was acquired, we need to reset the part */ | 2478 | e_dbg("Issuing a global reset to ich8lan\n"); |
2652 | hw_dbg(hw, "Issuing a global reset to ich8lan\n"); | ||
2653 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); | 2479 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); |
2654 | msleep(20); | 2480 | msleep(20); |
2655 | 2481 | ||
@@ -2669,7 +2495,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
2669 | * return with an error. This can happen in situations | 2495 | * return with an error. This can happen in situations |
2670 | * where there is no eeprom and prevents getting link. | 2496 | * where there is no eeprom and prevents getting link. |
2671 | */ | 2497 | */ |
2672 | hw_dbg(hw, "Auto Read Done did not complete\n"); | 2498 | e_dbg("Auto Read Done did not complete\n"); |
2673 | } | 2499 | } |
2674 | } | 2500 | } |
2675 | /* Dummy read to clear the phy wakeup bit after lcd reset */ | 2501 | /* Dummy read to clear the phy wakeup bit after lcd reset */ |
@@ -2730,16 +2556,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) | |||
2730 | 2556 | ||
2731 | /* Initialize identification LED */ | 2557 | /* Initialize identification LED */ |
2732 | ret_val = mac->ops.id_led_init(hw); | 2558 | ret_val = mac->ops.id_led_init(hw); |
2733 | if (ret_val) { | 2559 | if (ret_val) |
2734 | hw_dbg(hw, "Error initializing identification LED\n"); | 2560 | e_dbg("Error initializing identification LED\n"); |
2735 | return ret_val; | 2561 | /* This is not fatal and we should not stop init due to this */ |
2736 | } | ||
2737 | 2562 | ||
2738 | /* Setup the receive address. */ | 2563 | /* Setup the receive address. */ |
2739 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); | 2564 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); |
2740 | 2565 | ||
2741 | /* Zero out the Multicast HASH table */ | 2566 | /* Zero out the Multicast HASH table */ |
2742 | hw_dbg(hw, "Zeroing the MTA\n"); | 2567 | e_dbg("Zeroing the MTA\n"); |
2743 | for (i = 0; i < mac->mta_reg_count; i++) | 2568 | for (i = 0; i < mac->mta_reg_count; i++) |
2744 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); | 2569 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); |
2745 | 2570 | ||
@@ -2749,7 +2574,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) | |||
2749 | * Reset the phy after disabling host wakeup to reset the Rx buffer. | 2574 | * Reset the phy after disabling host wakeup to reset the Rx buffer. |
2750 | */ | 2575 | */ |
2751 | if (hw->phy.type == e1000_phy_82578) { | 2576 | if (hw->phy.type == e1000_phy_82578) { |
2752 | hw->phy.ops.read_phy_reg(hw, BM_WUC, &i); | 2577 | hw->phy.ops.read_reg(hw, BM_WUC, &i); |
2753 | ret_val = e1000_phy_hw_reset_ich8lan(hw); | 2578 | ret_val = e1000_phy_hw_reset_ich8lan(hw); |
2754 | if (ret_val) | 2579 | if (ret_val) |
2755 | return ret_val; | 2580 | return ret_val; |
@@ -2885,7 +2710,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) | |||
2885 | */ | 2710 | */ |
2886 | hw->fc.current_mode = hw->fc.requested_mode; | 2711 | hw->fc.current_mode = hw->fc.requested_mode; |
2887 | 2712 | ||
2888 | hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", | 2713 | e_dbg("After fix-ups FlowControl is now = %x\n", |
2889 | hw->fc.current_mode); | 2714 | hw->fc.current_mode); |
2890 | 2715 | ||
2891 | /* Continue to configure the copper link. */ | 2716 | /* Continue to configure the copper link. */ |
@@ -2896,7 +2721,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) | |||
2896 | ew32(FCTTV, hw->fc.pause_time); | 2721 | ew32(FCTTV, hw->fc.pause_time); |
2897 | if ((hw->phy.type == e1000_phy_82578) || | 2722 | if ((hw->phy.type == e1000_phy_82578) || |
2898 | (hw->phy.type == e1000_phy_82577)) { | 2723 | (hw->phy.type == e1000_phy_82577)) { |
2899 | ret_val = hw->phy.ops.write_phy_reg(hw, | 2724 | ret_val = hw->phy.ops.write_reg(hw, |
2900 | PHY_REG(BM_PORT_CTRL_PAGE, 27), | 2725 | PHY_REG(BM_PORT_CTRL_PAGE, 27), |
2901 | hw->fc.pause_time); | 2726 | hw->fc.pause_time); |
2902 | if (ret_val) | 2727 | if (ret_val) |
@@ -2959,7 +2784,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) | |||
2959 | return ret_val; | 2784 | return ret_val; |
2960 | break; | 2785 | break; |
2961 | case e1000_phy_ife: | 2786 | case e1000_phy_ife: |
2962 | ret_val = hw->phy.ops.read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, | 2787 | ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, |
2963 | ®_data); | 2788 | ®_data); |
2964 | if (ret_val) | 2789 | if (ret_val) |
2965 | return ret_val; | 2790 | return ret_val; |
@@ -2978,7 +2803,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) | |||
2978 | reg_data |= IFE_PMC_AUTO_MDIX; | 2803 | reg_data |= IFE_PMC_AUTO_MDIX; |
2979 | break; | 2804 | break; |
2980 | } | 2805 | } |
2981 | ret_val = hw->phy.ops.write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, | 2806 | ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, |
2982 | reg_data); | 2807 | reg_data); |
2983 | if (ret_val) | 2808 | if (ret_val) |
2984 | return ret_val; | 2809 | return ret_val; |
@@ -3091,8 +2916,8 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) | |||
3091 | * @hw: pointer to the HW structure | 2916 | * @hw: pointer to the HW structure |
3092 | * @state: boolean value used to set the current Kumeran workaround state | 2917 | * @state: boolean value used to set the current Kumeran workaround state |
3093 | * | 2918 | * |
3094 | * If ICH8, set the current Kumeran workaround state (enabled - TRUE | 2919 | * If ICH8, set the current Kumeran workaround state (enabled - true |
3095 | * /disabled - FALSE). | 2920 | * /disabled - false). |
3096 | **/ | 2921 | **/ |
3097 | void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, | 2922 | void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, |
3098 | bool state) | 2923 | bool state) |
@@ -3100,7 +2925,7 @@ void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, | |||
3100 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | 2925 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; |
3101 | 2926 | ||
3102 | if (hw->mac.type != e1000_ich8lan) { | 2927 | if (hw->mac.type != e1000_ich8lan) { |
3103 | hw_dbg(hw, "Workaround applies to ICH8 only.\n"); | 2928 | e_dbg("Workaround applies to ICH8 only.\n"); |
3104 | return; | 2929 | return; |
3105 | } | 2930 | } |
3106 | 2931 | ||
@@ -3208,6 +3033,7 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) | |||
3208 | u32 phy_ctrl; | 3033 | u32 phy_ctrl; |
3209 | 3034 | ||
3210 | switch (hw->mac.type) { | 3035 | switch (hw->mac.type) { |
3036 | case e1000_ich8lan: | ||
3211 | case e1000_ich9lan: | 3037 | case e1000_ich9lan: |
3212 | case e1000_ich10lan: | 3038 | case e1000_ich10lan: |
3213 | case e1000_pchlan: | 3039 | case e1000_pchlan: |
@@ -3280,7 +3106,7 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) | |||
3280 | **/ | 3106 | **/ |
3281 | static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) | 3107 | static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) |
3282 | { | 3108 | { |
3283 | return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, | 3109 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, |
3284 | (u16)hw->mac.ledctl_mode1); | 3110 | (u16)hw->mac.ledctl_mode1); |
3285 | } | 3111 | } |
3286 | 3112 | ||
@@ -3292,7 +3118,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) | |||
3292 | **/ | 3118 | **/ |
3293 | static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) | 3119 | static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) |
3294 | { | 3120 | { |
3295 | return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, | 3121 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, |
3296 | (u16)hw->mac.ledctl_default); | 3122 | (u16)hw->mac.ledctl_default); |
3297 | } | 3123 | } |
3298 | 3124 | ||
@@ -3324,7 +3150,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw) | |||
3324 | } | 3150 | } |
3325 | } | 3151 | } |
3326 | 3152 | ||
3327 | return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data); | 3153 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); |
3328 | } | 3154 | } |
3329 | 3155 | ||
3330 | /** | 3156 | /** |
@@ -3355,7 +3181,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw) | |||
3355 | } | 3181 | } |
3356 | } | 3182 | } |
3357 | 3183 | ||
3358 | return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data); | 3184 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); |
3359 | } | 3185 | } |
3360 | 3186 | ||
3361 | /** | 3187 | /** |
@@ -3378,8 +3204,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) | |||
3378 | if (status & E1000_STATUS_PHYRA) | 3204 | if (status & E1000_STATUS_PHYRA) |
3379 | ew32(STATUS, status & ~E1000_STATUS_PHYRA); | 3205 | ew32(STATUS, status & ~E1000_STATUS_PHYRA); |
3380 | else | 3206 | else |
3381 | hw_dbg(hw, | 3207 | e_dbg("PHY Reset Asserted not set - needs delay\n"); |
3382 | "PHY Reset Asserted not set - needs delay\n"); | ||
3383 | } | 3208 | } |
3384 | 3209 | ||
3385 | e1000e_get_cfg_done(hw); | 3210 | e1000e_get_cfg_done(hw); |
@@ -3394,7 +3219,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) | |||
3394 | } else { | 3219 | } else { |
3395 | if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { | 3220 | if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { |
3396 | /* Maybe we should do a basic PHY config */ | 3221 | /* Maybe we should do a basic PHY config */ |
3397 | hw_dbg(hw, "EEPROM not present\n"); | 3222 | e_dbg("EEPROM not present\n"); |
3398 | return -E1000_ERR_CONFIG; | 3223 | return -E1000_ERR_CONFIG; |
3399 | } | 3224 | } |
3400 | } | 3225 | } |
@@ -3403,6 +3228,23 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) | |||
3403 | } | 3228 | } |
3404 | 3229 | ||
3405 | /** | 3230 | /** |
3231 | * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down | ||
3232 | * @hw: pointer to the HW structure | ||
3233 | * | ||
3234 | * In the case of a PHY power down to save power, or to turn off link during a | ||
3235 | * driver unload, or wake on lan is not enabled, remove the link. | ||
3236 | **/ | ||
3237 | static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) | ||
3238 | { | ||
3239 | /* If the management interface is not enabled, then power down */ | ||
3240 | if (!(hw->mac.ops.check_mng_mode(hw) || | ||
3241 | hw->phy.ops.check_reset_block(hw))) | ||
3242 | e1000_power_down_phy_copper(hw); | ||
3243 | |||
3244 | return; | ||
3245 | } | ||
3246 | |||
3247 | /** | ||
3406 | * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters | 3248 | * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters |
3407 | * @hw: pointer to the HW structure | 3249 | * @hw: pointer to the HW structure |
3408 | * | 3250 | * |
@@ -3411,42 +3253,41 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) | |||
3411 | **/ | 3253 | **/ |
3412 | static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) | 3254 | static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) |
3413 | { | 3255 | { |
3414 | u32 temp; | ||
3415 | u16 phy_data; | 3256 | u16 phy_data; |
3416 | 3257 | ||
3417 | e1000e_clear_hw_cntrs_base(hw); | 3258 | e1000e_clear_hw_cntrs_base(hw); |
3418 | 3259 | ||
3419 | temp = er32(ALGNERRC); | 3260 | er32(ALGNERRC); |
3420 | temp = er32(RXERRC); | 3261 | er32(RXERRC); |
3421 | temp = er32(TNCRS); | 3262 | er32(TNCRS); |
3422 | temp = er32(CEXTERR); | 3263 | er32(CEXTERR); |
3423 | temp = er32(TSCTC); | 3264 | er32(TSCTC); |
3424 | temp = er32(TSCTFC); | 3265 | er32(TSCTFC); |
3425 | 3266 | ||
3426 | temp = er32(MGTPRC); | 3267 | er32(MGTPRC); |
3427 | temp = er32(MGTPDC); | 3268 | er32(MGTPDC); |
3428 | temp = er32(MGTPTC); | 3269 | er32(MGTPTC); |
3429 | 3270 | ||
3430 | temp = er32(IAC); | 3271 | er32(IAC); |
3431 | temp = er32(ICRXOC); | 3272 | er32(ICRXOC); |
3432 | 3273 | ||
3433 | /* Clear PHY statistics registers */ | 3274 | /* Clear PHY statistics registers */ |
3434 | if ((hw->phy.type == e1000_phy_82578) || | 3275 | if ((hw->phy.type == e1000_phy_82578) || |
3435 | (hw->phy.type == e1000_phy_82577)) { | 3276 | (hw->phy.type == e1000_phy_82577)) { |
3436 | hw->phy.ops.read_phy_reg(hw, HV_SCC_UPPER, &phy_data); | 3277 | hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data); |
3437 | hw->phy.ops.read_phy_reg(hw, HV_SCC_LOWER, &phy_data); | 3278 | hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data); |
3438 | hw->phy.ops.read_phy_reg(hw, HV_ECOL_UPPER, &phy_data); | 3279 | hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data); |
3439 | hw->phy.ops.read_phy_reg(hw, HV_ECOL_LOWER, &phy_data); | 3280 | hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data); |
3440 | hw->phy.ops.read_phy_reg(hw, HV_MCC_UPPER, &phy_data); | 3281 | hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data); |
3441 | hw->phy.ops.read_phy_reg(hw, HV_MCC_LOWER, &phy_data); | 3282 | hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data); |
3442 | hw->phy.ops.read_phy_reg(hw, HV_LATECOL_UPPER, &phy_data); | 3283 | hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data); |
3443 | hw->phy.ops.read_phy_reg(hw, HV_LATECOL_LOWER, &phy_data); | 3284 | hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data); |
3444 | hw->phy.ops.read_phy_reg(hw, HV_COLC_UPPER, &phy_data); | 3285 | hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data); |
3445 | hw->phy.ops.read_phy_reg(hw, HV_COLC_LOWER, &phy_data); | 3286 | hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data); |
3446 | hw->phy.ops.read_phy_reg(hw, HV_DC_UPPER, &phy_data); | 3287 | hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data); |
3447 | hw->phy.ops.read_phy_reg(hw, HV_DC_LOWER, &phy_data); | 3288 | hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data); |
3448 | hw->phy.ops.read_phy_reg(hw, HV_TNCRS_UPPER, &phy_data); | 3289 | hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data); |
3449 | hw->phy.ops.read_phy_reg(hw, HV_TNCRS_LOWER, &phy_data); | 3290 | hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data); |
3450 | } | 3291 | } |
3451 | } | 3292 | } |
3452 | 3293 | ||
@@ -3469,29 +3310,27 @@ static struct e1000_mac_operations ich8_mac_ops = { | |||
3469 | }; | 3310 | }; |
3470 | 3311 | ||
3471 | static struct e1000_phy_operations ich8_phy_ops = { | 3312 | static struct e1000_phy_operations ich8_phy_ops = { |
3472 | .acquire_phy = e1000_acquire_swflag_ich8lan, | 3313 | .acquire = e1000_acquire_swflag_ich8lan, |
3473 | .check_reset_block = e1000_check_reset_block_ich8lan, | 3314 | .check_reset_block = e1000_check_reset_block_ich8lan, |
3474 | .commit_phy = NULL, | 3315 | .commit = NULL, |
3475 | .force_speed_duplex = e1000_phy_force_speed_duplex_ich8lan, | ||
3476 | .get_cfg_done = e1000_get_cfg_done_ich8lan, | 3316 | .get_cfg_done = e1000_get_cfg_done_ich8lan, |
3477 | .get_cable_length = e1000e_get_cable_length_igp_2, | 3317 | .get_cable_length = e1000e_get_cable_length_igp_2, |
3478 | .get_phy_info = e1000_get_phy_info_ich8lan, | 3318 | .read_reg = e1000e_read_phy_reg_igp, |
3479 | .read_phy_reg = e1000e_read_phy_reg_igp, | 3319 | .release = e1000_release_swflag_ich8lan, |
3480 | .release_phy = e1000_release_swflag_ich8lan, | 3320 | .reset = e1000_phy_hw_reset_ich8lan, |
3481 | .reset_phy = e1000_phy_hw_reset_ich8lan, | ||
3482 | .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, | 3321 | .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, |
3483 | .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, | 3322 | .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, |
3484 | .write_phy_reg = e1000e_write_phy_reg_igp, | 3323 | .write_reg = e1000e_write_phy_reg_igp, |
3485 | }; | 3324 | }; |
3486 | 3325 | ||
3487 | static struct e1000_nvm_operations ich8_nvm_ops = { | 3326 | static struct e1000_nvm_operations ich8_nvm_ops = { |
3488 | .acquire_nvm = e1000_acquire_nvm_ich8lan, | 3327 | .acquire = e1000_acquire_nvm_ich8lan, |
3489 | .read_nvm = e1000_read_nvm_ich8lan, | 3328 | .read = e1000_read_nvm_ich8lan, |
3490 | .release_nvm = e1000_release_nvm_ich8lan, | 3329 | .release = e1000_release_nvm_ich8lan, |
3491 | .update_nvm = e1000_update_nvm_checksum_ich8lan, | 3330 | .update = e1000_update_nvm_checksum_ich8lan, |
3492 | .valid_led_default = e1000_valid_led_default_ich8lan, | 3331 | .valid_led_default = e1000_valid_led_default_ich8lan, |
3493 | .validate_nvm = e1000_validate_nvm_checksum_ich8lan, | 3332 | .validate = e1000_validate_nvm_checksum_ich8lan, |
3494 | .write_nvm = e1000_write_nvm_ich8lan, | 3333 | .write = e1000_write_nvm_ich8lan, |
3495 | }; | 3334 | }; |
3496 | 3335 | ||
3497 | struct e1000_info e1000_ich8_info = { | 3336 | struct e1000_info e1000_ich8_info = { |
@@ -3558,6 +3397,7 @@ struct e1000_info e1000_pch_info = { | |||
3558 | | FLAG_HAS_AMT | 3397 | | FLAG_HAS_AMT |
3559 | | FLAG_HAS_FLASH | 3398 | | FLAG_HAS_FLASH |
3560 | | FLAG_HAS_JUMBO_FRAMES | 3399 | | FLAG_HAS_JUMBO_FRAMES |
3400 | | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ | ||
3561 | | FLAG_APME_IN_WUC, | 3401 | | FLAG_APME_IN_WUC, |
3562 | .pba = 26, | 3402 | .pba = 26, |
3563 | .max_hw_frame_size = 4096, | 3403 | .max_hw_frame_size = 4096, |
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 99ba2b8a2a05..a86c17548c1e 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -26,11 +26,6 @@ | |||
26 | 26 | ||
27 | *******************************************************************************/ | 27 | *******************************************************************************/ |
28 | 28 | ||
29 | #include <linux/netdevice.h> | ||
30 | #include <linux/ethtool.h> | ||
31 | #include <linux/delay.h> | ||
32 | #include <linux/pci.h> | ||
33 | |||
34 | #include "e1000.h" | 29 | #include "e1000.h" |
35 | 30 | ||
36 | enum e1000_mng_mode { | 31 | enum e1000_mng_mode { |
@@ -87,7 +82,24 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) | |||
87 | } | 82 | } |
88 | 83 | ||
89 | /** | 84 | /** |
90 | * e1000e_write_vfta - Write value to VLAN filter table | 85 | * e1000_clear_vfta_generic - Clear VLAN filter table |
86 | * @hw: pointer to the HW structure | ||
87 | * | ||
88 | * Clears the register array which contains the VLAN filter table by | ||
89 | * setting all the values to 0. | ||
90 | **/ | ||
91 | void e1000_clear_vfta_generic(struct e1000_hw *hw) | ||
92 | { | ||
93 | u32 offset; | ||
94 | |||
95 | for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { | ||
96 | E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); | ||
97 | e1e_flush(); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | /** | ||
102 | * e1000_write_vfta_generic - Write value to VLAN filter table | ||
91 | * @hw: pointer to the HW structure | 103 | * @hw: pointer to the HW structure |
92 | * @offset: register offset in VLAN filter table | 104 | * @offset: register offset in VLAN filter table |
93 | * @value: register value written to VLAN filter table | 105 | * @value: register value written to VLAN filter table |
@@ -95,7 +107,7 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) | |||
95 | * Writes value at the given offset in the register array which stores | 107 | * Writes value at the given offset in the register array which stores |
96 | * the VLAN filter table. | 108 | * the VLAN filter table. |
97 | **/ | 109 | **/ |
98 | void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) | 110 | void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) |
99 | { | 111 | { |
100 | E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); | 112 | E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); |
101 | e1e_flush(); | 113 | e1e_flush(); |
@@ -115,12 +127,12 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) | |||
115 | u32 i; | 127 | u32 i; |
116 | 128 | ||
117 | /* Setup the receive address */ | 129 | /* Setup the receive address */ |
118 | hw_dbg(hw, "Programming MAC Address into RAR[0]\n"); | 130 | e_dbg("Programming MAC Address into RAR[0]\n"); |
119 | 131 | ||
120 | e1000e_rar_set(hw, hw->mac.addr, 0); | 132 | e1000e_rar_set(hw, hw->mac.addr, 0); |
121 | 133 | ||
122 | /* Zero out the other (rar_entry_count - 1) receive addresses */ | 134 | /* Zero out the other (rar_entry_count - 1) receive addresses */ |
123 | hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1); | 135 | e_dbg("Clearing RAR[1-%u]\n", rar_count-1); |
124 | for (i = 1; i < rar_count; i++) { | 136 | for (i = 1; i < rar_count; i++) { |
125 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); | 137 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); |
126 | e1e_flush(); | 138 | e1e_flush(); |
@@ -276,7 +288,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, | |||
276 | for (; mc_addr_count > 0; mc_addr_count--) { | 288 | for (; mc_addr_count > 0; mc_addr_count--) { |
277 | u32 hash_value, hash_reg, hash_bit, mta; | 289 | u32 hash_value, hash_reg, hash_bit, mta; |
278 | hash_value = e1000_hash_mc_addr(hw, mc_addr_list); | 290 | hash_value = e1000_hash_mc_addr(hw, mc_addr_list); |
279 | hw_dbg(hw, "Hash value = 0x%03X\n", hash_value); | 291 | e_dbg("Hash value = 0x%03X\n", hash_value); |
280 | hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); | 292 | hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); |
281 | hash_bit = hash_value & 0x1F; | 293 | hash_bit = hash_value & 0x1F; |
282 | mta = (1 << hash_bit); | 294 | mta = (1 << hash_bit); |
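As a worked example of the hash-to-MTA mapping computed in the loop above (the hash value is arbitrary, and mta_reg_count = 128 is assumed here; ICH parts use a smaller table but the arithmetic is identical):

        /*
         * hash_value = 0x263
         * hash_reg   = (0x263 >> 5) & (128 - 1) = 0x13  -> MTA register 19
         * hash_bit   =  0x263 & 0x1F            = 0x03  -> bit 3
         * mta        =  1 << 0x03               = 0x08
         */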
@@ -300,45 +312,43 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, | |||
300 | **/ | 312 | **/ |
301 | void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) | 313 | void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) |
302 | { | 314 | { |
303 | u32 temp; | 315 | er32(CRCERRS); |
304 | 316 | er32(SYMERRS); | |
305 | temp = er32(CRCERRS); | 317 | er32(MPC); |
306 | temp = er32(SYMERRS); | 318 | er32(SCC); |
307 | temp = er32(MPC); | 319 | er32(ECOL); |
308 | temp = er32(SCC); | 320 | er32(MCC); |
309 | temp = er32(ECOL); | 321 | er32(LATECOL); |
310 | temp = er32(MCC); | 322 | er32(COLC); |
311 | temp = er32(LATECOL); | 323 | er32(DC); |
312 | temp = er32(COLC); | 324 | er32(SEC); |
313 | temp = er32(DC); | 325 | er32(RLEC); |
314 | temp = er32(SEC); | 326 | er32(XONRXC); |
315 | temp = er32(RLEC); | 327 | er32(XONTXC); |
316 | temp = er32(XONRXC); | 328 | er32(XOFFRXC); |
317 | temp = er32(XONTXC); | 329 | er32(XOFFTXC); |
318 | temp = er32(XOFFRXC); | 330 | er32(FCRUC); |
319 | temp = er32(XOFFTXC); | 331 | er32(GPRC); |
320 | temp = er32(FCRUC); | 332 | er32(BPRC); |
321 | temp = er32(GPRC); | 333 | er32(MPRC); |
322 | temp = er32(BPRC); | 334 | er32(GPTC); |
323 | temp = er32(MPRC); | 335 | er32(GORCL); |
324 | temp = er32(GPTC); | 336 | er32(GORCH); |
325 | temp = er32(GORCL); | 337 | er32(GOTCL); |
326 | temp = er32(GORCH); | 338 | er32(GOTCH); |
327 | temp = er32(GOTCL); | 339 | er32(RNBC); |
328 | temp = er32(GOTCH); | 340 | er32(RUC); |
329 | temp = er32(RNBC); | 341 | er32(RFC); |
330 | temp = er32(RUC); | 342 | er32(ROC); |
331 | temp = er32(RFC); | 343 | er32(RJC); |
332 | temp = er32(ROC); | 344 | er32(TORL); |
333 | temp = er32(RJC); | 345 | er32(TORH); |
334 | temp = er32(TORL); | 346 | er32(TOTL); |
335 | temp = er32(TORH); | 347 | er32(TOTH); |
336 | temp = er32(TOTL); | 348 | er32(TPR); |
337 | temp = er32(TOTH); | 349 | er32(TPT); |
338 | temp = er32(TPR); | 350 | er32(MPTC); |
339 | temp = er32(TPT); | 351 | er32(BPTC); |
340 | temp = er32(MPTC); | ||
341 | temp = er32(BPTC); | ||
342 | } | 352 | } |
343 | 353 | ||
344 | /** | 354 | /** |
@@ -376,7 +386,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) | |||
376 | if (!link) | 386 | if (!link) |
377 | return ret_val; /* No link detected */ | 387 | return ret_val; /* No link detected */ |
378 | 388 | ||
379 | mac->get_link_status = 0; | 389 | mac->get_link_status = false; |
380 | 390 | ||
381 | /* | 391 | /* |
382 | * Check if there was DownShift, must be checked | 392 | * Check if there was DownShift, must be checked |
@@ -408,7 +418,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) | |||
408 | */ | 418 | */ |
409 | ret_val = e1000e_config_fc_after_link_up(hw); | 419 | ret_val = e1000e_config_fc_after_link_up(hw); |
410 | if (ret_val) { | 420 | if (ret_val) { |
411 | hw_dbg(hw, "Error configuring flow control\n"); | 421 | e_dbg("Error configuring flow control\n"); |
412 | } | 422 | } |
413 | 423 | ||
414 | return ret_val; | 424 | return ret_val; |
@@ -448,7 +458,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
448 | mac->autoneg_failed = 1; | 458 | mac->autoneg_failed = 1; |
449 | return 0; | 459 | return 0; |
450 | } | 460 | } |
451 | hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); | 461 | e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); |
452 | 462 | ||
453 | /* Disable auto-negotiation in the TXCW register */ | 463 | /* Disable auto-negotiation in the TXCW register */ |
454 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | 464 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); |
@@ -461,7 +471,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
461 | /* Configure Flow Control after forcing link up. */ | 471 | /* Configure Flow Control after forcing link up. */ |
462 | ret_val = e1000e_config_fc_after_link_up(hw); | 472 | ret_val = e1000e_config_fc_after_link_up(hw); |
463 | if (ret_val) { | 473 | if (ret_val) { |
464 | hw_dbg(hw, "Error configuring flow control\n"); | 474 | e_dbg("Error configuring flow control\n"); |
465 | return ret_val; | 475 | return ret_val; |
466 | } | 476 | } |
467 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | 477 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
@@ -471,7 +481,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
471 | * and disable forced link in the Device Control register | 481 | * and disable forced link in the Device Control register |
472 | * in an attempt to auto-negotiate with our link partner. | 482 | * in an attempt to auto-negotiate with our link partner. |
473 | */ | 483 | */ |
474 | hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); | 484 | e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); |
475 | ew32(TXCW, mac->txcw); | 485 | ew32(TXCW, mac->txcw); |
476 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | 486 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
477 | 487 | ||
@@ -513,7 +523,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
513 | mac->autoneg_failed = 1; | 523 | mac->autoneg_failed = 1; |
514 | return 0; | 524 | return 0; |
515 | } | 525 | } |
516 | hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); | 526 | e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); |
517 | 527 | ||
518 | /* Disable auto-negotiation in the TXCW register */ | 528 | /* Disable auto-negotiation in the TXCW register */ |
519 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | 529 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); |
@@ -526,7 +536,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
526 | /* Configure Flow Control after forcing link up. */ | 536 | /* Configure Flow Control after forcing link up. */ |
527 | ret_val = e1000e_config_fc_after_link_up(hw); | 537 | ret_val = e1000e_config_fc_after_link_up(hw); |
528 | if (ret_val) { | 538 | if (ret_val) { |
529 | hw_dbg(hw, "Error configuring flow control\n"); | 539 | e_dbg("Error configuring flow control\n"); |
530 | return ret_val; | 540 | return ret_val; |
531 | } | 541 | } |
532 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | 542 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
@@ -536,7 +546,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
536 | * and disable forced link in the Device Control register | 546 | * and disable forced link in the Device Control register |
537 | * in an attempt to auto-negotiate with our link partner. | 547 | * in an attempt to auto-negotiate with our link partner. |
538 | */ | 548 | */ |
539 | hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); | 549 | e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); |
540 | ew32(TXCW, mac->txcw); | 550 | ew32(TXCW, mac->txcw); |
541 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | 551 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
542 | 552 | ||
@@ -553,11 +563,11 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
553 | if (rxcw & E1000_RXCW_SYNCH) { | 563 | if (rxcw & E1000_RXCW_SYNCH) { |
554 | if (!(rxcw & E1000_RXCW_IV)) { | 564 | if (!(rxcw & E1000_RXCW_IV)) { |
555 | mac->serdes_has_link = true; | 565 | mac->serdes_has_link = true; |
556 | hw_dbg(hw, "SERDES: Link up - forced.\n"); | 566 | e_dbg("SERDES: Link up - forced.\n"); |
557 | } | 567 | } |
558 | } else { | 568 | } else { |
559 | mac->serdes_has_link = false; | 569 | mac->serdes_has_link = false; |
560 | hw_dbg(hw, "SERDES: Link down - force failed.\n"); | 570 | e_dbg("SERDES: Link down - force failed.\n"); |
561 | } | 571 | } |
562 | } | 572 | } |
563 | 573 | ||
@@ -570,20 +580,20 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
570 | if (rxcw & E1000_RXCW_SYNCH) { | 580 | if (rxcw & E1000_RXCW_SYNCH) { |
571 | if (!(rxcw & E1000_RXCW_IV)) { | 581 | if (!(rxcw & E1000_RXCW_IV)) { |
572 | mac->serdes_has_link = true; | 582 | mac->serdes_has_link = true; |
573 | hw_dbg(hw, "SERDES: Link up - autoneg " | 583 | e_dbg("SERDES: Link up - autoneg " |
574 | "completed successfully.\n"); | 584 | "completed successfully.\n"); |
575 | } else { | 585 | } else { |
576 | mac->serdes_has_link = false; | 586 | mac->serdes_has_link = false; |
577 | hw_dbg(hw, "SERDES: Link down - invalid" | 587 | e_dbg("SERDES: Link down - invalid" |
578 | "codewords detected in autoneg.\n"); | 588 | "codewords detected in autoneg.\n"); |
579 | } | 589 | } |
580 | } else { | 590 | } else { |
581 | mac->serdes_has_link = false; | 591 | mac->serdes_has_link = false; |
582 | hw_dbg(hw, "SERDES: Link down - no sync.\n"); | 592 | e_dbg("SERDES: Link down - no sync.\n"); |
583 | } | 593 | } |
584 | } else { | 594 | } else { |
585 | mac->serdes_has_link = false; | 595 | mac->serdes_has_link = false; |
586 | hw_dbg(hw, "SERDES: Link down - autoneg failed\n"); | 596 | e_dbg("SERDES: Link down - autoneg failed\n"); |
587 | } | 597 | } |
588 | } | 598 | } |
589 | 599 | ||
@@ -614,7 +624,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) | |||
614 | ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); | 624 | ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); |
615 | 625 | ||
616 | if (ret_val) { | 626 | if (ret_val) { |
617 | hw_dbg(hw, "NVM Read Error\n"); | 627 | e_dbg("NVM Read Error\n"); |
618 | return ret_val; | 628 | return ret_val; |
619 | } | 629 | } |
620 | 630 | ||
@@ -667,7 +677,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw) | |||
667 | */ | 677 | */ |
668 | hw->fc.current_mode = hw->fc.requested_mode; | 678 | hw->fc.current_mode = hw->fc.requested_mode; |
669 | 679 | ||
670 | hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", | 680 | e_dbg("After fix-ups FlowControl is now = %x\n", |
671 | hw->fc.current_mode); | 681 | hw->fc.current_mode); |
672 | 682 | ||
673 | /* Call the necessary media_type subroutine to configure the link. */ | 683 | /* Call the necessary media_type subroutine to configure the link. */ |
@@ -681,7 +691,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw) | |||
681 | * control is disabled, because it does not hurt anything to | 691 | * control is disabled, because it does not hurt anything to |
682 | * initialize these registers. | 692 | * initialize these registers. |
683 | */ | 693 | */ |
684 | hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n"); | 694 | e_dbg("Initializing the Flow Control address, type and timer regs\n"); |
685 | ew32(FCT, FLOW_CONTROL_TYPE); | 695 | ew32(FCT, FLOW_CONTROL_TYPE); |
686 | ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); | 696 | ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); |
687 | ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); | 697 | ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); |
@@ -751,7 +761,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) | |||
751 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); | 761 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); |
752 | break; | 762 | break; |
753 | default: | 763 | default: |
754 | hw_dbg(hw, "Flow control param set incorrectly\n"); | 764 | e_dbg("Flow control param set incorrectly\n"); |
755 | return -E1000_ERR_CONFIG; | 765 | return -E1000_ERR_CONFIG; |
756 | break; | 766 | break; |
757 | } | 767 | } |
@@ -789,7 +799,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) | |||
789 | break; | 799 | break; |
790 | } | 800 | } |
791 | if (i == FIBER_LINK_UP_LIMIT) { | 801 | if (i == FIBER_LINK_UP_LIMIT) { |
792 | hw_dbg(hw, "Never got a valid link from auto-neg!!!\n"); | 802 | e_dbg("Never got a valid link from auto-neg!!!\n"); |
793 | mac->autoneg_failed = 1; | 803 | mac->autoneg_failed = 1; |
794 | /* | 804 | /* |
795 | * AutoNeg failed to achieve a link, so we'll call | 805 | * AutoNeg failed to achieve a link, so we'll call |
@@ -799,13 +809,13 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) | |||
799 | */ | 809 | */ |
800 | ret_val = mac->ops.check_for_link(hw); | 810 | ret_val = mac->ops.check_for_link(hw); |
801 | if (ret_val) { | 811 | if (ret_val) { |
802 | hw_dbg(hw, "Error while checking for link\n"); | 812 | e_dbg("Error while checking for link\n"); |
803 | return ret_val; | 813 | return ret_val; |
804 | } | 814 | } |
805 | mac->autoneg_failed = 0; | 815 | mac->autoneg_failed = 0; |
806 | } else { | 816 | } else { |
807 | mac->autoneg_failed = 0; | 817 | mac->autoneg_failed = 0; |
808 | hw_dbg(hw, "Valid Link Found\n"); | 818 | e_dbg("Valid Link Found\n"); |
809 | } | 819 | } |
810 | 820 | ||
811 | return 0; | 821 | return 0; |
@@ -841,7 +851,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
841 | * then the link-up status bit will be set and the flow control enable | 851 | * then the link-up status bit will be set and the flow control enable |
842 | * bits (RFCE and TFCE) will be set according to their negotiated value. | 852 | * bits (RFCE and TFCE) will be set according to their negotiated value. |
843 | */ | 853 | */ |
844 | hw_dbg(hw, "Auto-negotiation enabled\n"); | 854 | e_dbg("Auto-negotiation enabled\n"); |
845 | 855 | ||
846 | ew32(CTRL, ctrl); | 856 | ew32(CTRL, ctrl); |
847 | e1e_flush(); | 857 | e1e_flush(); |
@@ -856,7 +866,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
856 | (er32(CTRL) & E1000_CTRL_SWDPIN1)) { | 866 | (er32(CTRL) & E1000_CTRL_SWDPIN1)) { |
857 | ret_val = e1000_poll_fiber_serdes_link_generic(hw); | 867 | ret_val = e1000_poll_fiber_serdes_link_generic(hw); |
858 | } else { | 868 | } else { |
859 | hw_dbg(hw, "No signal detected\n"); | 869 | e_dbg("No signal detected\n"); |
860 | } | 870 | } |
861 | 871 | ||
862 | return 0; | 872 | return 0; |
@@ -952,7 +962,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) | |||
952 | * 3: Both Rx and Tx flow control (symmetric) is enabled. | 962 | * 3: Both Rx and Tx flow control (symmetric) is enabled. |
953 | * other: No other values should be possible at this point. | 963 | * other: No other values should be possible at this point. |
954 | */ | 964 | */ |
955 | hw_dbg(hw, "hw->fc.current_mode = %u\n", hw->fc.current_mode); | 965 | e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); |
956 | 966 | ||
957 | switch (hw->fc.current_mode) { | 967 | switch (hw->fc.current_mode) { |
958 | case e1000_fc_none: | 968 | case e1000_fc_none: |
@@ -970,7 +980,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) | |||
970 | ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); | 980 | ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); |
971 | break; | 981 | break; |
972 | default: | 982 | default: |
973 | hw_dbg(hw, "Flow control param set incorrectly\n"); | 983 | e_dbg("Flow control param set incorrectly\n"); |
974 | return -E1000_ERR_CONFIG; | 984 | return -E1000_ERR_CONFIG; |
975 | } | 985 | } |
976 | 986 | ||
@@ -1011,7 +1021,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1011 | } | 1021 | } |
1012 | 1022 | ||
1013 | if (ret_val) { | 1023 | if (ret_val) { |
1014 | hw_dbg(hw, "Error forcing flow control settings\n"); | 1024 | e_dbg("Error forcing flow control settings\n"); |
1015 | return ret_val; | 1025 | return ret_val; |
1016 | } | 1026 | } |
1017 | 1027 | ||
@@ -1035,7 +1045,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1035 | return ret_val; | 1045 | return ret_val; |
1036 | 1046 | ||
1037 | if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { | 1047 | if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { |
1038 | hw_dbg(hw, "Copper PHY and Auto Neg " | 1048 | e_dbg("Copper PHY and Auto Neg " |
1039 | "has not completed.\n"); | 1049 | "has not completed.\n"); |
1040 | return ret_val; | 1050 | return ret_val; |
1041 | } | 1051 | } |
@@ -1076,7 +1086,6 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1076 | * 1 | 1 | 0 | 0 | e1000_fc_none | 1086 | * 1 | 1 | 0 | 0 | e1000_fc_none |
1077 | * 1 | 1 | 0 | 1 | e1000_fc_rx_pause | 1087 | * 1 | 1 | 0 | 1 | e1000_fc_rx_pause |
1078 | * | 1088 | * |
1079 | * | ||
1080 | * Are both PAUSE bits set to 1? If so, this implies | 1089 | * Are both PAUSE bits set to 1? If so, this implies |
1081 | * Symmetric Flow Control is enabled at both ends. The | 1090 | * Symmetric Flow Control is enabled at both ends. The |
1082 | * ASM_DIR bits are irrelevant per the spec. | 1091 | * ASM_DIR bits are irrelevant per the spec. |
@@ -1100,10 +1109,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1100 | */ | 1109 | */ |
1101 | if (hw->fc.requested_mode == e1000_fc_full) { | 1110 | if (hw->fc.requested_mode == e1000_fc_full) { |
1102 | hw->fc.current_mode = e1000_fc_full; | 1111 | hw->fc.current_mode = e1000_fc_full; |
1103 | hw_dbg(hw, "Flow Control = FULL.\r\n"); | 1112 | e_dbg("Flow Control = FULL.\r\n"); |
1104 | } else { | 1113 | } else { |
1105 | hw->fc.current_mode = e1000_fc_rx_pause; | 1114 | hw->fc.current_mode = e1000_fc_rx_pause; |
1106 | hw_dbg(hw, "Flow Control = " | 1115 | e_dbg("Flow Control = " |
1107 | "RX PAUSE frames only.\r\n"); | 1116 | "RX PAUSE frames only.\r\n"); |
1108 | } | 1117 | } |
1109 | } | 1118 | } |
@@ -1114,14 +1123,13 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1114 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result | 1123 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result |
1115 | *-------|---------|-------|---------|-------------------- | 1124 | *-------|---------|-------|---------|-------------------- |
1116 | * 0 | 1 | 1 | 1 | e1000_fc_tx_pause | 1125 | * 0 | 1 | 1 | 1 | e1000_fc_tx_pause |
1117 | * | ||
1118 | */ | 1126 | */ |
1119 | else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && | 1127 | else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && |
1120 | (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && | 1128 | (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && |
1121 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && | 1129 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && |
1122 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { | 1130 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { |
1123 | hw->fc.current_mode = e1000_fc_tx_pause; | 1131 | hw->fc.current_mode = e1000_fc_tx_pause; |
1124 | hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n"); | 1132 | e_dbg("Flow Control = Tx PAUSE frames only.\r\n"); |
1125 | } | 1133 | } |
1126 | /* | 1134 | /* |
1127 | * For transmitting PAUSE frames ONLY. | 1135 | * For transmitting PAUSE frames ONLY. |
@@ -1130,21 +1138,20 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1130 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result | 1138 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result |
1131 | *-------|---------|-------|---------|-------------------- | 1139 | *-------|---------|-------|---------|-------------------- |
1132 | * 1 | 1 | 0 | 1 | e1000_fc_rx_pause | 1140 | * 1 | 1 | 0 | 1 | e1000_fc_rx_pause |
1133 | * | ||
1134 | */ | 1141 | */ |
1135 | else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && | 1142 | else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && |
1136 | (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && | 1143 | (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && |
1137 | !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && | 1144 | !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && |
1138 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { | 1145 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { |
1139 | hw->fc.current_mode = e1000_fc_rx_pause; | 1146 | hw->fc.current_mode = e1000_fc_rx_pause; |
1140 | hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n"); | 1147 | e_dbg("Flow Control = Rx PAUSE frames only.\r\n"); |
1141 | } else { | 1148 | } else { |
1142 | /* | 1149 | /* |
1143 | * Per the IEEE spec, at this point flow control | 1150 | * Per the IEEE spec, at this point flow control |
1144 | * should be disabled. | 1151 | * should be disabled. |
1145 | */ | 1152 | */ |
1146 | hw->fc.current_mode = e1000_fc_none; | 1153 | hw->fc.current_mode = e1000_fc_none; |
1147 | hw_dbg(hw, "Flow Control = NONE.\r\n"); | 1154 | e_dbg("Flow Control = NONE.\r\n"); |
1148 | } | 1155 | } |
1149 | 1156 | ||
1150 | /* | 1157 | /* |
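
The debug-macro changes above land in the middle of the standard 802.3x pause resolution: once auto-negotiation completes, the local PAUSE/ASM_DIR advertisement is matched against the link partner's ability word and collapsed to full, rx-only, tx-only or no flow control. A compact standalone sketch of that truth table, with plain booleans standing in for the NWAY_AR_* and NWAY_LPAR_* register bits:

#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Resolve flow control the way e1000e_config_fc_after_link_up() does,
 * from the local and link-partner PAUSE / ASM_DIR ability bits. */
static enum fc_mode resolve_fc(bool loc_pause, bool loc_asm,
                               bool lp_pause, bool lp_asm,
                               enum fc_mode requested)
{
        if (loc_pause && lp_pause)
                /* both ends advertise symmetric pause; honour the request */
                return (requested == FC_FULL) ? FC_FULL : FC_RX_PAUSE;
        if (!loc_pause && loc_asm && lp_pause && lp_asm)
                return FC_TX_PAUSE;   /* we only send PAUSE frames   */
        if (loc_pause && loc_asm && !lp_pause && lp_asm)
                return FC_RX_PAUSE;   /* we only honour PAUSE frames */
        return FC_NONE;               /* per IEEE 802.3, disable FC  */
}

int main(void)
{
        printf("resolved mode: %d\n",
               resolve_fc(true, true, true, false, FC_FULL));
        return 0;
}

As the comment in the hunk notes, the symmetric case defers to the requested mode, so a request for rx-only pause is never silently upgraded to full flow control.
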
@@ -1154,7 +1161,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1154 | */ | 1161 | */ |
1155 | ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); | 1162 | ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); |
1156 | if (ret_val) { | 1163 | if (ret_val) { |
1157 | hw_dbg(hw, "Error getting link speed and duplex\n"); | 1164 | e_dbg("Error getting link speed and duplex\n"); |
1158 | return ret_val; | 1165 | return ret_val; |
1159 | } | 1166 | } |
1160 | 1167 | ||
@@ -1167,7 +1174,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1167 | */ | 1174 | */ |
1168 | ret_val = e1000e_force_mac_fc(hw); | 1175 | ret_val = e1000e_force_mac_fc(hw); |
1169 | if (ret_val) { | 1176 | if (ret_val) { |
1170 | hw_dbg(hw, "Error forcing flow control settings\n"); | 1177 | e_dbg("Error forcing flow control settings\n"); |
1171 | return ret_val; | 1178 | return ret_val; |
1172 | } | 1179 | } |
1173 | } | 1180 | } |
@@ -1191,21 +1198,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup | |||
1191 | status = er32(STATUS); | 1198 | status = er32(STATUS); |
1192 | if (status & E1000_STATUS_SPEED_1000) { | 1199 | if (status & E1000_STATUS_SPEED_1000) { |
1193 | *speed = SPEED_1000; | 1200 | *speed = SPEED_1000; |
1194 | hw_dbg(hw, "1000 Mbs, "); | 1201 | e_dbg("1000 Mbs, "); |
1195 | } else if (status & E1000_STATUS_SPEED_100) { | 1202 | } else if (status & E1000_STATUS_SPEED_100) { |
1196 | *speed = SPEED_100; | 1203 | *speed = SPEED_100; |
1197 | hw_dbg(hw, "100 Mbs, "); | 1204 | e_dbg("100 Mbs, "); |
1198 | } else { | 1205 | } else { |
1199 | *speed = SPEED_10; | 1206 | *speed = SPEED_10; |
1200 | hw_dbg(hw, "10 Mbs, "); | 1207 | e_dbg("10 Mbs, "); |
1201 | } | 1208 | } |
1202 | 1209 | ||
1203 | if (status & E1000_STATUS_FD) { | 1210 | if (status & E1000_STATUS_FD) { |
1204 | *duplex = FULL_DUPLEX; | 1211 | *duplex = FULL_DUPLEX; |
1205 | hw_dbg(hw, "Full Duplex\n"); | 1212 | e_dbg("Full Duplex\n"); |
1206 | } else { | 1213 | } else { |
1207 | *duplex = HALF_DUPLEX; | 1214 | *duplex = HALF_DUPLEX; |
1208 | hw_dbg(hw, "Half Duplex\n"); | 1215 | e_dbg("Half Duplex\n"); |
1209 | } | 1216 | } |
1210 | 1217 | ||
1211 | return 0; | 1218 | return 0; |
@@ -1251,7 +1258,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) | |||
1251 | } | 1258 | } |
1252 | 1259 | ||
1253 | if (i == timeout) { | 1260 | if (i == timeout) { |
1254 | hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n"); | 1261 | e_dbg("Driver can't access device - SMBI bit is set.\n"); |
1255 | return -E1000_ERR_NVM; | 1262 | return -E1000_ERR_NVM; |
1256 | } | 1263 | } |
1257 | 1264 | ||
@@ -1270,7 +1277,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) | |||
1270 | if (i == timeout) { | 1277 | if (i == timeout) { |
1271 | /* Release semaphores */ | 1278 | /* Release semaphores */ |
1272 | e1000e_put_hw_semaphore(hw); | 1279 | e1000e_put_hw_semaphore(hw); |
1273 | hw_dbg(hw, "Driver can't access the NVM\n"); | 1280 | e_dbg("Driver can't access the NVM\n"); |
1274 | return -E1000_ERR_NVM; | 1281 | return -E1000_ERR_NVM; |
1275 | } | 1282 | } |
1276 | 1283 | ||
@@ -1310,7 +1317,7 @@ s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) | |||
1310 | } | 1317 | } |
1311 | 1318 | ||
1312 | if (i == AUTO_READ_DONE_TIMEOUT) { | 1319 | if (i == AUTO_READ_DONE_TIMEOUT) { |
1313 | hw_dbg(hw, "Auto read by HW from NVM has not completed.\n"); | 1320 | e_dbg("Auto read by HW from NVM has not completed.\n"); |
1314 | return -E1000_ERR_RESET; | 1321 | return -E1000_ERR_RESET; |
1315 | } | 1322 | } |
1316 | 1323 | ||
@@ -1331,7 +1338,7 @@ s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) | |||
1331 | 1338 | ||
1332 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); | 1339 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); |
1333 | if (ret_val) { | 1340 | if (ret_val) { |
1334 | hw_dbg(hw, "NVM Read Error\n"); | 1341 | e_dbg("NVM Read Error\n"); |
1335 | return ret_val; | 1342 | return ret_val; |
1336 | } | 1343 | } |
1337 | 1344 | ||
@@ -1585,7 +1592,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw) | |||
1585 | } | 1592 | } |
1586 | 1593 | ||
1587 | if (!timeout) { | 1594 | if (!timeout) { |
1588 | hw_dbg(hw, "Master requests are pending.\n"); | 1595 | e_dbg("Master requests are pending.\n"); |
1589 | return -E1000_ERR_MASTER_REQUESTS_PENDING; | 1596 | return -E1000_ERR_MASTER_REQUESTS_PENDING; |
1590 | } | 1597 | } |
1591 | 1598 | ||
@@ -1608,7 +1615,7 @@ void e1000e_reset_adaptive(struct e1000_hw *hw) | |||
1608 | mac->ifs_step_size = IFS_STEP; | 1615 | mac->ifs_step_size = IFS_STEP; |
1609 | mac->ifs_ratio = IFS_RATIO; | 1616 | mac->ifs_ratio = IFS_RATIO; |
1610 | 1617 | ||
1611 | mac->in_ifs_mode = 0; | 1618 | mac->in_ifs_mode = false; |
1612 | ew32(AIT, 0); | 1619 | ew32(AIT, 0); |
1613 | } | 1620 | } |
1614 | 1621 | ||
@@ -1625,7 +1632,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw) | |||
1625 | 1632 | ||
1626 | if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { | 1633 | if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { |
1627 | if (mac->tx_packet_delta > MIN_NUM_XMITS) { | 1634 | if (mac->tx_packet_delta > MIN_NUM_XMITS) { |
1628 | mac->in_ifs_mode = 1; | 1635 | mac->in_ifs_mode = true; |
1629 | if (mac->current_ifs_val < mac->ifs_max_val) { | 1636 | if (mac->current_ifs_val < mac->ifs_max_val) { |
1630 | if (!mac->current_ifs_val) | 1637 | if (!mac->current_ifs_val) |
1631 | mac->current_ifs_val = mac->ifs_min_val; | 1638 | mac->current_ifs_val = mac->ifs_min_val; |
@@ -1639,7 +1646,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw) | |||
1639 | if (mac->in_ifs_mode && | 1646 | if (mac->in_ifs_mode && |
1640 | (mac->tx_packet_delta <= MIN_NUM_XMITS)) { | 1647 | (mac->tx_packet_delta <= MIN_NUM_XMITS)) { |
1641 | mac->current_ifs_val = 0; | 1648 | mac->current_ifs_val = 0; |
1642 | mac->in_ifs_mode = 0; | 1649 | mac->in_ifs_mode = false; |
1643 | ew32(AIT, 0); | 1650 | ew32(AIT, 0); |
1644 | } | 1651 | } |
1645 | } | 1652 | } |
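
Beyond switching in_ifs_mode to a proper bool, these two hunks outline the adaptive inter-frame-spacing heuristic: when collisions outpace transmitted packets the driver enters IFS mode and walks the AIT throttle upward, and once traffic quiets down it clears the throttle again. A rough self-contained sketch of that state machine; the field names mirror the driver, but the numeric thresholds below are made up for illustration.

#include <stdbool.h>
#include <stdint.h>

struct ifs_state {
        bool     in_ifs_mode;
        uint32_t current_ifs_val, ifs_min_val, ifs_max_val;
        uint32_t ifs_step_size, ifs_ratio;
        uint32_t ait;                     /* stands in for the AIT register */
};

#define MIN_NUM_XMITS 1000                /* illustrative threshold only */

static void update_adaptive_ifs(struct ifs_state *s,
                                uint32_t collision_delta,
                                uint32_t tx_packet_delta)
{
        if (collision_delta * s->ifs_ratio > tx_packet_delta) {
                if (tx_packet_delta > MIN_NUM_XMITS) {
                        s->in_ifs_mode = true;
                        if (s->current_ifs_val < s->ifs_max_val) {
                                if (!s->current_ifs_val)
                                        s->current_ifs_val = s->ifs_min_val;
                                else
                                        s->current_ifs_val += s->ifs_step_size;
                                s->ait = s->current_ifs_val;
                        }
                }
        } else if (s->in_ifs_mode && tx_packet_delta <= MIN_NUM_XMITS) {
                /* traffic calmed down: leave IFS mode and stop throttling */
                s->current_ifs_val = 0;
                s->in_ifs_mode = false;
                s->ait = 0;
        }
}

int main(void)
{
        struct ifs_state s = { .ifs_min_val = 8, .ifs_max_val = 80,
                               .ifs_step_size = 8, .ifs_ratio = 4 };

        update_adaptive_ifs(&s, 6000, 2000);   /* heavy collision load */
        return s.in_ifs_mode ? 0 : 1;
}
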
@@ -1809,7 +1816,7 @@ s32 e1000e_acquire_nvm(struct e1000_hw *hw) | |||
1809 | if (!timeout) { | 1816 | if (!timeout) { |
1810 | eecd &= ~E1000_EECD_REQ; | 1817 | eecd &= ~E1000_EECD_REQ; |
1811 | ew32(EECD, eecd); | 1818 | ew32(EECD, eecd); |
1812 | hw_dbg(hw, "Could not acquire NVM grant\n"); | 1819 | e_dbg("Could not acquire NVM grant\n"); |
1813 | return -E1000_ERR_NVM; | 1820 | return -E1000_ERR_NVM; |
1814 | } | 1821 | } |
1815 | 1822 | ||
@@ -1914,7 +1921,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) | |||
1914 | } | 1921 | } |
1915 | 1922 | ||
1916 | if (!timeout) { | 1923 | if (!timeout) { |
1917 | hw_dbg(hw, "SPI NVM Status error\n"); | 1924 | e_dbg("SPI NVM Status error\n"); |
1918 | return -E1000_ERR_NVM; | 1925 | return -E1000_ERR_NVM; |
1919 | } | 1926 | } |
1920 | } | 1927 | } |
@@ -1943,7 +1950,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
1943 | */ | 1950 | */ |
1944 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 1951 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
1945 | (words == 0)) { | 1952 | (words == 0)) { |
1946 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1953 | e_dbg("nvm parameter(s) out of bounds\n"); |
1947 | return -E1000_ERR_NVM; | 1954 | return -E1000_ERR_NVM; |
1948 | } | 1955 | } |
1949 | 1956 | ||
@@ -1986,11 +1993,11 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
1986 | */ | 1993 | */ |
1987 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 1994 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
1988 | (words == 0)) { | 1995 | (words == 0)) { |
1989 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1996 | e_dbg("nvm parameter(s) out of bounds\n"); |
1990 | return -E1000_ERR_NVM; | 1997 | return -E1000_ERR_NVM; |
1991 | } | 1998 | } |
1992 | 1999 | ||
1993 | ret_val = nvm->ops.acquire_nvm(hw); | 2000 | ret_val = nvm->ops.acquire(hw); |
1994 | if (ret_val) | 2001 | if (ret_val) |
1995 | return ret_val; | 2002 | return ret_val; |
1996 | 2003 | ||
@@ -2001,7 +2008,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
2001 | 2008 | ||
2002 | ret_val = e1000_ready_nvm_eeprom(hw); | 2009 | ret_val = e1000_ready_nvm_eeprom(hw); |
2003 | if (ret_val) { | 2010 | if (ret_val) { |
2004 | nvm->ops.release_nvm(hw); | 2011 | nvm->ops.release(hw); |
2005 | return ret_val; | 2012 | return ret_val; |
2006 | } | 2013 | } |
2007 | 2014 | ||
@@ -2040,7 +2047,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
2040 | } | 2047 | } |
2041 | 2048 | ||
2042 | msleep(10); | 2049 | msleep(10); |
2043 | nvm->ops.release_nvm(hw); | 2050 | nvm->ops.release(hw); |
2044 | return 0; | 2051 | return 0; |
2045 | } | 2052 | } |
2046 | 2053 | ||
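
Besides the e_dbg conversion, these write-path hunks reflect the ops-table rename from acquire_nvm/release_nvm to plain acquire/release, and they show the bracketing discipline around NVM access: take the semaphore, release it on every failure path, and release it again once the write completes. A generic sketch of that call pattern with a hypothetical ops struct (not the driver's real e1000_nvm_operations layout):

#include <stdio.h>

/* Hypothetical ops table mirroring the acquire/release rename. */
struct nvm_ops {
        int  (*acquire)(void *hw);
        void (*release)(void *hw);
        int  (*ready)(void *hw);
        int  (*write_words)(void *hw, unsigned offset,
                            unsigned words, const unsigned short *data);
};

static int nvm_write(struct nvm_ops *ops, void *hw, unsigned offset,
                     unsigned words, const unsigned short *data)
{
        int ret = ops->acquire(hw);          /* grab the NVM semaphore  */
        if (ret)
                return ret;

        ret = ops->ready(hw);                /* poll the SPI status     */
        if (ret)
                goto release;                /* always undo the acquire */

        ret = ops->write_words(hw, offset, words, data);

release:
        ops->release(hw);
        return ret;
}
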
@@ -2066,7 +2073,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw) | |||
2066 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, | 2073 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, |
2067 | &mac_addr_offset); | 2074 | &mac_addr_offset); |
2068 | if (ret_val) { | 2075 | if (ret_val) { |
2069 | hw_dbg(hw, "NVM Read Error\n"); | 2076 | e_dbg("NVM Read Error\n"); |
2070 | return ret_val; | 2077 | return ret_val; |
2071 | } | 2078 | } |
2072 | if (mac_addr_offset == 0xFFFF) | 2079 | if (mac_addr_offset == 0xFFFF) |
@@ -2081,7 +2088,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw) | |||
2081 | ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, | 2088 | ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, |
2082 | &nvm_data); | 2089 | &nvm_data); |
2083 | if (ret_val) { | 2090 | if (ret_val) { |
2084 | hw_dbg(hw, "NVM Read Error\n"); | 2091 | e_dbg("NVM Read Error\n"); |
2085 | return ret_val; | 2092 | return ret_val; |
2086 | } | 2093 | } |
2087 | if (nvm_data & 0x0001) | 2094 | if (nvm_data & 0x0001) |
@@ -2096,7 +2103,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw) | |||
2096 | offset = mac_addr_offset + (i >> 1); | 2103 | offset = mac_addr_offset + (i >> 1); |
2097 | ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); | 2104 | ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); |
2098 | if (ret_val) { | 2105 | if (ret_val) { |
2099 | hw_dbg(hw, "NVM Read Error\n"); | 2106 | e_dbg("NVM Read Error\n"); |
2100 | return ret_val; | 2107 | return ret_val; |
2101 | } | 2108 | } |
2102 | hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); | 2109 | hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); |
@@ -2129,14 +2136,14 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) | |||
2129 | for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { | 2136 | for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { |
2130 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); | 2137 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); |
2131 | if (ret_val) { | 2138 | if (ret_val) { |
2132 | hw_dbg(hw, "NVM Read Error\n"); | 2139 | e_dbg("NVM Read Error\n"); |
2133 | return ret_val; | 2140 | return ret_val; |
2134 | } | 2141 | } |
2135 | checksum += nvm_data; | 2142 | checksum += nvm_data; |
2136 | } | 2143 | } |
2137 | 2144 | ||
2138 | if (checksum != (u16) NVM_SUM) { | 2145 | if (checksum != (u16) NVM_SUM) { |
2139 | hw_dbg(hw, "NVM Checksum Invalid\n"); | 2146 | e_dbg("NVM Checksum Invalid\n"); |
2140 | return -E1000_ERR_NVM; | 2147 | return -E1000_ERR_NVM; |
2141 | } | 2148 | } |
2142 | 2149 | ||
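
The validation loop above sums every NVM word up to and including the checksum word and expects a fixed total, NVM_SUM; the companion update routine recomputes the checksum as NVM_SUM minus the sum of the data words. A standalone sketch of both sides; the 0xBABA constant and the 64-word region are assumptions carried over from the e1000 family rather than values visible in this hunk.

#include <stdint.h>
#include <stdio.h>

#define NVM_SUM          0xBABA   /* expected total; assumed constant        */
#define NVM_CHECKSUM_REG 0x003F   /* last word of the checksummed area       */

/* Recompute the checksum word so the region sums to NVM_SUM. */
static uint16_t nvm_make_checksum(const uint16_t *words)
{
        uint16_t sum = 0;
        for (int i = 0; i < NVM_CHECKSUM_REG; i++)
                sum += words[i];
        return (uint16_t)(NVM_SUM - sum);
}

/* Mirror of the validation loop: include the checksum word itself. */
static int nvm_checksum_valid(const uint16_t *words)
{
        uint16_t sum = 0;
        for (int i = 0; i < NVM_CHECKSUM_REG + 1; i++)
                sum += words[i];
        return sum == (uint16_t)NVM_SUM;
}

int main(void)
{
        uint16_t image[NVM_CHECKSUM_REG + 1] = { 0x1234, 0x5678 };

        image[NVM_CHECKSUM_REG] = nvm_make_checksum(image);
        printf("checksum %s\n", nvm_checksum_valid(image) ? "ok" : "bad");
        return 0;
}
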
@@ -2160,7 +2167,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) | |||
2160 | for (i = 0; i < NVM_CHECKSUM_REG; i++) { | 2167 | for (i = 0; i < NVM_CHECKSUM_REG; i++) { |
2161 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); | 2168 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); |
2162 | if (ret_val) { | 2169 | if (ret_val) { |
2163 | hw_dbg(hw, "NVM Read Error while updating checksum.\n"); | 2170 | e_dbg("NVM Read Error while updating checksum.\n"); |
2164 | return ret_val; | 2171 | return ret_val; |
2165 | } | 2172 | } |
2166 | checksum += nvm_data; | 2173 | checksum += nvm_data; |
@@ -2168,7 +2175,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) | |||
2168 | checksum = (u16) NVM_SUM - checksum; | 2175 | checksum = (u16) NVM_SUM - checksum; |
2169 | ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); | 2176 | ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); |
2170 | if (ret_val) | 2177 | if (ret_val) |
2171 | hw_dbg(hw, "NVM Write Error while updating checksum.\n"); | 2178 | e_dbg("NVM Write Error while updating checksum.\n"); |
2172 | 2179 | ||
2173 | return ret_val; | 2180 | return ret_val; |
2174 | } | 2181 | } |
@@ -2231,7 +2238,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) | |||
2231 | /* Check that the host interface is enabled. */ | 2238 | /* Check that the host interface is enabled. */ |
2232 | hicr = er32(HICR); | 2239 | hicr = er32(HICR); |
2233 | if ((hicr & E1000_HICR_EN) == 0) { | 2240 | if ((hicr & E1000_HICR_EN) == 0) { |
2234 | hw_dbg(hw, "E1000_HOST_EN bit disabled.\n"); | 2241 | e_dbg("E1000_HOST_EN bit disabled.\n"); |
2235 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | 2242 | return -E1000_ERR_HOST_INTERFACE_COMMAND; |
2236 | } | 2243 | } |
2237 | /* check the previous command is completed */ | 2244 | /* check the previous command is completed */ |
@@ -2243,7 +2250,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) | |||
2243 | } | 2250 | } |
2244 | 2251 | ||
2245 | if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { | 2252 | if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { |
2246 | hw_dbg(hw, "Previous command timeout failed .\n"); | 2253 | e_dbg("Previous command timeout failed .\n"); |
2247 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | 2254 | return -E1000_ERR_HOST_INTERFACE_COMMAND; |
2248 | } | 2255 | } |
2249 | 2256 | ||
@@ -2282,7 +2289,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
2282 | 2289 | ||
2283 | /* No manageability, no filtering */ | 2290 | /* No manageability, no filtering */ |
2284 | if (!e1000e_check_mng_mode(hw)) { | 2291 | if (!e1000e_check_mng_mode(hw)) { |
2285 | hw->mac.tx_pkt_filtering = 0; | 2292 | hw->mac.tx_pkt_filtering = false; |
2286 | return 0; | 2293 | return 0; |
2287 | } | 2294 | } |
2288 | 2295 | ||
@@ -2292,7 +2299,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
2292 | */ | 2299 | */ |
2293 | ret_val = e1000_mng_enable_host_if(hw); | 2300 | ret_val = e1000_mng_enable_host_if(hw); |
2294 | if (ret_val != 0) { | 2301 | if (ret_val != 0) { |
2295 | hw->mac.tx_pkt_filtering = 0; | 2302 | hw->mac.tx_pkt_filtering = false; |
2296 | return ret_val; | 2303 | return ret_val; |
2297 | } | 2304 | } |
2298 | 2305 | ||
@@ -2311,17 +2318,17 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
2311 | * take the safe route of assuming Tx filtering is enabled. | 2318 | * take the safe route of assuming Tx filtering is enabled. |
2312 | */ | 2319 | */ |
2313 | if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { | 2320 | if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { |
2314 | hw->mac.tx_pkt_filtering = 1; | 2321 | hw->mac.tx_pkt_filtering = true; |
2315 | return 1; | 2322 | return 1; |
2316 | } | 2323 | } |
2317 | 2324 | ||
2318 | /* Cookie area is valid, make the final check for filtering. */ | 2325 | /* Cookie area is valid, make the final check for filtering. */ |
2319 | if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { | 2326 | if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { |
2320 | hw->mac.tx_pkt_filtering = 0; | 2327 | hw->mac.tx_pkt_filtering = false; |
2321 | return 0; | 2328 | return 0; |
2322 | } | 2329 | } |
2323 | 2330 | ||
2324 | hw->mac.tx_pkt_filtering = 1; | 2331 | hw->mac.tx_pkt_filtering = true; |
2325 | return 1; | 2332 | return 1; |
2326 | } | 2333 | } |
2327 | 2334 | ||
@@ -2353,7 +2360,7 @@ static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, | |||
2353 | } | 2360 | } |
2354 | 2361 | ||
2355 | /** | 2362 | /** |
2356 | * e1000_mng_host_if_write - Writes to the manageability host interface | 2363 | * e1000_mng_host_if_write - Write to the manageability host interface |
2357 | * @hw: pointer to the HW structure | 2364 | * @hw: pointer to the HW structure |
2358 | * @buffer: pointer to the host interface buffer | 2365 | * @buffer: pointer to the host interface buffer |
2359 | * @length: size of the buffer | 2366 | * @length: size of the buffer |
@@ -2478,7 +2485,7 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) | |||
2478 | { | 2485 | { |
2479 | u32 manc; | 2486 | u32 manc; |
2480 | u32 fwsm, factps; | 2487 | u32 fwsm, factps; |
2481 | bool ret_val = 0; | 2488 | bool ret_val = false; |
2482 | 2489 | ||
2483 | manc = er32(MANC); | 2490 | manc = er32(MANC); |
2484 | 2491 | ||
@@ -2493,13 +2500,13 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) | |||
2493 | if (!(factps & E1000_FACTPS_MNGCG) && | 2500 | if (!(factps & E1000_FACTPS_MNGCG) && |
2494 | ((fwsm & E1000_FWSM_MODE_MASK) == | 2501 | ((fwsm & E1000_FWSM_MODE_MASK) == |
2495 | (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { | 2502 | (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { |
2496 | ret_val = 1; | 2503 | ret_val = true; |
2497 | return ret_val; | 2504 | return ret_val; |
2498 | } | 2505 | } |
2499 | } else { | 2506 | } else { |
2500 | if ((manc & E1000_MANC_SMBUS_EN) && | 2507 | if ((manc & E1000_MANC_SMBUS_EN) && |
2501 | !(manc & E1000_MANC_ASF_EN)) { | 2508 | !(manc & E1000_MANC_ASF_EN)) { |
2502 | ret_val = 1; | 2509 | ret_val = true; |
2503 | return ret_val; | 2510 | return ret_val; |
2504 | } | 2511 | } |
2505 | } | 2512 | } |
@@ -2514,14 +2521,14 @@ s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num) | |||
2514 | 2521 | ||
2515 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); | 2522 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); |
2516 | if (ret_val) { | 2523 | if (ret_val) { |
2517 | hw_dbg(hw, "NVM Read Error\n"); | 2524 | e_dbg("NVM Read Error\n"); |
2518 | return ret_val; | 2525 | return ret_val; |
2519 | } | 2526 | } |
2520 | *pba_num = (u32)(nvm_data << 16); | 2527 | *pba_num = (u32)(nvm_data << 16); |
2521 | 2528 | ||
2522 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); | 2529 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); |
2523 | if (ret_val) { | 2530 | if (ret_val) { |
2524 | hw_dbg(hw, "NVM Read Error\n"); | 2531 | e_dbg("NVM Read Error\n"); |
2525 | return ret_val; | 2532 | return ret_val; |
2526 | } | 2533 | } |
2527 | *pba_num |= nvm_data; | 2534 | *pba_num |= nvm_data; |
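
e1000e_read_pba_num() assembles the board identifier from two consecutive NVM words, high half first, so the arithmetic is a shift and an OR. A tiny sketch (the NVM offsets named in the comment are assumed, not shown in the hunk):

#include <stdint.h>
#include <stdio.h>

/* Combine the two PBA words read from NVM (typically offsets 0x8 and 0x9;
 * the offsets are an assumption, only the combination is shown above). */
static uint32_t pba_from_words(uint16_t pba_word0, uint16_t pba_word1)
{
        return ((uint32_t)pba_word0 << 16) | pba_word1;
}

int main(void)
{
        /* e.g. words 0xE112 and 0x30FF -> PBA number 0xE11230FF */
        printf("PBA: %08X\n", (unsigned)pba_from_words(0xE112, 0x30FF));
        return 0;
}
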
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 0687c6aa4e46..c3105c5087e0 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -65,17 +65,6 @@ static const struct e1000_info *e1000_info_tbl[] = { | |||
65 | [board_pchlan] = &e1000_pch_info, | 65 | [board_pchlan] = &e1000_pch_info, |
66 | }; | 66 | }; |
67 | 67 | ||
68 | #ifdef DEBUG | ||
69 | /** | ||
70 | * e1000_get_hw_dev_name - return device name string | ||
71 | * used by hardware layer to print debugging information | ||
72 | **/ | ||
73 | char *e1000e_get_hw_dev_name(struct e1000_hw *hw) | ||
74 | { | ||
75 | return hw->adapter->netdev->name; | ||
76 | } | ||
77 | #endif | ||
78 | |||
79 | /** | 68 | /** |
80 | * e1000_desc_unused - calculate if we have unused descriptors | 69 | * e1000_desc_unused - calculate if we have unused descriptors |
81 | **/ | 70 | **/ |
@@ -167,7 +156,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
167 | struct e1000_buffer *buffer_info; | 156 | struct e1000_buffer *buffer_info; |
168 | struct sk_buff *skb; | 157 | struct sk_buff *skb; |
169 | unsigned int i; | 158 | unsigned int i; |
170 | unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; | 159 | unsigned int bufsz = adapter->rx_buffer_len; |
171 | 160 | ||
172 | i = rx_ring->next_to_use; | 161 | i = rx_ring->next_to_use; |
173 | buffer_info = &rx_ring->buffer_info[i]; | 162 | buffer_info = &rx_ring->buffer_info[i]; |
@@ -179,20 +168,13 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
179 | goto map_skb; | 168 | goto map_skb; |
180 | } | 169 | } |
181 | 170 | ||
182 | skb = netdev_alloc_skb(netdev, bufsz); | 171 | skb = netdev_alloc_skb_ip_align(netdev, bufsz); |
183 | if (!skb) { | 172 | if (!skb) { |
184 | /* Better luck next round */ | 173 | /* Better luck next round */ |
185 | adapter->alloc_rx_buff_failed++; | 174 | adapter->alloc_rx_buff_failed++; |
186 | break; | 175 | break; |
187 | } | 176 | } |
188 | 177 | ||
189 | /* | ||
190 | * Make buffer alignment 2 beyond a 16 byte boundary | ||
191 | * this will result in a 16 byte aligned IP header after | ||
192 | * the 14 byte MAC header is removed | ||
193 | */ | ||
194 | skb_reserve(skb, NET_IP_ALIGN); | ||
195 | |||
196 | buffer_info->skb = skb; | 178 | buffer_info->skb = skb; |
197 | map_skb: | 179 | map_skb: |
198 | buffer_info->dma = pci_map_single(pdev, skb->data, | 180 | buffer_info->dma = pci_map_single(pdev, skb->data, |
@@ -284,21 +266,14 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
284 | cpu_to_le64(ps_page->dma); | 266 | cpu_to_le64(ps_page->dma); |
285 | } | 267 | } |
286 | 268 | ||
287 | skb = netdev_alloc_skb(netdev, | 269 | skb = netdev_alloc_skb_ip_align(netdev, |
288 | adapter->rx_ps_bsize0 + NET_IP_ALIGN); | 270 | adapter->rx_ps_bsize0); |
289 | 271 | ||
290 | if (!skb) { | 272 | if (!skb) { |
291 | adapter->alloc_rx_buff_failed++; | 273 | adapter->alloc_rx_buff_failed++; |
292 | break; | 274 | break; |
293 | } | 275 | } |
294 | 276 | ||
295 | /* | ||
296 | * Make buffer alignment 2 beyond a 16 byte boundary | ||
297 | * this will result in a 16 byte aligned IP header after | ||
298 | * the 14 byte MAC header is removed | ||
299 | */ | ||
300 | skb_reserve(skb, NET_IP_ALIGN); | ||
301 | |||
302 | buffer_info->skb = skb; | 277 | buffer_info->skb = skb; |
303 | buffer_info->dma = pci_map_single(pdev, skb->data, | 278 | buffer_info->dma = pci_map_single(pdev, skb->data, |
304 | adapter->rx_ps_bsize0, | 279 | adapter->rx_ps_bsize0, |
@@ -359,9 +334,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, | |||
359 | struct e1000_buffer *buffer_info; | 334 | struct e1000_buffer *buffer_info; |
360 | struct sk_buff *skb; | 335 | struct sk_buff *skb; |
361 | unsigned int i; | 336 | unsigned int i; |
362 | unsigned int bufsz = 256 - | 337 | unsigned int bufsz = 256 - 16 /* for skb_reserve */; |
363 | 16 /* for skb_reserve */ - | ||
364 | NET_IP_ALIGN; | ||
365 | 338 | ||
366 | i = rx_ring->next_to_use; | 339 | i = rx_ring->next_to_use; |
367 | buffer_info = &rx_ring->buffer_info[i]; | 340 | buffer_info = &rx_ring->buffer_info[i]; |
@@ -373,19 +346,13 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, | |||
373 | goto check_page; | 346 | goto check_page; |
374 | } | 347 | } |
375 | 348 | ||
376 | skb = netdev_alloc_skb(netdev, bufsz); | 349 | skb = netdev_alloc_skb_ip_align(netdev, bufsz); |
377 | if (unlikely(!skb)) { | 350 | if (unlikely(!skb)) { |
378 | /* Better luck next round */ | 351 | /* Better luck next round */ |
379 | adapter->alloc_rx_buff_failed++; | 352 | adapter->alloc_rx_buff_failed++; |
380 | break; | 353 | break; |
381 | } | 354 | } |
382 | 355 | ||
383 | /* Make buffer alignment 2 beyond a 16 byte boundary | ||
384 | * this will result in a 16 byte aligned IP header after | ||
385 | * the 14 byte MAC header is removed | ||
386 | */ | ||
387 | skb_reserve(skb, NET_IP_ALIGN); | ||
388 | |||
389 | buffer_info->skb = skb; | 356 | buffer_info->skb = skb; |
390 | check_page: | 357 | check_page: |
391 | /* allocate a new page if necessary */ | 358 | /* allocate a new page if necessary */ |
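
All three receive-buffer allocation paths in netdev.c drop the open-coded netdev_alloc_skb() plus skb_reserve(NET_IP_ALIGN) pair in favour of netdev_alloc_skb_ip_align(), which performs the same reservation internally so the IP header lands 16-byte aligned behind the 14-byte Ethernet header. A before/after fragment, kernel build context assumed:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Old pattern: allocate with slack, then shift the data pointer. */
static struct sk_buff *rx_alloc_old(struct net_device *netdev,
                                    unsigned int bufsz)
{
        struct sk_buff *skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);

        if (skb)
                skb_reserve(skb, NET_IP_ALIGN);
        return skb;
}

/* New pattern: the helper hides the alignment reservation. */
static struct sk_buff *rx_alloc_new(struct net_device *netdev,
                                    unsigned int bufsz)
{
        return netdev_alloc_skb_ip_align(netdev, bufsz);
}

The jumbo path's bufsz computation stops subtracting NET_IP_ALIGN for the same reason: the helper already accounts for it.
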
@@ -437,6 +404,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
437 | { | 404 | { |
438 | struct net_device *netdev = adapter->netdev; | 405 | struct net_device *netdev = adapter->netdev; |
439 | struct pci_dev *pdev = adapter->pdev; | 406 | struct pci_dev *pdev = adapter->pdev; |
407 | struct e1000_hw *hw = &adapter->hw; | ||
440 | struct e1000_ring *rx_ring = adapter->rx_ring; | 408 | struct e1000_ring *rx_ring = adapter->rx_ring; |
441 | struct e1000_rx_desc *rx_desc, *next_rxd; | 409 | struct e1000_rx_desc *rx_desc, *next_rxd; |
442 | struct e1000_buffer *buffer_info, *next_buffer; | 410 | struct e1000_buffer *buffer_info, *next_buffer; |
@@ -486,8 +454,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
486 | * packet, also make sure the frame isn't just CRC only */ | 454 | * packet, also make sure the frame isn't just CRC only */ |
487 | if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { | 455 | if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { |
488 | /* All receives must fit into a single buffer */ | 456 | /* All receives must fit into a single buffer */ |
489 | e_dbg("%s: Receive packet consumed multiple buffers\n", | 457 | e_dbg("Receive packet consumed multiple buffers\n"); |
490 | netdev->name); | ||
491 | /* recycle */ | 458 | /* recycle */ |
492 | buffer_info->skb = skb; | 459 | buffer_info->skb = skb; |
493 | goto next_desc; | 460 | goto next_desc; |
@@ -513,9 +480,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
513 | */ | 480 | */ |
514 | if (length < copybreak) { | 481 | if (length < copybreak) { |
515 | struct sk_buff *new_skb = | 482 | struct sk_buff *new_skb = |
516 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); | 483 | netdev_alloc_skb_ip_align(netdev, length); |
517 | if (new_skb) { | 484 | if (new_skb) { |
518 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
519 | skb_copy_to_linear_data_offset(new_skb, | 485 | skb_copy_to_linear_data_offset(new_skb, |
520 | -NET_IP_ALIGN, | 486 | -NET_IP_ALIGN, |
521 | (skb->data - | 487 | (skb->data - |
@@ -560,33 +526,52 @@ next_desc: | |||
560 | 526 | ||
561 | adapter->total_rx_bytes += total_rx_bytes; | 527 | adapter->total_rx_bytes += total_rx_bytes; |
562 | adapter->total_rx_packets += total_rx_packets; | 528 | adapter->total_rx_packets += total_rx_packets; |
563 | adapter->net_stats.rx_bytes += total_rx_bytes; | 529 | netdev->stats.rx_bytes += total_rx_bytes; |
564 | adapter->net_stats.rx_packets += total_rx_packets; | 530 | netdev->stats.rx_packets += total_rx_packets; |
565 | return cleaned; | 531 | return cleaned; |
566 | } | 532 | } |
567 | 533 | ||
568 | static void e1000_put_txbuf(struct e1000_adapter *adapter, | 534 | static void e1000_put_txbuf(struct e1000_adapter *adapter, |
569 | struct e1000_buffer *buffer_info) | 535 | struct e1000_buffer *buffer_info) |
570 | { | 536 | { |
571 | buffer_info->dma = 0; | 537 | if (buffer_info->dma) { |
538 | if (buffer_info->mapped_as_page) | ||
539 | pci_unmap_page(adapter->pdev, buffer_info->dma, | ||
540 | buffer_info->length, PCI_DMA_TODEVICE); | ||
541 | else | ||
542 | pci_unmap_single(adapter->pdev, buffer_info->dma, | ||
543 | buffer_info->length, | ||
544 | PCI_DMA_TODEVICE); | ||
545 | buffer_info->dma = 0; | ||
546 | } | ||
572 | if (buffer_info->skb) { | 547 | if (buffer_info->skb) { |
573 | skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb, | ||
574 | DMA_TO_DEVICE); | ||
575 | dev_kfree_skb_any(buffer_info->skb); | 548 | dev_kfree_skb_any(buffer_info->skb); |
576 | buffer_info->skb = NULL; | 549 | buffer_info->skb = NULL; |
577 | } | 550 | } |
578 | buffer_info->time_stamp = 0; | 551 | buffer_info->time_stamp = 0; |
579 | } | 552 | } |
580 | 553 | ||
581 | static void e1000_print_tx_hang(struct e1000_adapter *adapter) | 554 | static void e1000_print_hw_hang(struct work_struct *work) |
582 | { | 555 | { |
556 | struct e1000_adapter *adapter = container_of(work, | ||
557 | struct e1000_adapter, | ||
558 | print_hang_task); | ||
583 | struct e1000_ring *tx_ring = adapter->tx_ring; | 559 | struct e1000_ring *tx_ring = adapter->tx_ring; |
584 | unsigned int i = tx_ring->next_to_clean; | 560 | unsigned int i = tx_ring->next_to_clean; |
585 | unsigned int eop = tx_ring->buffer_info[i].next_to_watch; | 561 | unsigned int eop = tx_ring->buffer_info[i].next_to_watch; |
586 | struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); | 562 | struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); |
563 | struct e1000_hw *hw = &adapter->hw; | ||
564 | u16 phy_status, phy_1000t_status, phy_ext_status; | ||
565 | u16 pci_status; | ||
566 | |||
567 | e1e_rphy(hw, PHY_STATUS, &phy_status); | ||
568 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); | ||
569 | e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); | ||
570 | |||
571 | pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); | ||
587 | 572 | ||
588 | /* detected Tx unit hang */ | 573 | /* detected Hardware unit hang */ |
589 | e_err("Detected Tx Unit Hang:\n" | 574 | e_err("Detected Hardware Unit Hang:\n" |
590 | " TDH <%x>\n" | 575 | " TDH <%x>\n" |
591 | " TDT <%x>\n" | 576 | " TDT <%x>\n" |
592 | " next_to_use <%x>\n" | 577 | " next_to_use <%x>\n" |
@@ -595,7 +580,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter) | |||
595 | " time_stamp <%lx>\n" | 580 | " time_stamp <%lx>\n" |
596 | " next_to_watch <%x>\n" | 581 | " next_to_watch <%x>\n" |
597 | " jiffies <%lx>\n" | 582 | " jiffies <%lx>\n" |
598 | " next_to_watch.status <%x>\n", | 583 | " next_to_watch.status <%x>\n" |
584 | "MAC Status <%x>\n" | ||
585 | "PHY Status <%x>\n" | ||
586 | "PHY 1000BASE-T Status <%x>\n" | ||
587 | "PHY Extended Status <%x>\n" | ||
588 | "PCI Status <%x>\n", | ||
599 | readl(adapter->hw.hw_addr + tx_ring->head), | 589 | readl(adapter->hw.hw_addr + tx_ring->head), |
600 | readl(adapter->hw.hw_addr + tx_ring->tail), | 590 | readl(adapter->hw.hw_addr + tx_ring->tail), |
601 | tx_ring->next_to_use, | 591 | tx_ring->next_to_use, |
@@ -603,7 +593,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter) | |||
603 | tx_ring->buffer_info[eop].time_stamp, | 593 | tx_ring->buffer_info[eop].time_stamp, |
604 | eop, | 594 | eop, |
605 | jiffies, | 595 | jiffies, |
606 | eop_desc->upper.fields.status); | 596 | eop_desc->upper.fields.status, |
597 | er32(STATUS), | ||
598 | phy_status, | ||
599 | phy_1000t_status, | ||
600 | phy_ext_status, | ||
601 | pci_status); | ||
607 | } | 602 | } |
608 | 603 | ||
609 | /** | 604 | /** |
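
e1000_print_tx_hang() becomes e1000_print_hw_hang(), a workqueue handler: the transmit-clean path only schedules the work (see the schedule_work() call in the hang-detection hunk below), and the handler recovers its adapter with container_of() so the slow PHY and PCI config-space reads happen in process context. A stripped-down sketch of the pattern; the print_hang_task field and its INIT_WORK() wiring at probe time are assumptions about code not shown in these hunks.

#include <linux/workqueue.h>

struct my_adapter {
        struct work_struct print_hang_task;
        /* ... hardware state ... */
};

/* Runs in process context, so slow diagnostics (PHY/PCI reads) are fine. */
static void my_print_hw_hang(struct work_struct *work)
{
        struct my_adapter *adapter =
                container_of(work, struct my_adapter, print_hang_task);

        /* gather and log MAC/PHY/PCI status for 'adapter' here */
        (void)adapter;
}

/* At init time (e.g. probe):  INIT_WORK(&adapter->print_hang_task,
 *                                       my_print_hw_hang);
 * From the hang check:        schedule_work(&adapter->print_hang_task);
 */
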
@@ -677,21 +672,23 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |||
677 | } | 672 | } |
678 | 673 | ||
679 | if (adapter->detect_tx_hung) { | 674 | if (adapter->detect_tx_hung) { |
680 | /* Detect a transmit hang in hardware, this serializes the | 675 | /* |
681 | * check with the clearing of time_stamp and movement of i */ | 676 | * Detect a transmit hang in hardware, this serializes the |
677 | * check with the clearing of time_stamp and movement of i | ||
678 | */ | ||
682 | adapter->detect_tx_hung = 0; | 679 | adapter->detect_tx_hung = 0; |
683 | if (tx_ring->buffer_info[i].time_stamp && | 680 | if (tx_ring->buffer_info[i].time_stamp && |
684 | time_after(jiffies, tx_ring->buffer_info[i].time_stamp | 681 | time_after(jiffies, tx_ring->buffer_info[i].time_stamp |
685 | + (adapter->tx_timeout_factor * HZ)) | 682 | + (adapter->tx_timeout_factor * HZ)) && |
686 | && !(er32(STATUS) & E1000_STATUS_TXOFF)) { | 683 | !(er32(STATUS) & E1000_STATUS_TXOFF)) { |
687 | e1000_print_tx_hang(adapter); | 684 | schedule_work(&adapter->print_hang_task); |
688 | netif_stop_queue(netdev); | 685 | netif_stop_queue(netdev); |
689 | } | 686 | } |
690 | } | 687 | } |
691 | adapter->total_tx_bytes += total_tx_bytes; | 688 | adapter->total_tx_bytes += total_tx_bytes; |
692 | adapter->total_tx_packets += total_tx_packets; | 689 | adapter->total_tx_packets += total_tx_packets; |
693 | adapter->net_stats.tx_bytes += total_tx_bytes; | 690 | netdev->stats.tx_bytes += total_tx_bytes; |
694 | adapter->net_stats.tx_packets += total_tx_packets; | 691 | netdev->stats.tx_packets += total_tx_packets; |
695 | return (count < tx_ring->count); | 692 | return (count < tx_ring->count); |
696 | } | 693 | } |
697 | 694 | ||
@@ -705,6 +702,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |||
705 | static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | 702 | static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, |
706 | int *work_done, int work_to_do) | 703 | int *work_done, int work_to_do) |
707 | { | 704 | { |
705 | struct e1000_hw *hw = &adapter->hw; | ||
708 | union e1000_rx_desc_packet_split *rx_desc, *next_rxd; | 706 | union e1000_rx_desc_packet_split *rx_desc, *next_rxd; |
709 | struct net_device *netdev = adapter->netdev; | 707 | struct net_device *netdev = adapter->netdev; |
710 | struct pci_dev *pdev = adapter->pdev; | 708 | struct pci_dev *pdev = adapter->pdev; |
@@ -748,8 +746,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
748 | buffer_info->dma = 0; | 746 | buffer_info->dma = 0; |
749 | 747 | ||
750 | if (!(staterr & E1000_RXD_STAT_EOP)) { | 748 | if (!(staterr & E1000_RXD_STAT_EOP)) { |
751 | e_dbg("%s: Packet Split buffers didn't pick up the " | 749 | e_dbg("Packet Split buffers didn't pick up the full " |
752 | "full packet\n", netdev->name); | 750 | "packet\n"); |
753 | dev_kfree_skb_irq(skb); | 751 | dev_kfree_skb_irq(skb); |
754 | goto next_desc; | 752 | goto next_desc; |
755 | } | 753 | } |
@@ -762,8 +760,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
762 | length = le16_to_cpu(rx_desc->wb.middle.length0); | 760 | length = le16_to_cpu(rx_desc->wb.middle.length0); |
763 | 761 | ||
764 | if (!length) { | 762 | if (!length) { |
765 | e_dbg("%s: Last part of the packet spanning multiple " | 763 | e_dbg("Last part of the packet spanning multiple " |
766 | "descriptors\n", netdev->name); | 764 | "descriptors\n"); |
767 | dev_kfree_skb_irq(skb); | 765 | dev_kfree_skb_irq(skb); |
768 | goto next_desc; | 766 | goto next_desc; |
769 | } | 767 | } |
@@ -871,8 +869,8 @@ next_desc: | |||
871 | 869 | ||
872 | adapter->total_rx_bytes += total_rx_bytes; | 870 | adapter->total_rx_bytes += total_rx_bytes; |
873 | adapter->total_rx_packets += total_rx_packets; | 871 | adapter->total_rx_packets += total_rx_packets; |
874 | adapter->net_stats.rx_bytes += total_rx_bytes; | 872 | netdev->stats.rx_bytes += total_rx_bytes; |
875 | adapter->net_stats.rx_packets += total_rx_packets; | 873 | netdev->stats.rx_packets += total_rx_packets; |
876 | return cleaned; | 874 | return cleaned; |
877 | } | 875 | } |
878 | 876 | ||
@@ -1051,8 +1049,8 @@ next_desc: | |||
1051 | 1049 | ||
1052 | adapter->total_rx_bytes += total_rx_bytes; | 1050 | adapter->total_rx_bytes += total_rx_bytes; |
1053 | adapter->total_rx_packets += total_rx_packets; | 1051 | adapter->total_rx_packets += total_rx_packets; |
1054 | adapter->net_stats.rx_bytes += total_rx_bytes; | 1052 | netdev->stats.rx_bytes += total_rx_bytes; |
1055 | adapter->net_stats.rx_packets += total_rx_packets; | 1053 | netdev->stats.rx_packets += total_rx_packets; |
1056 | return cleaned; | 1054 | return cleaned; |
1057 | } | 1055 | } |
1058 | 1056 | ||
@@ -1199,7 +1197,7 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
1199 | struct e1000_hw *hw = &adapter->hw; | 1197 | struct e1000_hw *hw = &adapter->hw; |
1200 | u32 rctl, icr = er32(ICR); | 1198 | u32 rctl, icr = er32(ICR); |
1201 | 1199 | ||
1202 | if (!icr) | 1200 | if (!icr || test_bit(__E1000_DOWN, &adapter->state)) |
1203 | return IRQ_NONE; /* Not our interrupt */ | 1201 | return IRQ_NONE; /* Not our interrupt */ |
1204 | 1202 | ||
1205 | /* | 1203 | /* |
@@ -1481,7 +1479,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter) | |||
1481 | else | 1479 | else |
1482 | memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); | 1480 | memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); |
1483 | err = request_irq(adapter->msix_entries[vector].vector, | 1481 | err = request_irq(adapter->msix_entries[vector].vector, |
1484 | &e1000_intr_msix_rx, 0, adapter->rx_ring->name, | 1482 | e1000_intr_msix_rx, 0, adapter->rx_ring->name, |
1485 | netdev); | 1483 | netdev); |
1486 | if (err) | 1484 | if (err) |
1487 | goto out; | 1485 | goto out; |
@@ -1494,7 +1492,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter) | |||
1494 | else | 1492 | else |
1495 | memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); | 1493 | memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); |
1496 | err = request_irq(adapter->msix_entries[vector].vector, | 1494 | err = request_irq(adapter->msix_entries[vector].vector, |
1497 | &e1000_intr_msix_tx, 0, adapter->tx_ring->name, | 1495 | e1000_intr_msix_tx, 0, adapter->tx_ring->name, |
1498 | netdev); | 1496 | netdev); |
1499 | if (err) | 1497 | if (err) |
1500 | goto out; | 1498 | goto out; |
@@ -1503,7 +1501,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter) | |||
1503 | vector++; | 1501 | vector++; |
1504 | 1502 | ||
1505 | err = request_irq(adapter->msix_entries[vector].vector, | 1503 | err = request_irq(adapter->msix_entries[vector].vector, |
1506 | &e1000_msix_other, 0, netdev->name, netdev); | 1504 | e1000_msix_other, 0, netdev->name, netdev); |
1507 | if (err) | 1505 | if (err) |
1508 | goto out; | 1506 | goto out; |
1509 | 1507 | ||
@@ -1534,7 +1532,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) | |||
1534 | e1000e_set_interrupt_capability(adapter); | 1532 | e1000e_set_interrupt_capability(adapter); |
1535 | } | 1533 | } |
1536 | if (adapter->flags & FLAG_MSI_ENABLED) { | 1534 | if (adapter->flags & FLAG_MSI_ENABLED) { |
1537 | err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0, | 1535 | err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, |
1538 | netdev->name, netdev); | 1536 | netdev->name, netdev); |
1539 | if (!err) | 1537 | if (!err) |
1540 | return err; | 1538 | return err; |
@@ -1544,7 +1542,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) | |||
1544 | adapter->int_mode = E1000E_INT_MODE_LEGACY; | 1542 | adapter->int_mode = E1000E_INT_MODE_LEGACY; |
1545 | } | 1543 | } |
1546 | 1544 | ||
1547 | err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED, | 1545 | err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, |
1548 | netdev->name, netdev); | 1546 | netdev->name, netdev); |
1549 | if (err) | 1547 | if (err) |
1550 | e_err("Unable to allocate interrupt, Error: %d\n", err); | 1548 | e_err("Unable to allocate interrupt, Error: %d\n", err); |
@@ -2040,11 +2038,14 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | |||
2040 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && | 2038 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && |
2041 | (vid == adapter->mng_vlan_id)) | 2039 | (vid == adapter->mng_vlan_id)) |
2042 | return; | 2040 | return; |
2041 | |||
2043 | /* add VID to filter table */ | 2042 | /* add VID to filter table */ |
2044 | index = (vid >> 5) & 0x7F; | 2043 | if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { |
2045 | vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); | 2044 | index = (vid >> 5) & 0x7F; |
2046 | vfta |= (1 << (vid & 0x1F)); | 2045 | vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); |
2047 | e1000e_write_vfta(hw, index, vfta); | 2046 | vfta |= (1 << (vid & 0x1F)); |
2047 | hw->mac.ops.write_vfta(hw, index, vfta); | ||
2048 | } | ||
2048 | } | 2049 | } |
2049 | 2050 | ||
2050 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | 2051 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) |
@@ -2069,10 +2070,12 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
2069 | } | 2070 | } |
2070 | 2071 | ||
2071 | /* remove VID from filter table */ | 2072 | /* remove VID from filter table */ |
2072 | index = (vid >> 5) & 0x7F; | 2073 | if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { |
2073 | vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); | 2074 | index = (vid >> 5) & 0x7F; |
2074 | vfta &= ~(1 << (vid & 0x1F)); | 2075 | vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); |
2075 | e1000e_write_vfta(hw, index, vfta); | 2076 | vfta &= ~(1 << (vid & 0x1F)); |
2077 | hw->mac.ops.write_vfta(hw, index, vfta); | ||
2078 | } | ||
2076 | } | 2079 | } |
2077 | 2080 | ||
2078 | static void e1000_update_mng_vlan(struct e1000_adapter *adapter) | 2081 | static void e1000_update_mng_vlan(struct e1000_adapter *adapter) |
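
The VLAN add and kill paths now touch the filter table only when FLAG_HAS_HW_VLAN_FILTER is set and go through the mac ops write_vfta pointer, but the index math is unchanged: bits 11:5 of the VLAN ID select one of 128 VFTA registers and bits 4:0 select the bit inside it. A standalone sketch of that set/clear helper:

#include <stdint.h>
#include <stdio.h>

#define VFTA_ENTRIES 128

/* vid[11:5] picks the register, vid[4:0] picks the bit within it. */
static void vfta_set(uint32_t vfta[VFTA_ENTRIES], uint16_t vid, int enable)
{
        uint32_t index = (vid >> 5) & 0x7F;
        uint32_t mask  = 1u << (vid & 0x1F);

        if (enable)
                vfta[index] |= mask;
        else
                vfta[index] &= ~mask;
}

int main(void)
{
        uint32_t vfta[VFTA_ENTRIES] = { 0 };

        vfta_set(vfta, 100, 1);           /* VLAN 100 -> reg 3, bit 4 */
        printf("reg 3 = %08X\n", (unsigned)vfta[3]);
        return 0;
}
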
@@ -2464,8 +2467,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2464 | ew32(ITR, 1000000000 / (adapter->itr * 256)); | 2467 | ew32(ITR, 1000000000 / (adapter->itr * 256)); |
2465 | 2468 | ||
2466 | ctrl_ext = er32(CTRL_EXT); | 2469 | ctrl_ext = er32(CTRL_EXT); |
2467 | /* Reset delay timers after every interrupt */ | ||
2468 | ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; | ||
2469 | /* Auto-Mask interrupts upon ICR access */ | 2470 | /* Auto-Mask interrupts upon ICR access */ |
2470 | ctrl_ext |= E1000_CTRL_EXT_IAME; | 2471 | ctrl_ext |= E1000_CTRL_EXT_IAME; |
2471 | ew32(IAM, 0xffffffff); | 2472 | ew32(IAM, 0xffffffff); |
@@ -2507,21 +2508,23 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2507 | * packet size is equal or larger than the specified value (in 8 byte | 2508 | * packet size is equal or larger than the specified value (in 8 byte |
2508 | * units), e.g. using jumbo frames when setting to E1000_ERT_2048 | 2509 | * units), e.g. using jumbo frames when setting to E1000_ERT_2048 |
2509 | */ | 2510 | */ |
2510 | if ((adapter->flags & FLAG_HAS_ERT) && | 2511 | if (adapter->flags & FLAG_HAS_ERT) { |
2511 | (adapter->netdev->mtu > ETH_DATA_LEN)) { | 2512 | if (adapter->netdev->mtu > ETH_DATA_LEN) { |
2512 | u32 rxdctl = er32(RXDCTL(0)); | 2513 | u32 rxdctl = er32(RXDCTL(0)); |
2513 | ew32(RXDCTL(0), rxdctl | 0x3); | 2514 | ew32(RXDCTL(0), rxdctl | 0x3); |
2514 | ew32(ERT, E1000_ERT_2048 | (1 << 13)); | 2515 | ew32(ERT, E1000_ERT_2048 | (1 << 13)); |
2515 | /* | 2516 | /* |
2516 | * With jumbo frames and early-receive enabled, excessive | 2517 | * With jumbo frames and early-receive enabled, |
2517 | * C4->C2 latencies result in dropped transactions. | 2518 | * excessive C-state transition latencies result in |
2518 | */ | 2519 | * dropped transactions. |
2519 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, | 2520 | */ |
2520 | e1000e_driver_name, 55); | 2521 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, |
2521 | } else { | 2522 | adapter->netdev->name, 55); |
2522 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, | 2523 | } else { |
2523 | e1000e_driver_name, | 2524 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, |
2524 | PM_QOS_DEFAULT_VALUE); | 2525 | adapter->netdev->name, |
2526 | PM_QOS_DEFAULT_VALUE); | ||
2527 | } | ||
2525 | } | 2528 | } |
2526 | 2529 | ||
2527 | /* Enable Receives */ | 2530 | /* Enable Receives */ |
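
The early-receive block is restructured rather than changed: on ERT-capable parts the receive threshold and the tighter 55 microsecond CPU DMA latency requirement apply only while jumbo frames are in use, the requirement relaxes back to the default otherwise, and the PM QoS entry is now keyed by the netdev name instead of the driver name. A condensed kernel-context sketch of just the latency decision, using the pm_qos_update_requirement() interface that appears in the hunk:

#include <linux/pm_qos_params.h>
#include <linux/netdevice.h>

static void update_rx_latency_req(struct net_device *netdev, bool has_ert)
{
        if (!has_ert)
                return;

        if (netdev->mtu > ETH_DATA_LEN)
                /* jumbo + early receive: cap CPU DMA latency at 55 usec */
                pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
                                          netdev->name, 55);
        else
                pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
                                          netdev->name,
                                          PM_QOS_DEFAULT_VALUE);
}
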
@@ -2645,18 +2648,8 @@ static void e1000_configure(struct e1000_adapter *adapter) | |||
2645 | **/ | 2648 | **/ |
2646 | void e1000e_power_up_phy(struct e1000_adapter *adapter) | 2649 | void e1000e_power_up_phy(struct e1000_adapter *adapter) |
2647 | { | 2650 | { |
2648 | u16 mii_reg = 0; | 2651 | if (adapter->hw.phy.ops.power_up) |
2649 | 2652 | adapter->hw.phy.ops.power_up(&adapter->hw); | |
2650 | /* Just clear the power down bit to wake the phy back up */ | ||
2651 | if (adapter->hw.phy.media_type == e1000_media_type_copper) { | ||
2652 | /* | ||
2653 | * According to the manual, the phy will retain its | ||
2654 | * settings across a power-down/up cycle | ||
2655 | */ | ||
2656 | e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg); | ||
2657 | mii_reg &= ~MII_CR_POWER_DOWN; | ||
2658 | e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg); | ||
2659 | } | ||
2660 | 2653 | ||
2661 | adapter->hw.mac.ops.setup_link(&adapter->hw); | 2654 | adapter->hw.mac.ops.setup_link(&adapter->hw); |
2662 | } | 2655 | } |
@@ -2664,35 +2657,17 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter) | |||
2664 | /** | 2657 | /** |
2665 | * e1000_power_down_phy - Power down the PHY | 2658 | * e1000_power_down_phy - Power down the PHY |
2666 | * | 2659 | * |
2667 | * Power down the PHY so no link is implied when interface is down | 2660 | * Power down the PHY so no link is implied when interface is down. |
2668 | * The PHY cannot be powered down is management or WoL is active | 2661 | * The PHY cannot be powered down if management or WoL is active. |
2669 | */ | 2662 | */ |
2670 | static void e1000_power_down_phy(struct e1000_adapter *adapter) | 2663 | static void e1000_power_down_phy(struct e1000_adapter *adapter) |
2671 | { | 2664 | { |
2672 | struct e1000_hw *hw = &adapter->hw; | ||
2673 | u16 mii_reg; | ||
2674 | |||
2675 | /* WoL is enabled */ | 2665 | /* WoL is enabled */ |
2676 | if (adapter->wol) | 2666 | if (adapter->wol) |
2677 | return; | 2667 | return; |
2678 | 2668 | ||
2679 | /* non-copper PHY? */ | 2669 | if (adapter->hw.phy.ops.power_down) |
2680 | if (adapter->hw.phy.media_type != e1000_media_type_copper) | 2670 | adapter->hw.phy.ops.power_down(&adapter->hw); |
2681 | return; | ||
2682 | |||
2683 | /* reset is blocked because of a SoL/IDER session */ | ||
2684 | if (e1000e_check_mng_mode(hw) || e1000_check_reset_block(hw)) | ||
2685 | return; | ||
2686 | |||
2687 | /* manageability (AMT) is enabled */ | ||
2688 | if (er32(MANC) & E1000_MANC_SMBUS_EN) | ||
2689 | return; | ||
2690 | |||
2691 | /* power down the PHY */ | ||
2692 | e1e_rphy(hw, PHY_CONTROL, &mii_reg); | ||
2693 | mii_reg |= MII_CR_POWER_DOWN; | ||
2694 | e1e_wphy(hw, PHY_CONTROL, mii_reg); | ||
2695 | mdelay(1); | ||
2696 | } | 2671 | } |
2697 | 2672 | ||
2698 | /** | 2673 | /** |
@@ -2769,25 +2744,38 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
2769 | /* | 2744 | /* |
2770 | * flow control settings | 2745 | * flow control settings |
2771 | * | 2746 | * |
2772 | * The high water mark must be low enough to fit two full frame | 2747 | * The high water mark must be low enough to fit one full frame |
2773 | * (or the size used for early receive) above it in the Rx FIFO. | 2748 | * (or the size used for early receive) above it in the Rx FIFO. |
2774 | * Set it to the lower of: | 2749 | * Set it to the lower of: |
2775 | * - 90% of the Rx FIFO size, and | 2750 | * - 90% of the Rx FIFO size, and |
2776 | * - the full Rx FIFO size minus the early receive size (for parts | 2751 | * - the full Rx FIFO size minus the early receive size (for parts |
2777 | * with ERT support assuming ERT set to E1000_ERT_2048), or | 2752 | * with ERT support assuming ERT set to E1000_ERT_2048), or |
2778 | * - the full Rx FIFO size minus two full frames | 2753 | * - the full Rx FIFO size minus one full frame |
2779 | */ | 2754 | */ |
2780 | if ((adapter->flags & FLAG_HAS_ERT) && | 2755 | if (hw->mac.type == e1000_pchlan) { |
2781 | (adapter->netdev->mtu > ETH_DATA_LEN)) | 2756 | /* |
2782 | hwm = min(((pba << 10) * 9 / 10), | 2757 | * Workaround PCH LOM adapter hangs with certain network |
2783 | ((pba << 10) - (E1000_ERT_2048 << 3))); | 2758 | * loads. If hangs persist, try disabling Tx flow control. |
2784 | else | 2759 | */ |
2785 | hwm = min(((pba << 10) * 9 / 10), | 2760 | if (adapter->netdev->mtu > ETH_DATA_LEN) { |
2786 | ((pba << 10) - (2 * adapter->max_frame_size))); | 2761 | fc->high_water = 0x3500; |
2762 | fc->low_water = 0x1500; | ||
2763 | } else { | ||
2764 | fc->high_water = 0x5000; | ||
2765 | fc->low_water = 0x3000; | ||
2766 | } | ||
2767 | } else { | ||
2768 | if ((adapter->flags & FLAG_HAS_ERT) && | ||
2769 | (adapter->netdev->mtu > ETH_DATA_LEN)) | ||
2770 | hwm = min(((pba << 10) * 9 / 10), | ||
2771 | ((pba << 10) - (E1000_ERT_2048 << 3))); | ||
2772 | else | ||
2773 | hwm = min(((pba << 10) * 9 / 10), | ||
2774 | ((pba << 10) - adapter->max_frame_size)); | ||
2787 | 2775 | ||
2788 | fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ | 2776 | fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ |
2789 | fc->low_water = (fc->high_water - (2 * adapter->max_frame_size)); | 2777 | fc->low_water = fc->high_water - 8; |
2790 | fc->low_water &= E1000_FCRTL_RTL; /* 8-byte granularity */ | 2778 | } |
2791 | 2779 | ||
2792 | if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) | 2780 | if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) |
2793 | fc->pause_time = 0xFFFF; | 2781 | fc->pause_time = 0xFFFF; |
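The non-PCH watermark arithmetic above is easiest to check with concrete numbers. The following standalone sketch just replays that calculation in plain C; the 32 KB packet buffer, the 1522-byte maximum frame and the 8-byte-granularity mask value are assumptions made for the example, not values read out of the driver.

#include <stdio.h>

#define FCRTH_RTH_MASK	0x0000FFF8	/* assumed 8-byte-granularity high-water mask */

int main(void)
{
	unsigned int pba = 32;			/* Rx packet buffer in KB (assumed) */
	unsigned int max_frame_size = 1522;	/* 1500-byte MTU + VLAN tag + FCS (assumed) */
	unsigned int fifo = pba << 10;		/* buffer size in bytes */
	unsigned int ninety_pct = fifo * 9 / 10;
	unsigned int minus_frame = fifo - max_frame_size;

	/* hwm = min(90% of the FIFO, FIFO minus one full frame), as in the hunk above */
	unsigned int hwm = ninety_pct < minus_frame ? ninety_pct : minus_frame;
	unsigned int high_water = hwm & FCRTH_RTH_MASK;
	unsigned int low_water = high_water - 8;

	printf("fifo=%u hwm=%u high_water=%u low_water=%u\n",
	       fifo, hwm, high_water, low_water);
	return 0;
}

With those inputs hwm comes out to 29491 bytes, which rounds down to a 29488-byte high water mark and a 29480-byte low water mark.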
@@ -2813,6 +2801,10 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
2813 | if (mac->ops.init_hw(hw)) | 2801 | if (mac->ops.init_hw(hw)) |
2814 | e_err("Hardware Error\n"); | 2802 | e_err("Hardware Error\n"); |
2815 | 2803 | ||
2804 | /* additional part of the flow-control workaround above */ | ||
2805 | if (hw->mac.type == e1000_pchlan) | ||
2806 | ew32(FCRTV_PCH, 0x1000); | ||
2807 | |||
2816 | e1000_update_mng_vlan(adapter); | 2808 | e1000_update_mng_vlan(adapter); |
2817 | 2809 | ||
2818 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ | 2810 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ |
@@ -2839,6 +2831,12 @@ int e1000e_up(struct e1000_adapter *adapter) | |||
2839 | { | 2831 | { |
2840 | struct e1000_hw *hw = &adapter->hw; | 2832 | struct e1000_hw *hw = &adapter->hw; |
2841 | 2833 | ||
2834 | /* DMA latency requirement to workaround early-receive/jumbo issue */ | ||
2835 | if (adapter->flags & FLAG_HAS_ERT) | ||
2836 | pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, | ||
2837 | adapter->netdev->name, | ||
2838 | PM_QOS_DEFAULT_VALUE); | ||
2839 | |||
2842 | /* hardware has been reset, we need to reload some things */ | 2840 | /* hardware has been reset, we need to reload some things */ |
2843 | e1000_configure(adapter); | 2841 | e1000_configure(adapter); |
2844 | 2842 | ||
@@ -2899,6 +2897,10 @@ void e1000e_down(struct e1000_adapter *adapter) | |||
2899 | e1000_clean_tx_ring(adapter); | 2897 | e1000_clean_tx_ring(adapter); |
2900 | e1000_clean_rx_ring(adapter); | 2898 | e1000_clean_rx_ring(adapter); |
2901 | 2899 | ||
2900 | if (adapter->flags & FLAG_HAS_ERT) | ||
2901 | pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, | ||
2902 | adapter->netdev->name); | ||
2903 | |||
2902 | /* | 2904 | /* |
2903 | * TODO: for power management, we could drop the link and | 2905 | * TODO: for power management, we could drop the link and |
2904 | * pci_disable_device here. | 2906 | * pci_disable_device here. |
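Taken together, the pm_qos hunks move the CPU DMA latency requirement from one driver-global registration to a per-interface one: added with the default value in e1000e_up(), tightened to 55 microseconds (or relaxed back to the default) when the Rx path is configured, and removed again in e1000e_down(). The sketch below only models that add/update/remove ordering with userspace stubs; the stub names and the printf bookkeeping are inventions for the example and merely stand in for the pm_qos_add_requirement()/pm_qos_update_requirement()/pm_qos_remove_requirement() calls visible in the hunks.

#include <stdio.h>

#define PM_QOS_DEFAULT_VALUE	(-1)	/* placeholder for the kernel's symbolic default */

/* Stubs standing in for the kernel pm_qos requirement calls. */
static void qos_add(const char *name, int usec)
{
	printf("add    %s latency=%d\n", name, usec);
}

static void qos_update(const char *name, int usec)
{
	printf("update %s latency=%d\n", name, usec);
}

static void qos_remove(const char *name)
{
	printf("remove %s\n", name);
}

int main(void)
{
	const char *ifname = "eth0";	/* per-interface name, like adapter->netdev->name */
	int jumbo_with_ert = 1;		/* pretend ERT is in use with a jumbo MTU */

	/* e1000e_up(): register the requirement with no constraint yet */
	qos_add(ifname, PM_QOS_DEFAULT_VALUE);

	/* Rx configuration: cap C-state exit latency at 55 us, or relax it */
	if (jumbo_with_ert)
		qos_update(ifname, 55);
	else
		qos_update(ifname, PM_QOS_DEFAULT_VALUE);

	/* e1000e_down(): drop the requirement again */
	qos_remove(ifname);
	return 0;
}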
@@ -2956,7 +2958,7 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data) | |||
2956 | struct e1000_hw *hw = &adapter->hw; | 2958 | struct e1000_hw *hw = &adapter->hw; |
2957 | u32 icr = er32(ICR); | 2959 | u32 icr = er32(ICR); |
2958 | 2960 | ||
2959 | e_dbg("%s: icr is %08X\n", netdev->name, icr); | 2961 | e_dbg("icr is %08X\n", icr); |
2960 | if (icr & E1000_ICR_RXSEQ) { | 2962 | if (icr & E1000_ICR_RXSEQ) { |
2961 | adapter->flags &= ~FLAG_MSI_TEST_FAILED; | 2963 | adapter->flags &= ~FLAG_MSI_TEST_FAILED; |
2962 | wmb(); | 2964 | wmb(); |
@@ -2993,7 +2995,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) | |||
2993 | if (err) | 2995 | if (err) |
2994 | goto msi_test_failed; | 2996 | goto msi_test_failed; |
2995 | 2997 | ||
2996 | err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0, | 2998 | err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, |
2997 | netdev->name, netdev); | 2999 | netdev->name, netdev); |
2998 | if (err) { | 3000 | if (err) { |
2999 | pci_disable_msi(adapter->pdev); | 3001 | pci_disable_msi(adapter->pdev); |
@@ -3026,7 +3028,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) | |||
3026 | goto msi_test_failed; | 3028 | goto msi_test_failed; |
3027 | 3029 | ||
3028 | /* okay so the test worked, restore settings */ | 3030 | /* okay so the test worked, restore settings */ |
3029 | e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name); | 3031 | e_dbg("MSI interrupt test succeeded!\n"); |
3030 | msi_test_failed: | 3032 | msi_test_failed: |
3031 | e1000e_set_interrupt_capability(adapter); | 3033 | e1000e_set_interrupt_capability(adapter); |
3032 | e1000_request_irq(adapter); | 3034 | e1000_request_irq(adapter); |
@@ -3287,6 +3289,7 @@ static void e1000_update_phy_info(unsigned long data) | |||
3287 | **/ | 3289 | **/ |
3288 | void e1000e_update_stats(struct e1000_adapter *adapter) | 3290 | void e1000e_update_stats(struct e1000_adapter *adapter) |
3289 | { | 3291 | { |
3292 | struct net_device *netdev = adapter->netdev; | ||
3290 | struct e1000_hw *hw = &adapter->hw; | 3293 | struct e1000_hw *hw = &adapter->hw; |
3291 | struct pci_dev *pdev = adapter->pdev; | 3294 | struct pci_dev *pdev = adapter->pdev; |
3292 | u16 phy_data; | 3295 | u16 phy_data; |
@@ -3381,8 +3384,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
3381 | adapter->stats.tsctfc += er32(TSCTFC); | 3384 | adapter->stats.tsctfc += er32(TSCTFC); |
3382 | 3385 | ||
3383 | /* Fill out the OS statistics structure */ | 3386 | /* Fill out the OS statistics structure */ |
3384 | adapter->net_stats.multicast = adapter->stats.mprc; | 3387 | netdev->stats.multicast = adapter->stats.mprc; |
3385 | adapter->net_stats.collisions = adapter->stats.colc; | 3388 | netdev->stats.collisions = adapter->stats.colc; |
3386 | 3389 | ||
3387 | /* Rx Errors */ | 3390 | /* Rx Errors */ |
3388 | 3391 | ||
@@ -3390,22 +3393,22 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
3390 | * RLEC on some newer hardware can be incorrect so build | 3393 | * RLEC on some newer hardware can be incorrect so build |
3391 | * our own version based on RUC and ROC | 3394 | * our own version based on RUC and ROC |
3392 | */ | 3395 | */ |
3393 | adapter->net_stats.rx_errors = adapter->stats.rxerrc + | 3396 | netdev->stats.rx_errors = adapter->stats.rxerrc + |
3394 | adapter->stats.crcerrs + adapter->stats.algnerrc + | 3397 | adapter->stats.crcerrs + adapter->stats.algnerrc + |
3395 | adapter->stats.ruc + adapter->stats.roc + | 3398 | adapter->stats.ruc + adapter->stats.roc + |
3396 | adapter->stats.cexterr; | 3399 | adapter->stats.cexterr; |
3397 | adapter->net_stats.rx_length_errors = adapter->stats.ruc + | 3400 | netdev->stats.rx_length_errors = adapter->stats.ruc + |
3398 | adapter->stats.roc; | 3401 | adapter->stats.roc; |
3399 | adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; | 3402 | netdev->stats.rx_crc_errors = adapter->stats.crcerrs; |
3400 | adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; | 3403 | netdev->stats.rx_frame_errors = adapter->stats.algnerrc; |
3401 | adapter->net_stats.rx_missed_errors = adapter->stats.mpc; | 3404 | netdev->stats.rx_missed_errors = adapter->stats.mpc; |
3402 | 3405 | ||
3403 | /* Tx Errors */ | 3406 | /* Tx Errors */ |
3404 | adapter->net_stats.tx_errors = adapter->stats.ecol + | 3407 | netdev->stats.tx_errors = adapter->stats.ecol + |
3405 | adapter->stats.latecol; | 3408 | adapter->stats.latecol; |
3406 | adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; | 3409 | netdev->stats.tx_aborted_errors = adapter->stats.ecol; |
3407 | adapter->net_stats.tx_window_errors = adapter->stats.latecol; | 3410 | netdev->stats.tx_window_errors = adapter->stats.latecol; |
3408 | adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; | 3411 | netdev->stats.tx_carrier_errors = adapter->stats.tncrs; |
3409 | 3412 | ||
3410 | /* Tx Dropped needs to be maintained elsewhere */ | 3413 | /* Tx Dropped needs to be maintained elsewhere */ |
3411 | 3414 | ||
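The stats conversion above drops the driver-private adapter->net_stats copy and fills the statistics structure that already lives inside struct net_device, which is why e1000_get_stats() further down can simply return &netdev->stats. A toy model of that ownership change, with all structure and field names invented for the sketch:

#include <stdio.h>

struct toy_stats {
	unsigned long multicast;
	unsigned long collisions;
};

/* The counters now live inside the net-device object itself... */
struct toy_netdev {
	struct toy_stats stats;
};

/* ...so the private adapter no longer carries its own copy. */
struct toy_adapter {
	struct toy_netdev *netdev;
	unsigned long hw_mprc;	/* raw hardware counters */
	unsigned long hw_colc;
};

static void update_stats(struct toy_adapter *adapter)
{
	adapter->netdev->stats.multicast  = adapter->hw_mprc;
	adapter->netdev->stats.collisions = adapter->hw_colc;
}

static struct toy_stats *get_stats(struct toy_netdev *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}

int main(void)
{
	struct toy_netdev nd = { { 0, 0 } };
	struct toy_adapter ad = { &nd, 42, 7 };

	update_stats(&ad);
	printf("multicast=%lu collisions=%lu\n",
	       get_stats(&nd)->multicast, get_stats(&nd)->collisions);
	return 0;
}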
@@ -3610,7 +3613,7 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
3610 | case SPEED_100: | 3613 | case SPEED_100: |
3611 | txb2b = 0; | 3614 | txb2b = 0; |
3612 | netdev->tx_queue_len = 100; | 3615 | netdev->tx_queue_len = 100; |
3613 | /* maybe add some timeout factor ? */ | 3616 | adapter->tx_timeout_factor = 10; |
3614 | break; | 3617 | break; |
3615 | } | 3618 | } |
3616 | 3619 | ||
@@ -3759,68 +3762,64 @@ static int e1000_tso(struct e1000_adapter *adapter, | |||
3759 | u8 ipcss, ipcso, tucss, tucso, hdr_len; | 3762 | u8 ipcss, ipcso, tucss, tucso, hdr_len; |
3760 | int err; | 3763 | int err; |
3761 | 3764 | ||
3762 | if (skb_is_gso(skb)) { | 3765 | if (!skb_is_gso(skb)) |
3763 | if (skb_header_cloned(skb)) { | 3766 | return 0; |
3764 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | ||
3765 | if (err) | ||
3766 | return err; | ||
3767 | } | ||
3768 | 3767 | ||
3769 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | 3768 | if (skb_header_cloned(skb)) { |
3770 | mss = skb_shinfo(skb)->gso_size; | 3769 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
3771 | if (skb->protocol == htons(ETH_P_IP)) { | 3770 | if (err) |
3772 | struct iphdr *iph = ip_hdr(skb); | 3771 | return err; |
3773 | iph->tot_len = 0; | 3772 | } |
3774 | iph->check = 0; | ||
3775 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | ||
3776 | iph->daddr, 0, | ||
3777 | IPPROTO_TCP, | ||
3778 | 0); | ||
3779 | cmd_length = E1000_TXD_CMD_IP; | ||
3780 | ipcse = skb_transport_offset(skb) - 1; | ||
3781 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | ||
3782 | ipv6_hdr(skb)->payload_len = 0; | ||
3783 | tcp_hdr(skb)->check = | ||
3784 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | ||
3785 | &ipv6_hdr(skb)->daddr, | ||
3786 | 0, IPPROTO_TCP, 0); | ||
3787 | ipcse = 0; | ||
3788 | } | ||
3789 | ipcss = skb_network_offset(skb); | ||
3790 | ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; | ||
3791 | tucss = skb_transport_offset(skb); | ||
3792 | tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; | ||
3793 | tucse = 0; | ||
3794 | 3773 | ||
3795 | cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | | 3774 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
3796 | E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); | 3775 | mss = skb_shinfo(skb)->gso_size; |
3776 | if (skb->protocol == htons(ETH_P_IP)) { | ||
3777 | struct iphdr *iph = ip_hdr(skb); | ||
3778 | iph->tot_len = 0; | ||
3779 | iph->check = 0; | ||
3780 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, | ||
3781 | 0, IPPROTO_TCP, 0); | ||
3782 | cmd_length = E1000_TXD_CMD_IP; | ||
3783 | ipcse = skb_transport_offset(skb) - 1; | ||
3784 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | ||
3785 | ipv6_hdr(skb)->payload_len = 0; | ||
3786 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | ||
3787 | &ipv6_hdr(skb)->daddr, | ||
3788 | 0, IPPROTO_TCP, 0); | ||
3789 | ipcse = 0; | ||
3790 | } | ||
3791 | ipcss = skb_network_offset(skb); | ||
3792 | ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; | ||
3793 | tucss = skb_transport_offset(skb); | ||
3794 | tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; | ||
3795 | tucse = 0; | ||
3797 | 3796 | ||
3798 | i = tx_ring->next_to_use; | 3797 | cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | |
3799 | context_desc = E1000_CONTEXT_DESC(*tx_ring, i); | 3798 | E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); |
3800 | buffer_info = &tx_ring->buffer_info[i]; | ||
3801 | 3799 | ||
3802 | context_desc->lower_setup.ip_fields.ipcss = ipcss; | 3800 | i = tx_ring->next_to_use; |
3803 | context_desc->lower_setup.ip_fields.ipcso = ipcso; | 3801 | context_desc = E1000_CONTEXT_DESC(*tx_ring, i); |
3804 | context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); | 3802 | buffer_info = &tx_ring->buffer_info[i]; |
3805 | context_desc->upper_setup.tcp_fields.tucss = tucss; | ||
3806 | context_desc->upper_setup.tcp_fields.tucso = tucso; | ||
3807 | context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); | ||
3808 | context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); | ||
3809 | context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; | ||
3810 | context_desc->cmd_and_length = cpu_to_le32(cmd_length); | ||
3811 | 3803 | ||
3812 | buffer_info->time_stamp = jiffies; | 3804 | context_desc->lower_setup.ip_fields.ipcss = ipcss; |
3813 | buffer_info->next_to_watch = i; | 3805 | context_desc->lower_setup.ip_fields.ipcso = ipcso; |
3806 | context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); | ||
3807 | context_desc->upper_setup.tcp_fields.tucss = tucss; | ||
3808 | context_desc->upper_setup.tcp_fields.tucso = tucso; | ||
3809 | context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); | ||
3810 | context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); | ||
3811 | context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; | ||
3812 | context_desc->cmd_and_length = cpu_to_le32(cmd_length); | ||
3814 | 3813 | ||
3815 | i++; | 3814 | buffer_info->time_stamp = jiffies; |
3816 | if (i == tx_ring->count) | 3815 | buffer_info->next_to_watch = i; |
3817 | i = 0; | ||
3818 | tx_ring->next_to_use = i; | ||
3819 | 3816 | ||
3820 | return 1; | 3817 | i++; |
3821 | } | 3818 | if (i == tx_ring->count) |
3819 | i = 0; | ||
3820 | tx_ring->next_to_use = i; | ||
3822 | 3821 | ||
3823 | return 0; | 3822 | return 1; |
3824 | } | 3823 | } |
3825 | 3824 | ||
3826 | static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) | 3825 | static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) |
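The e1000_tso() rewrite above is purely structural: the body that used to be nested under if (skb_is_gso(skb)) now runs after an early return 0, dropping one level of indentation without changing what gets programmed into the context descriptor. A minimal before/after of that guard-clause shape, with invented helper names:

/* Before: the useful work sits one level deep inside the condition. */
static int setup_offload_nested(int is_gso)
{
	if (is_gso) {
		/* ... build the TSO context descriptor ... */
		return 1;	/* a context descriptor was queued */
	}
	return 0;
}

/* After: bail out early and keep the main path flat. */
static int setup_offload_flat(int is_gso)
{
	if (!is_gso)
		return 0;

	/* ... build the TSO context descriptor, one level shallower ... */
	return 1;
}

int main(void)
{
	/* both shapes report the same result for the same input */
	return setup_offload_nested(1) == setup_offload_flat(1) ? 0 : 1;
}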
@@ -3892,23 +3891,14 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3892 | unsigned int mss) | 3891 | unsigned int mss) |
3893 | { | 3892 | { |
3894 | struct e1000_ring *tx_ring = adapter->tx_ring; | 3893 | struct e1000_ring *tx_ring = adapter->tx_ring; |
3894 | struct pci_dev *pdev = adapter->pdev; | ||
3895 | struct e1000_buffer *buffer_info; | 3895 | struct e1000_buffer *buffer_info; |
3896 | unsigned int len = skb_headlen(skb); | 3896 | unsigned int len = skb_headlen(skb); |
3897 | unsigned int offset, size, count = 0, i; | 3897 | unsigned int offset = 0, size, count = 0, i; |
3898 | unsigned int f; | 3898 | unsigned int f; |
3899 | dma_addr_t *map; | ||
3900 | 3899 | ||
3901 | i = tx_ring->next_to_use; | 3900 | i = tx_ring->next_to_use; |
3902 | 3901 | ||
3903 | if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) { | ||
3904 | dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); | ||
3905 | adapter->tx_dma_failed++; | ||
3906 | return 0; | ||
3907 | } | ||
3908 | |||
3909 | map = skb_shinfo(skb)->dma_maps; | ||
3910 | offset = 0; | ||
3911 | |||
3912 | while (len) { | 3902 | while (len) { |
3913 | buffer_info = &tx_ring->buffer_info[i]; | 3903 | buffer_info = &tx_ring->buffer_info[i]; |
3914 | size = min(len, max_per_txd); | 3904 | size = min(len, max_per_txd); |
@@ -3916,11 +3906,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3916 | buffer_info->length = size; | 3906 | buffer_info->length = size; |
3917 | buffer_info->time_stamp = jiffies; | 3907 | buffer_info->time_stamp = jiffies; |
3918 | buffer_info->next_to_watch = i; | 3908 | buffer_info->next_to_watch = i; |
3919 | buffer_info->dma = skb_shinfo(skb)->dma_head + offset; | 3909 | buffer_info->dma = pci_map_single(pdev, skb->data + offset, |
3920 | count++; | 3910 | size, PCI_DMA_TODEVICE); |
3911 | buffer_info->mapped_as_page = false; | ||
3912 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) | ||
3913 | goto dma_error; | ||
3921 | 3914 | ||
3922 | len -= size; | 3915 | len -= size; |
3923 | offset += size; | 3916 | offset += size; |
3917 | count++; | ||
3924 | 3918 | ||
3925 | if (len) { | 3919 | if (len) { |
3926 | i++; | 3920 | i++; |
@@ -3934,7 +3928,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3934 | 3928 | ||
3935 | frag = &skb_shinfo(skb)->frags[f]; | 3929 | frag = &skb_shinfo(skb)->frags[f]; |
3936 | len = frag->size; | 3930 | len = frag->size; |
3937 | offset = 0; | 3931 | offset = frag->page_offset; |
3938 | 3932 | ||
3939 | while (len) { | 3933 | while (len) { |
3940 | i++; | 3934 | i++; |
@@ -3947,7 +3941,12 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3947 | buffer_info->length = size; | 3941 | buffer_info->length = size; |
3948 | buffer_info->time_stamp = jiffies; | 3942 | buffer_info->time_stamp = jiffies; |
3949 | buffer_info->next_to_watch = i; | 3943 | buffer_info->next_to_watch = i; |
3950 | buffer_info->dma = map[f] + offset; | 3944 | buffer_info->dma = pci_map_page(pdev, frag->page, |
3945 | offset, size, | ||
3946 | PCI_DMA_TODEVICE); | ||
3947 | buffer_info->mapped_as_page = true; | ||
3948 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) | ||
3949 | goto dma_error; | ||
3951 | 3950 | ||
3952 | len -= size; | 3951 | len -= size; |
3953 | offset += size; | 3952 | offset += size; |
@@ -3959,6 +3958,22 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3959 | tx_ring->buffer_info[first].next_to_watch = i; | 3958 | tx_ring->buffer_info[first].next_to_watch = i; |
3960 | 3959 | ||
3961 | return count; | 3960 | return count; |
3961 | |||
3962 | dma_error: | ||
3963 | dev_err(&pdev->dev, "TX DMA map failed\n"); | ||
3964 | buffer_info->dma = 0; | ||
3965 | count--; | ||
3966 | |||
3967 | while (count >= 0) { | ||
3968 | count--; | ||
3969 | i--; | ||
3970 | if (i < 0) | ||
3971 | i += tx_ring->count; | ||
3972 | buffer_info = &tx_ring->buffer_info[i]; | ||
3973 | e1000_put_txbuf(adapter, buffer_info); | ||
3974 | } | ||
3975 | |||
3976 | return 0; | ||
3962 | } | 3977 | } |
3963 | 3978 | ||
3964 | static void e1000_tx_queue(struct e1000_adapter *adapter, | 3979 | static void e1000_tx_queue(struct e1000_adapter *adapter, |
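The e1000_tx_map() change above replaces the one-shot skb_dma_map() with per-segment pci_map_single()/pci_map_page() calls, so a mapping failure now has to unwind every segment that was already mapped, walking the ring backwards and wrapping below index 0. The standalone sketch below models only that reverse unwind; the ring size, the map/unmap stubs and the deliberately failing mapping are assumptions for the illustration, not the driver's actual bookkeeping.

#include <stdio.h>

#define RING_COUNT 8	/* assumed ring size */

static int mapped[RING_COUNT];

/* Stand-ins for pci_map_*() and e1000_put_txbuf(); slot 1 "fails" to map. */
static int map_segment(int idx)
{
	if (idx == 1)
		return -1;
	mapped[idx] = 1;
	printf("mapped   slot %d\n", idx);
	return 0;
}

static void put_txbuf(int idx)
{
	mapped[idx] = 0;
	printf("unmapped slot %d\n", idx);
}

int main(void)
{
	int i = RING_COUNT - 2;	/* start near the end so the unwind has to wrap */
	int count = 0;

	for (int seg = 0; seg < 4; seg++) {
		i = (i + 1) % RING_COUNT;
		if (map_segment(i) < 0)
			goto dma_error;
		count++;
	}
	return 0;

dma_error:
	/* Walk back over everything mapped so far, wrapping below index 0. */
	while (count > 0) {
		count--;
		i--;
		if (i < 0)
			i += RING_COUNT;
		put_txbuf(i);
	}
	return 1;
}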
@@ -4031,8 +4046,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, | |||
4031 | u16 length, offset; | 4046 | u16 length, offset; |
4032 | 4047 | ||
4033 | if (vlan_tx_tag_present(skb)) { | 4048 | if (vlan_tx_tag_present(skb)) { |
4034 | if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) | 4049 | if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && |
4035 | && (adapter->hw.mng_cookie.status & | 4050 | (adapter->hw.mng_cookie.status & |
4036 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) | 4051 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) |
4037 | return 0; | 4052 | return 0; |
4038 | } | 4053 | } |
@@ -4254,10 +4269,8 @@ static void e1000_reset_task(struct work_struct *work) | |||
4254 | **/ | 4269 | **/ |
4255 | static struct net_device_stats *e1000_get_stats(struct net_device *netdev) | 4270 | static struct net_device_stats *e1000_get_stats(struct net_device *netdev) |
4256 | { | 4271 | { |
4257 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
4258 | |||
4259 | /* only return the current stats */ | 4272 | /* only return the current stats */ |
4260 | return &adapter->net_stats; | 4273 | return &netdev->stats; |
4261 | } | 4274 | } |
4262 | 4275 | ||
4263 | /** | 4276 | /** |
@@ -4288,8 +4301,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
4288 | 4301 | ||
4289 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | 4302 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) |
4290 | msleep(1); | 4303 | msleep(1); |
4291 | /* e1000e_down has a dependency on max_frame_size */ | 4304 | /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ |
4292 | adapter->max_frame_size = max_frame; | 4305 | adapter->max_frame_size = max_frame; |
4306 | e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); | ||
4307 | netdev->mtu = new_mtu; | ||
4293 | if (netif_running(netdev)) | 4308 | if (netif_running(netdev)) |
4294 | e1000e_down(adapter); | 4309 | e1000e_down(adapter); |
4295 | 4310 | ||
@@ -4319,9 +4334,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
4319 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN | 4334 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN |
4320 | + ETH_FCS_LEN; | 4335 | + ETH_FCS_LEN; |
4321 | 4336 | ||
4322 | e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); | ||
4323 | netdev->mtu = new_mtu; | ||
4324 | |||
4325 | if (netif_running(netdev)) | 4337 | if (netif_running(netdev)) |
4326 | e1000e_up(adapter); | 4338 | e1000e_up(adapter); |
4327 | else | 4339 | else |
@@ -4346,6 +4358,8 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, | |||
4346 | data->phy_id = adapter->hw.phy.addr; | 4358 | data->phy_id = adapter->hw.phy.addr; |
4347 | break; | 4359 | break; |
4348 | case SIOCGMIIREG: | 4360 | case SIOCGMIIREG: |
4361 | e1000_phy_read_status(adapter); | ||
4362 | |||
4349 | switch (data->reg_num & 0x1F) { | 4363 | switch (data->reg_num & 0x1F) { |
4350 | case MII_BMCR: | 4364 | case MII_BMCR: |
4351 | data->val_out = adapter->phy_regs.bmcr; | 4365 | data->val_out = adapter->phy_regs.bmcr; |
@@ -4453,7 +4467,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) | |||
4453 | e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); | 4467 | e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); |
4454 | 4468 | ||
4455 | /* activate PHY wakeup */ | 4469 | /* activate PHY wakeup */ |
4456 | retval = hw->phy.ops.acquire_phy(hw); | 4470 | retval = hw->phy.ops.acquire(hw); |
4457 | if (retval) { | 4471 | if (retval) { |
4458 | e_err("Could not acquire PHY\n"); | 4472 | e_err("Could not acquire PHY\n"); |
4459 | return retval; | 4473 | return retval; |
@@ -4470,7 +4484,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) | |||
4470 | if (retval) | 4484 | if (retval) |
4471 | e_err("Could not set PHY Host Wakeup bit\n"); | 4485 | e_err("Could not set PHY Host Wakeup bit\n"); |
4472 | out: | 4486 | out: |
4473 | hw->phy.ops.release_phy(hw); | 4487 | hw->phy.ops.release(hw); |
4474 | 4488 | ||
4475 | return retval; | 4489 | return retval; |
4476 | } | 4490 | } |
@@ -5144,6 +5158,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
5144 | INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); | 5158 | INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); |
5145 | INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); | 5159 | INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); |
5146 | INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); | 5160 | INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); |
5161 | INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); | ||
5147 | 5162 | ||
5148 | /* Initialize link parameters. User can change them with ethtool */ | 5163 | /* Initialize link parameters. User can change them with ethtool */ |
5149 | adapter->hw.mac.autoneg = 1; | 5164 | adapter->hw.mac.autoneg = 1; |
@@ -5267,19 +5282,24 @@ static void __devexit e1000_remove(struct pci_dev *pdev) | |||
5267 | del_timer_sync(&adapter->watchdog_timer); | 5282 | del_timer_sync(&adapter->watchdog_timer); |
5268 | del_timer_sync(&adapter->phy_info_timer); | 5283 | del_timer_sync(&adapter->phy_info_timer); |
5269 | 5284 | ||
5285 | cancel_work_sync(&adapter->reset_task); | ||
5286 | cancel_work_sync(&adapter->watchdog_task); | ||
5287 | cancel_work_sync(&adapter->downshift_task); | ||
5288 | cancel_work_sync(&adapter->update_phy_task); | ||
5289 | cancel_work_sync(&adapter->print_hang_task); | ||
5270 | flush_scheduled_work(); | 5290 | flush_scheduled_work(); |
5271 | 5291 | ||
5292 | if (!(netdev->flags & IFF_UP)) | ||
5293 | e1000_power_down_phy(adapter); | ||
5294 | |||
5295 | unregister_netdev(netdev); | ||
5296 | |||
5272 | /* | 5297 | /* |
5273 | * Release control of h/w to f/w. If f/w is AMT enabled, this | 5298 | * Release control of h/w to f/w. If f/w is AMT enabled, this |
5274 | * would have already happened in close and is redundant. | 5299 | * would have already happened in close and is redundant. |
5275 | */ | 5300 | */ |
5276 | e1000_release_hw_control(adapter); | 5301 | e1000_release_hw_control(adapter); |
5277 | 5302 | ||
5278 | unregister_netdev(netdev); | ||
5279 | |||
5280 | if (!e1000_check_reset_block(&adapter->hw)) | ||
5281 | e1000_phy_hw_reset(&adapter->hw); | ||
5282 | |||
5283 | e1000e_reset_interrupt_capability(adapter); | 5303 | e1000e_reset_interrupt_capability(adapter); |
5284 | kfree(adapter->tx_ring); | 5304 | kfree(adapter->tx_ring); |
5285 | kfree(adapter->rx_ring); | 5305 | kfree(adapter->rx_ring); |
@@ -5345,6 +5365,7 @@ static struct pci_device_id e1000_pci_tbl[] = { | |||
5345 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, | 5365 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, |
5346 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, | 5366 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, |
5347 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, | 5367 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, |
5368 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan }, | ||
5348 | 5369 | ||
5349 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, | 5370 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, |
5350 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, | 5371 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, |
@@ -5398,12 +5419,10 @@ static int __init e1000_init_module(void) | |||
5398 | int ret; | 5419 | int ret; |
5399 | printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", | 5420 | printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", |
5400 | e1000e_driver_name, e1000e_driver_version); | 5421 | e1000e_driver_name, e1000e_driver_version); |
5401 | printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", | 5422 | printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n", |
5402 | e1000e_driver_name); | 5423 | e1000e_driver_name); |
5403 | ret = pci_register_driver(&e1000_driver); | 5424 | ret = pci_register_driver(&e1000_driver); |
5404 | pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name, | 5425 | |
5405 | PM_QOS_DEFAULT_VALUE); | ||
5406 | |||
5407 | return ret; | 5426 | return ret; |
5408 | } | 5427 | } |
5409 | module_init(e1000_init_module); | 5428 | module_init(e1000_init_module); |
@@ -5417,7 +5436,6 @@ module_init(e1000_init_module); | |||
5417 | static void __exit e1000_exit_module(void) | 5436 | static void __exit e1000_exit_module(void) |
5418 | { | 5437 | { |
5419 | pci_unregister_driver(&e1000_driver); | 5438 | pci_unregister_driver(&e1000_driver); |
5420 | pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name); | ||
5421 | } | 5439 | } |
5422 | module_exit(e1000_exit_module); | 5440 | module_exit(e1000_exit_module); |
5423 | 5441 | ||
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c index 1342e0b1815c..2e399778cae5 100644 --- a/drivers/net/e1000e/param.c +++ b/drivers/net/e1000e/param.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index 03175b3a2c9e..55a2c0acfee7 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -44,6 +44,8 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | |||
44 | /* Cable length tables */ | 44 | /* Cable length tables */ |
45 | static const u16 e1000_m88_cable_length_table[] = | 45 | static const u16 e1000_m88_cable_length_table[] = |
46 | { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; | 46 | { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; |
47 | #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ | ||
48 | ARRAY_SIZE(e1000_m88_cable_length_table) | ||
47 | 49 | ||
48 | static const u16 e1000_igp_2_cable_length_table[] = | 50 | static const u16 e1000_igp_2_cable_length_table[] = |
49 | { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, | 51 | { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, |
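The new M88E1000_CABLE_LENGTH_TABLE_SIZE macro above derives the table size with ARRAY_SIZE(), so any later bounds check stays in sync with the initializer automatically. A small standalone illustration of the same pattern; the table values are copied from the hunk, while the sentinel value and the min/max lookup are assumptions made for the sketch.

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))
#define CABLE_LENGTH_UNDEFINED	0xFF	/* sentinel assumed for this sketch */

static const unsigned int m88_cable_length_table[] =
	{ 0, 50, 80, 110, 140, 140, CABLE_LENGTH_UNDEFINED };
#define M88_CABLE_LENGTH_TABLE_SIZE ARRAY_SIZE(m88_cable_length_table)

int main(void)
{
	unsigned int index = 3;	/* pretend this came out of a PHY status register */

	/* Refuse indexes the table cannot answer instead of reading past its end. */
	if (index + 1 >= M88_CABLE_LENGTH_TABLE_SIZE) {
		printf("index %u out of range\n", index);
		return 1;
	}
	printf("cable length between %u and %u meters\n",
	       m88_cable_length_table[index],
	       m88_cable_length_table[index + 1]);
	return 0;
}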
@@ -71,7 +73,6 @@ static const u16 e1000_igp_2_cable_length_table[] = | |||
71 | #define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) | 73 | #define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) |
72 | #define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ | 74 | #define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ |
73 | #define I82577_CTRL_REG 23 | 75 | #define I82577_CTRL_REG 23 |
74 | #define I82577_CTRL_DOWNSHIFT_MASK (7 << 10) | ||
75 | 76 | ||
76 | /* 82577 specific PHY registers */ | 77 | /* 82577 specific PHY registers */ |
77 | #define I82577_PHY_CTRL_2 18 | 78 | #define I82577_PHY_CTRL_2 18 |
@@ -131,7 +132,7 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw) | |||
131 | u16 phy_id; | 132 | u16 phy_id; |
132 | u16 retry_count = 0; | 133 | u16 retry_count = 0; |
133 | 134 | ||
134 | if (!(phy->ops.read_phy_reg)) | 135 | if (!(phy->ops.read_reg)) |
135 | goto out; | 136 | goto out; |
136 | 137 | ||
137 | while (retry_count < 2) { | 138 | while (retry_count < 2) { |
@@ -152,29 +153,29 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw) | |||
152 | goto out; | 153 | goto out; |
153 | 154 | ||
154 | /* | 155 | /* |
155 | * If the PHY ID is still unknown, we may have an 82577i | 156 | * If the PHY ID is still unknown, we may have an 82577 |
156 | * without link. We will try again after setting Slow | 157 | * without link. We will try again after setting Slow MDIC |
157 | * MDIC mode. No harm in trying again in this case since | 158 | * mode. No harm in trying again in this case since the PHY |
158 | * the PHY ID is unknown at this point anyway | 159 | * ID is unknown at this point anyway. |
159 | */ | 160 | */ |
160 | ret_val = phy->ops.acquire_phy(hw); | 161 | ret_val = phy->ops.acquire(hw); |
161 | if (ret_val) | 162 | if (ret_val) |
162 | goto out; | 163 | goto out; |
163 | ret_val = e1000_set_mdio_slow_mode_hv(hw, true); | 164 | ret_val = e1000_set_mdio_slow_mode_hv(hw, true); |
164 | if (ret_val) | 165 | if (ret_val) |
165 | goto out; | 166 | goto out; |
166 | phy->ops.release_phy(hw); | 167 | phy->ops.release(hw); |
167 | 168 | ||
168 | retry_count++; | 169 | retry_count++; |
169 | } | 170 | } |
170 | out: | 171 | out: |
171 | /* Revert to MDIO fast mode, if applicable */ | 172 | /* Revert to MDIO fast mode, if applicable */ |
172 | if (retry_count) { | 173 | if (retry_count) { |
173 | ret_val = phy->ops.acquire_phy(hw); | 174 | ret_val = phy->ops.acquire(hw); |
174 | if (ret_val) | 175 | if (ret_val) |
175 | return ret_val; | 176 | return ret_val; |
176 | ret_val = e1000_set_mdio_slow_mode_hv(hw, false); | 177 | ret_val = e1000_set_mdio_slow_mode_hv(hw, false); |
177 | phy->ops.release_phy(hw); | 178 | phy->ops.release(hw); |
178 | } | 179 | } |
179 | 180 | ||
180 | return ret_val; | 181 | return ret_val; |
@@ -212,7 +213,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) | |||
212 | u32 i, mdic = 0; | 213 | u32 i, mdic = 0; |
213 | 214 | ||
214 | if (offset > MAX_PHY_REG_ADDRESS) { | 215 | if (offset > MAX_PHY_REG_ADDRESS) { |
215 | hw_dbg(hw, "PHY Address %d is out of range\n", offset); | 216 | e_dbg("PHY Address %d is out of range\n", offset); |
216 | return -E1000_ERR_PARAM; | 217 | return -E1000_ERR_PARAM; |
217 | } | 218 | } |
218 | 219 | ||
@@ -239,11 +240,11 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) | |||
239 | break; | 240 | break; |
240 | } | 241 | } |
241 | if (!(mdic & E1000_MDIC_READY)) { | 242 | if (!(mdic & E1000_MDIC_READY)) { |
242 | hw_dbg(hw, "MDI Read did not complete\n"); | 243 | e_dbg("MDI Read did not complete\n"); |
243 | return -E1000_ERR_PHY; | 244 | return -E1000_ERR_PHY; |
244 | } | 245 | } |
245 | if (mdic & E1000_MDIC_ERROR) { | 246 | if (mdic & E1000_MDIC_ERROR) { |
246 | hw_dbg(hw, "MDI Error\n"); | 247 | e_dbg("MDI Error\n"); |
247 | return -E1000_ERR_PHY; | 248 | return -E1000_ERR_PHY; |
248 | } | 249 | } |
249 | *data = (u16) mdic; | 250 | *data = (u16) mdic; |
@@ -265,7 +266,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) | |||
265 | u32 i, mdic = 0; | 266 | u32 i, mdic = 0; |
266 | 267 | ||
267 | if (offset > MAX_PHY_REG_ADDRESS) { | 268 | if (offset > MAX_PHY_REG_ADDRESS) { |
268 | hw_dbg(hw, "PHY Address %d is out of range\n", offset); | 269 | e_dbg("PHY Address %d is out of range\n", offset); |
269 | return -E1000_ERR_PARAM; | 270 | return -E1000_ERR_PARAM; |
270 | } | 271 | } |
271 | 272 | ||
@@ -293,11 +294,11 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) | |||
293 | break; | 294 | break; |
294 | } | 295 | } |
295 | if (!(mdic & E1000_MDIC_READY)) { | 296 | if (!(mdic & E1000_MDIC_READY)) { |
296 | hw_dbg(hw, "MDI Write did not complete\n"); | 297 | e_dbg("MDI Write did not complete\n"); |
297 | return -E1000_ERR_PHY; | 298 | return -E1000_ERR_PHY; |
298 | } | 299 | } |
299 | if (mdic & E1000_MDIC_ERROR) { | 300 | if (mdic & E1000_MDIC_ERROR) { |
300 | hw_dbg(hw, "MDI Error\n"); | 301 | e_dbg("MDI Error\n"); |
301 | return -E1000_ERR_PHY; | 302 | return -E1000_ERR_PHY; |
302 | } | 303 | } |
303 | 304 | ||
@@ -318,14 +319,14 @@ s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) | |||
318 | { | 319 | { |
319 | s32 ret_val; | 320 | s32 ret_val; |
320 | 321 | ||
321 | ret_val = hw->phy.ops.acquire_phy(hw); | 322 | ret_val = hw->phy.ops.acquire(hw); |
322 | if (ret_val) | 323 | if (ret_val) |
323 | return ret_val; | 324 | return ret_val; |
324 | 325 | ||
325 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 326 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
326 | data); | 327 | data); |
327 | 328 | ||
328 | hw->phy.ops.release_phy(hw); | 329 | hw->phy.ops.release(hw); |
329 | 330 | ||
330 | return ret_val; | 331 | return ret_val; |
331 | } | 332 | } |
@@ -343,14 +344,14 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) | |||
343 | { | 344 | { |
344 | s32 ret_val; | 345 | s32 ret_val; |
345 | 346 | ||
346 | ret_val = hw->phy.ops.acquire_phy(hw); | 347 | ret_val = hw->phy.ops.acquire(hw); |
347 | if (ret_val) | 348 | if (ret_val) |
348 | return ret_val; | 349 | return ret_val; |
349 | 350 | ||
350 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 351 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
351 | data); | 352 | data); |
352 | 353 | ||
353 | hw->phy.ops.release_phy(hw); | 354 | hw->phy.ops.release(hw); |
354 | 355 | ||
355 | return ret_val; | 356 | return ret_val; |
356 | } | 357 | } |
@@ -372,10 +373,10 @@ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, | |||
372 | s32 ret_val = 0; | 373 | s32 ret_val = 0; |
373 | 374 | ||
374 | if (!locked) { | 375 | if (!locked) { |
375 | if (!(hw->phy.ops.acquire_phy)) | 376 | if (!(hw->phy.ops.acquire)) |
376 | goto out; | 377 | goto out; |
377 | 378 | ||
378 | ret_val = hw->phy.ops.acquire_phy(hw); | 379 | ret_val = hw->phy.ops.acquire(hw); |
379 | if (ret_val) | 380 | if (ret_val) |
380 | goto out; | 381 | goto out; |
381 | } | 382 | } |
@@ -393,7 +394,7 @@ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, | |||
393 | 394 | ||
394 | release: | 395 | release: |
395 | if (!locked) | 396 | if (!locked) |
396 | hw->phy.ops.release_phy(hw); | 397 | hw->phy.ops.release(hw); |
397 | out: | 398 | out: |
398 | return ret_val; | 399 | return ret_val; |
399 | } | 400 | } |
@@ -443,10 +444,10 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, | |||
443 | s32 ret_val = 0; | 444 | s32 ret_val = 0; |
444 | 445 | ||
445 | if (!locked) { | 446 | if (!locked) { |
446 | if (!(hw->phy.ops.acquire_phy)) | 447 | if (!(hw->phy.ops.acquire)) |
447 | goto out; | 448 | goto out; |
448 | 449 | ||
449 | ret_val = hw->phy.ops.acquire_phy(hw); | 450 | ret_val = hw->phy.ops.acquire(hw); |
450 | if (ret_val) | 451 | if (ret_val) |
451 | goto out; | 452 | goto out; |
452 | } | 453 | } |
@@ -464,7 +465,7 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, | |||
464 | 465 | ||
465 | release: | 466 | release: |
466 | if (!locked) | 467 | if (!locked) |
467 | hw->phy.ops.release_phy(hw); | 468 | hw->phy.ops.release(hw); |
468 | 469 | ||
469 | out: | 470 | out: |
470 | return ret_val; | 471 | return ret_val; |
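The acquire_phy/release_phy to acquire/release rename above pairs with the locked flag threaded through __e1000e_read_phy_reg_igp() and __e1000e_write_phy_reg_igp(): one internal helper does the actual register access and only takes the semaphore when the caller does not already hold it. A stripped-down model of that shape, with a pthread mutex standing in for the hardware semaphore and an array standing in for the PHY registers, both assumptions for the sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t phy_sem = PTHREAD_MUTEX_INITIALIZER;
static unsigned short phy_regs[32];	/* fake PHY register file */

/* Core accessor: acquires/releases the semaphore only when not already held. */
static int __read_phy_reg(unsigned int offset, unsigned short *data, int locked)
{
	if (!locked)
		pthread_mutex_lock(&phy_sem);

	*data = phy_regs[offset & 0x1F];

	if (!locked)
		pthread_mutex_unlock(&phy_sem);
	return 0;
}

/* Wrapper for callers that do not hold the semaphore. */
static int read_phy_reg(unsigned int offset, unsigned short *data)
{
	return __read_phy_reg(offset, data, 0);
}

/* "_locked" wrapper for callers already inside an acquire/release section. */
static int read_phy_reg_locked(unsigned int offset, unsigned short *data)
{
	return __read_phy_reg(offset, data, 1);
}

int main(void)
{
	unsigned short val;

	phy_regs[0] = 0x1140;
	read_phy_reg(0, &val);
	printf("unlocked read: 0x%04x\n", (unsigned int)val);

	pthread_mutex_lock(&phy_sem);	/* e.g. around a multi-register sequence */
	read_phy_reg_locked(0, &val);
	pthread_mutex_unlock(&phy_sem);
	printf("locked read:   0x%04x\n", (unsigned int)val);
	return 0;
}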
@@ -516,10 +517,10 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, | |||
516 | s32 ret_val = 0; | 517 | s32 ret_val = 0; |
517 | 518 | ||
518 | if (!locked) { | 519 | if (!locked) { |
519 | if (!(hw->phy.ops.acquire_phy)) | 520 | if (!(hw->phy.ops.acquire)) |
520 | goto out; | 521 | goto out; |
521 | 522 | ||
522 | ret_val = hw->phy.ops.acquire_phy(hw); | 523 | ret_val = hw->phy.ops.acquire(hw); |
523 | if (ret_val) | 524 | if (ret_val) |
524 | goto out; | 525 | goto out; |
525 | } | 526 | } |
@@ -534,7 +535,7 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, | |||
534 | *data = (u16)kmrnctrlsta; | 535 | *data = (u16)kmrnctrlsta; |
535 | 536 | ||
536 | if (!locked) | 537 | if (!locked) |
537 | hw->phy.ops.release_phy(hw); | 538 | hw->phy.ops.release(hw); |
538 | 539 | ||
539 | out: | 540 | out: |
540 | return ret_val; | 541 | return ret_val; |
@@ -588,10 +589,10 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, | |||
588 | s32 ret_val = 0; | 589 | s32 ret_val = 0; |
589 | 590 | ||
590 | if (!locked) { | 591 | if (!locked) { |
591 | if (!(hw->phy.ops.acquire_phy)) | 592 | if (!(hw->phy.ops.acquire)) |
592 | goto out; | 593 | goto out; |
593 | 594 | ||
594 | ret_val = hw->phy.ops.acquire_phy(hw); | 595 | ret_val = hw->phy.ops.acquire(hw); |
595 | if (ret_val) | 596 | if (ret_val) |
596 | goto out; | 597 | goto out; |
597 | } | 598 | } |
@@ -603,7 +604,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, | |||
603 | udelay(2); | 604 | udelay(2); |
604 | 605 | ||
605 | if (!locked) | 606 | if (!locked) |
606 | hw->phy.ops.release_phy(hw); | 607 | hw->phy.ops.release(hw); |
607 | 608 | ||
608 | out: | 609 | out: |
609 | return ret_val; | 610 | return ret_val; |
@@ -650,7 +651,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) | |||
650 | u16 phy_data; | 651 | u16 phy_data; |
651 | 652 | ||
652 | /* Enable CRS on TX. This must be set for half-duplex operation. */ | 653 | /* Enable CRS on TX. This must be set for half-duplex operation. */ |
653 | ret_val = phy->ops.read_phy_reg(hw, I82577_CFG_REG, &phy_data); | 654 | ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data); |
654 | if (ret_val) | 655 | if (ret_val) |
655 | goto out; | 656 | goto out; |
656 | 657 | ||
@@ -659,16 +660,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) | |||
659 | /* Enable downshift */ | 660 | /* Enable downshift */ |
660 | phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; | 661 | phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; |
661 | 662 | ||
662 | ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data); | 663 | ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data); |
663 | if (ret_val) | ||
664 | goto out; | ||
665 | |||
666 | /* Set number of link attempts before downshift */ | ||
667 | ret_val = phy->ops.read_phy_reg(hw, I82577_CTRL_REG, &phy_data); | ||
668 | if (ret_val) | ||
669 | goto out; | ||
670 | phy_data &= ~I82577_CTRL_DOWNSHIFT_MASK; | ||
671 | ret_val = phy->ops.write_phy_reg(hw, I82577_CTRL_REG, phy_data); | ||
672 | 664 | ||
673 | out: | 665 | out: |
674 | return ret_val; | 666 | return ret_val; |
@@ -786,12 +778,12 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) | |||
786 | /* Commit the changes. */ | 778 | /* Commit the changes. */ |
787 | ret_val = e1000e_commit_phy(hw); | 779 | ret_val = e1000e_commit_phy(hw); |
788 | if (ret_val) { | 780 | if (ret_val) { |
789 | hw_dbg(hw, "Error committing the PHY changes\n"); | 781 | e_dbg("Error committing the PHY changes\n"); |
790 | return ret_val; | 782 | return ret_val; |
791 | } | 783 | } |
792 | 784 | ||
793 | if (phy->type == e1000_phy_82578) { | 785 | if (phy->type == e1000_phy_82578) { |
794 | ret_val = phy->ops.read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, | 786 | ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, |
795 | &phy_data); | 787 | &phy_data); |
796 | if (ret_val) | 788 | if (ret_val) |
797 | return ret_val; | 789 | return ret_val; |
@@ -799,7 +791,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) | |||
799 | /* 82578 PHY - set the downshift count to 1x. */ | 791 | /* 82578 PHY - set the downshift count to 1x. */ |
800 | phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; | 792 | phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; |
801 | phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; | 793 | phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; |
802 | ret_val = phy->ops.write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, | 794 | ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, |
803 | phy_data); | 795 | phy_data); |
804 | if (ret_val) | 796 | if (ret_val) |
805 | return ret_val; | 797 | return ret_val; |
@@ -823,7 +815,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) | |||
823 | 815 | ||
824 | ret_val = e1000_phy_hw_reset(hw); | 816 | ret_val = e1000_phy_hw_reset(hw); |
825 | if (ret_val) { | 817 | if (ret_val) { |
826 | hw_dbg(hw, "Error resetting the PHY.\n"); | 818 | e_dbg("Error resetting the PHY.\n"); |
827 | return ret_val; | 819 | return ret_val; |
828 | } | 820 | } |
829 | 821 | ||
@@ -834,9 +826,9 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) | |||
834 | msleep(100); | 826 | msleep(100); |
835 | 827 | ||
836 | /* disable lplu d0 during driver init */ | 828 | /* disable lplu d0 during driver init */ |
837 | ret_val = e1000_set_d0_lplu_state(hw, 0); | 829 | ret_val = e1000_set_d0_lplu_state(hw, false); |
838 | if (ret_val) { | 830 | if (ret_val) { |
839 | hw_dbg(hw, "Error Disabling LPLU D0\n"); | 831 | e_dbg("Error Disabling LPLU D0\n"); |
840 | return ret_val; | 832 | return ret_val; |
841 | } | 833 | } |
842 | /* Configure mdi-mdix settings */ | 834 | /* Configure mdi-mdix settings */ |
@@ -972,39 +964,39 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
972 | NWAY_AR_10T_HD_CAPS); | 964 | NWAY_AR_10T_HD_CAPS); |
973 | mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); | 965 | mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); |
974 | 966 | ||
975 | hw_dbg(hw, "autoneg_advertised %x\n", phy->autoneg_advertised); | 967 | e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); |
976 | 968 | ||
977 | /* Do we want to advertise 10 Mb Half Duplex? */ | 969 | /* Do we want to advertise 10 Mb Half Duplex? */ |
978 | if (phy->autoneg_advertised & ADVERTISE_10_HALF) { | 970 | if (phy->autoneg_advertised & ADVERTISE_10_HALF) { |
979 | hw_dbg(hw, "Advertise 10mb Half duplex\n"); | 971 | e_dbg("Advertise 10mb Half duplex\n"); |
980 | mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; | 972 | mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; |
981 | } | 973 | } |
982 | 974 | ||
983 | /* Do we want to advertise 10 Mb Full Duplex? */ | 975 | /* Do we want to advertise 10 Mb Full Duplex? */ |
984 | if (phy->autoneg_advertised & ADVERTISE_10_FULL) { | 976 | if (phy->autoneg_advertised & ADVERTISE_10_FULL) { |
985 | hw_dbg(hw, "Advertise 10mb Full duplex\n"); | 977 | e_dbg("Advertise 10mb Full duplex\n"); |
986 | mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; | 978 | mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; |
987 | } | 979 | } |
988 | 980 | ||
989 | /* Do we want to advertise 100 Mb Half Duplex? */ | 981 | /* Do we want to advertise 100 Mb Half Duplex? */ |
990 | if (phy->autoneg_advertised & ADVERTISE_100_HALF) { | 982 | if (phy->autoneg_advertised & ADVERTISE_100_HALF) { |
991 | hw_dbg(hw, "Advertise 100mb Half duplex\n"); | 983 | e_dbg("Advertise 100mb Half duplex\n"); |
992 | mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; | 984 | mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; |
993 | } | 985 | } |
994 | 986 | ||
995 | /* Do we want to advertise 100 Mb Full Duplex? */ | 987 | /* Do we want to advertise 100 Mb Full Duplex? */ |
996 | if (phy->autoneg_advertised & ADVERTISE_100_FULL) { | 988 | if (phy->autoneg_advertised & ADVERTISE_100_FULL) { |
997 | hw_dbg(hw, "Advertise 100mb Full duplex\n"); | 989 | e_dbg("Advertise 100mb Full duplex\n"); |
998 | mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; | 990 | mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; |
999 | } | 991 | } |
1000 | 992 | ||
1001 | /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ | 993 | /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ |
1002 | if (phy->autoneg_advertised & ADVERTISE_1000_HALF) | 994 | if (phy->autoneg_advertised & ADVERTISE_1000_HALF) |
1003 | hw_dbg(hw, "Advertise 1000mb Half duplex request denied!\n"); | 995 | e_dbg("Advertise 1000mb Half duplex request denied!\n"); |
1004 | 996 | ||
1005 | /* Do we want to advertise 1000 Mb Full Duplex? */ | 997 | /* Do we want to advertise 1000 Mb Full Duplex? */ |
1006 | if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { | 998 | if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { |
1007 | hw_dbg(hw, "Advertise 1000mb Full duplex\n"); | 999 | e_dbg("Advertise 1000mb Full duplex\n"); |
1008 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; | 1000 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; |
1009 | } | 1001 | } |
1010 | 1002 | ||
@@ -1063,7 +1055,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1063 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | 1055 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); |
1064 | break; | 1056 | break; |
1065 | default: | 1057 | default: |
1066 | hw_dbg(hw, "Flow control param set incorrectly\n"); | 1058 | e_dbg("Flow control param set incorrectly\n"); |
1067 | ret_val = -E1000_ERR_CONFIG; | 1059 | ret_val = -E1000_ERR_CONFIG; |
1068 | return ret_val; | 1060 | return ret_val; |
1069 | } | 1061 | } |
@@ -1072,7 +1064,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1072 | if (ret_val) | 1064 | if (ret_val) |
1073 | return ret_val; | 1065 | return ret_val; |
1074 | 1066 | ||
1075 | hw_dbg(hw, "Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); | 1067 | e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); |
1076 | 1068 | ||
1077 | if (phy->autoneg_mask & ADVERTISE_1000_FULL) { | 1069 | if (phy->autoneg_mask & ADVERTISE_1000_FULL) { |
1078 | ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); | 1070 | ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); |
@@ -1109,13 +1101,13 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
1109 | if (phy->autoneg_advertised == 0) | 1101 | if (phy->autoneg_advertised == 0) |
1110 | phy->autoneg_advertised = phy->autoneg_mask; | 1102 | phy->autoneg_advertised = phy->autoneg_mask; |
1111 | 1103 | ||
1112 | hw_dbg(hw, "Reconfiguring auto-neg advertisement params\n"); | 1104 | e_dbg("Reconfiguring auto-neg advertisement params\n"); |
1113 | ret_val = e1000_phy_setup_autoneg(hw); | 1105 | ret_val = e1000_phy_setup_autoneg(hw); |
1114 | if (ret_val) { | 1106 | if (ret_val) { |
1115 | hw_dbg(hw, "Error Setting up Auto-Negotiation\n"); | 1107 | e_dbg("Error Setting up Auto-Negotiation\n"); |
1116 | return ret_val; | 1108 | return ret_val; |
1117 | } | 1109 | } |
1118 | hw_dbg(hw, "Restarting Auto-Neg\n"); | 1110 | e_dbg("Restarting Auto-Neg\n"); |
1119 | 1111 | ||
1120 | /* | 1112 | /* |
1121 | * Restart auto-negotiation by setting the Auto Neg Enable bit and | 1113 | * Restart auto-negotiation by setting the Auto Neg Enable bit and |
@@ -1137,7 +1129,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
1137 | if (phy->autoneg_wait_to_complete) { | 1129 | if (phy->autoneg_wait_to_complete) { |
1138 | ret_val = e1000_wait_autoneg(hw); | 1130 | ret_val = e1000_wait_autoneg(hw); |
1139 | if (ret_val) { | 1131 | if (ret_val) { |
1140 | hw_dbg(hw, "Error while waiting for " | 1132 | e_dbg("Error while waiting for " |
1141 | "autoneg to complete\n"); | 1133 | "autoneg to complete\n"); |
1142 | return ret_val; | 1134 | return ret_val; |
1143 | } | 1135 | } |
@@ -1175,10 +1167,10 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw) | |||
1175 | * PHY will be set to 10H, 10F, 100H or 100F | 1167 | * PHY will be set to 10H, 10F, 100H or 100F |
1176 | * depending on user settings. | 1168 | * depending on user settings. |
1177 | */ | 1169 | */ |
1178 | hw_dbg(hw, "Forcing Speed and Duplex\n"); | 1170 | e_dbg("Forcing Speed and Duplex\n"); |
1179 | ret_val = e1000_phy_force_speed_duplex(hw); | 1171 | ret_val = e1000_phy_force_speed_duplex(hw); |
1180 | if (ret_val) { | 1172 | if (ret_val) { |
1181 | hw_dbg(hw, "Error Forcing Speed and Duplex\n"); | 1173 | e_dbg("Error Forcing Speed and Duplex\n"); |
1182 | return ret_val; | 1174 | return ret_val; |
1183 | } | 1175 | } |
1184 | } | 1176 | } |
@@ -1195,11 +1187,11 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw) | |||
1195 | return ret_val; | 1187 | return ret_val; |
1196 | 1188 | ||
1197 | if (link) { | 1189 | if (link) { |
1198 | hw_dbg(hw, "Valid link established!!!\n"); | 1190 | e_dbg("Valid link established!!!\n"); |
1199 | e1000e_config_collision_dist(hw); | 1191 | e1000e_config_collision_dist(hw); |
1200 | ret_val = e1000e_config_fc_after_link_up(hw); | 1192 | ret_val = e1000e_config_fc_after_link_up(hw); |
1201 | } else { | 1193 | } else { |
1202 | hw_dbg(hw, "Unable to establish link!!!\n"); | 1194 | e_dbg("Unable to establish link!!!\n"); |
1203 | } | 1195 | } |
1204 | 1196 | ||
1205 | return ret_val; | 1197 | return ret_val; |
@@ -1245,12 +1237,12 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) | |||
1245 | if (ret_val) | 1237 | if (ret_val) |
1246 | return ret_val; | 1238 | return ret_val; |
1247 | 1239 | ||
1248 | hw_dbg(hw, "IGP PSCR: %X\n", phy_data); | 1240 | e_dbg("IGP PSCR: %X\n", phy_data); |
1249 | 1241 | ||
1250 | udelay(1); | 1242 | udelay(1); |
1251 | 1243 | ||
1252 | if (phy->autoneg_wait_to_complete) { | 1244 | if (phy->autoneg_wait_to_complete) { |
1253 | hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n"); | 1245 | e_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); |
1254 | 1246 | ||
1255 | ret_val = e1000e_phy_has_link_generic(hw, | 1247 | ret_val = e1000e_phy_has_link_generic(hw, |
1256 | PHY_FORCE_LIMIT, | 1248 | PHY_FORCE_LIMIT, |
@@ -1260,7 +1252,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) | |||
1260 | return ret_val; | 1252 | return ret_val; |
1261 | 1253 | ||
1262 | if (!link) | 1254 | if (!link) |
1263 | hw_dbg(hw, "Link taking longer than expected.\n"); | 1255 | e_dbg("Link taking longer than expected.\n"); |
1264 | 1256 | ||
1265 | /* Try once more */ | 1257 | /* Try once more */ |
1266 | ret_val = e1000e_phy_has_link_generic(hw, | 1258 | ret_val = e1000e_phy_has_link_generic(hw, |
@@ -1304,7 +1296,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1304 | if (ret_val) | 1296 | if (ret_val) |
1305 | return ret_val; | 1297 | return ret_val; |
1306 | 1298 | ||
1307 | hw_dbg(hw, "M88E1000 PSCR: %X\n", phy_data); | 1299 | e_dbg("M88E1000 PSCR: %X\n", phy_data); |
1308 | 1300 | ||
1309 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); | 1301 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); |
1310 | if (ret_val) | 1302 | if (ret_val) |
@@ -1322,7 +1314,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1322 | return ret_val; | 1314 | return ret_val; |
1323 | 1315 | ||
1324 | if (phy->autoneg_wait_to_complete) { | 1316 | if (phy->autoneg_wait_to_complete) { |
1325 | hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n"); | 1317 | e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); |
1326 | 1318 | ||
1327 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | 1319 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, |
1328 | 100000, &link); | 1320 | 100000, &link); |
@@ -1330,17 +1322,22 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1330 | return ret_val; | 1322 | return ret_val; |
1331 | 1323 | ||
1332 | if (!link) { | 1324 | if (!link) { |
1333 | /* | 1325 | if (hw->phy.type != e1000_phy_m88) { |
1334 | * We didn't get link. | 1326 | e_dbg("Link taking longer than expected.\n"); |
1335 | * Reset the DSP and cross our fingers. | 1327 | } else { |
1336 | */ | 1328 | /* |
1337 | ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, | 1329 | * We didn't get link. |
1338 | 0x001d); | 1330 | * Reset the DSP and cross our fingers. |
1339 | if (ret_val) | 1331 | */ |
1340 | return ret_val; | 1332 | ret_val = e1e_wphy(hw, |
1341 | ret_val = e1000e_phy_reset_dsp(hw); | 1333 | M88E1000_PHY_PAGE_SELECT, |
1342 | if (ret_val) | 1334 | 0x001d); |
1343 | return ret_val; | 1335 | if (ret_val) |
1336 | return ret_val; | ||
1337 | ret_val = e1000e_phy_reset_dsp(hw); | ||
1338 | if (ret_val) | ||
1339 | return ret_val; | ||
1340 | } | ||
1344 | } | 1341 | } |
1345 | 1342 | ||
1346 | /* Try once more */ | 1343 | /* Try once more */ |
@@ -1350,6 +1347,9 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1350 | return ret_val; | 1347 | return ret_val; |
1351 | } | 1348 | } |
1352 | 1349 | ||
1350 | if (hw->phy.type != e1000_phy_m88) | ||
1351 | return 0; | ||
1352 | |||
1353 | ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); | 1353 | ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); |
1354 | if (ret_val) | 1354 | if (ret_val) |
1355 | return ret_val; | 1355 | return ret_val; |
@@ -1379,6 +1379,73 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1379 | } | 1379 | } |
1380 | 1380 | ||
1381 | /** | 1381 | /** |
1382 | * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex | ||
1383 | * @hw: pointer to the HW structure | ||
1384 | * | ||
1385 | * Forces the speed and duplex settings of the PHY. | ||
1386 | * This is a function pointer entry point only called by | ||
1387 | * PHY setup routines. | ||
1388 | **/ | ||
1389 | s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) | ||
1390 | { | ||
1391 | struct e1000_phy_info *phy = &hw->phy; | ||
1392 | s32 ret_val; | ||
1393 | u16 data; | ||
1394 | bool link; | ||
1395 | |||
1396 | ret_val = e1e_rphy(hw, PHY_CONTROL, &data); | ||
1397 | if (ret_val) | ||
1398 | goto out; | ||
1399 | |||
1400 | e1000e_phy_force_speed_duplex_setup(hw, &data); | ||
1401 | |||
1402 | ret_val = e1e_wphy(hw, PHY_CONTROL, data); | ||
1403 | if (ret_val) | ||
1404 | goto out; | ||
1405 | |||
1406 | /* Disable MDI-X support for 10/100 */ | ||
1407 | ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); | ||
1408 | if (ret_val) | ||
1409 | goto out; | ||
1410 | |||
1411 | data &= ~IFE_PMC_AUTO_MDIX; | ||
1412 | data &= ~IFE_PMC_FORCE_MDIX; | ||
1413 | |||
1414 | ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); | ||
1415 | if (ret_val) | ||
1416 | goto out; | ||
1417 | |||
1418 | e_dbg("IFE PMC: %X\n", data); | ||
1419 | |||
1420 | udelay(1); | ||
1421 | |||
1422 | if (phy->autoneg_wait_to_complete) { | ||
1423 | e_dbg("Waiting for forced speed/duplex link on IFE phy.\n"); | ||
1424 | |||
1425 | ret_val = e1000e_phy_has_link_generic(hw, | ||
1426 | PHY_FORCE_LIMIT, | ||
1427 | 100000, | ||
1428 | &link); | ||
1429 | if (ret_val) | ||
1430 | goto out; | ||
1431 | |||
1432 | if (!link) | ||
1433 | e_dbg("Link taking longer than expected.\n"); | ||
1434 | |||
1435 | /* Try once more */ | ||
1436 | ret_val = e1000e_phy_has_link_generic(hw, | ||
1437 | PHY_FORCE_LIMIT, | ||
1438 | 100000, | ||
1439 | &link); | ||
1440 | if (ret_val) | ||
1441 | goto out; | ||
1442 | } | ||
1443 | |||
1444 | out: | ||
1445 | return ret_val; | ||
1446 | } | ||
1447 | |||
1448 | /** | ||
1382 | * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex | 1449 | * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex |
1383 | * @hw: pointer to the HW structure | 1450 | * @hw: pointer to the HW structure |
1384 | * @phy_ctrl: pointer to current value of PHY_CONTROL | 1451 | * @phy_ctrl: pointer to current value of PHY_CONTROL |
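Aside for readers following the new e1000_phy_force_speed_duplex_ife() in the hunk above: the MDI-X handling comes down to clearing two bits of the MDI-X control register before waiting for forced link. Below is a minimal userspace sketch of that masking step, separate from the patch itself; the two bit masks and the sample register value are assumptions for illustration, not the driver's definitions.

/* Illustrative sketch only; constants are assumed, not from the driver headers. */
#include <stdint.h>
#include <stdio.h>

#define IFE_PMC_AUTO_MDIX   0x0080  /* assumed: automatic crossover enable  */
#define IFE_PMC_FORCE_MDIX  0x0040  /* assumed: force crossed-over MDI pins */

int main(void)
{
        uint16_t data = 0x00c3;     /* pretend readout of the MDI-X control reg */

        /* Disable MDI-X support for forced 10/100 operation */
        data &= ~IFE_PMC_AUTO_MDIX;
        data &= ~IFE_PMC_FORCE_MDIX;

        printf("IFE PMC: %X\n", (unsigned)data);
        return 0;
}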
@@ -1413,11 +1480,11 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) | |||
1413 | if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { | 1480 | if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { |
1414 | ctrl &= ~E1000_CTRL_FD; | 1481 | ctrl &= ~E1000_CTRL_FD; |
1415 | *phy_ctrl &= ~MII_CR_FULL_DUPLEX; | 1482 | *phy_ctrl &= ~MII_CR_FULL_DUPLEX; |
1416 | hw_dbg(hw, "Half Duplex\n"); | 1483 | e_dbg("Half Duplex\n"); |
1417 | } else { | 1484 | } else { |
1418 | ctrl |= E1000_CTRL_FD; | 1485 | ctrl |= E1000_CTRL_FD; |
1419 | *phy_ctrl |= MII_CR_FULL_DUPLEX; | 1486 | *phy_ctrl |= MII_CR_FULL_DUPLEX; |
1420 | hw_dbg(hw, "Full Duplex\n"); | 1487 | e_dbg("Full Duplex\n"); |
1421 | } | 1488 | } |
1422 | 1489 | ||
1423 | /* Forcing 10mb or 100mb? */ | 1490 | /* Forcing 10mb or 100mb? */ |
@@ -1425,12 +1492,12 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) | |||
1425 | ctrl |= E1000_CTRL_SPD_100; | 1492 | ctrl |= E1000_CTRL_SPD_100; |
1426 | *phy_ctrl |= MII_CR_SPEED_100; | 1493 | *phy_ctrl |= MII_CR_SPEED_100; |
1427 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); | 1494 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); |
1428 | hw_dbg(hw, "Forcing 100mb\n"); | 1495 | e_dbg("Forcing 100mb\n"); |
1429 | } else { | 1496 | } else { |
1430 | ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); | 1497 | ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); |
1431 | *phy_ctrl |= MII_CR_SPEED_10; | 1498 | *phy_ctrl |= MII_CR_SPEED_10; |
1432 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); | 1499 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); |
1433 | hw_dbg(hw, "Forcing 10mb\n"); | 1500 | e_dbg("Forcing 10mb\n"); |
1434 | } | 1501 | } |
1435 | 1502 | ||
1436 | e1000e_config_collision_dist(hw); | 1503 | e1000e_config_collision_dist(hw); |
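The hunk above only swaps debug macros, but the surrounding logic is worth spelling out: forcing a speed means setting exactly one speed bit in the MII control word and clearing the others, alongside the matching MAC CTRL bits. A small sketch of the PHY-side handling follows, separate from the patch; the MII control bit values are assumptions for illustration.

/* PHY-side sketch of forcing 100 Mb/s full duplex; constants are assumed. */
#include <stdint.h>
#include <stdio.h>

#define MII_CR_FULL_DUPLEX  0x0100  /* assumed */
#define MII_CR_SPEED_1000   0x0040  /* assumed */
#define MII_CR_SPEED_100    0x2000  /* assumed */
#define MII_CR_SPEED_10     0x0000  /* assumed */

int main(void)
{
        uint16_t phy_ctrl = 0x1140; /* pretend PHY_CONTROL readout */

        /* full duplex plus the 100 Mb/s bit; clear the other speed bits */
        phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_100;
        phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);

        printf("Forcing 100mb, PHY_CONTROL = %04X\n", (unsigned)phy_ctrl);
        return 0;
}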
@@ -1533,8 +1600,8 @@ s32 e1000e_check_downshift(struct e1000_hw *hw) | |||
1533 | switch (phy->type) { | 1600 | switch (phy->type) { |
1534 | case e1000_phy_m88: | 1601 | case e1000_phy_m88: |
1535 | case e1000_phy_gg82563: | 1602 | case e1000_phy_gg82563: |
1603 | case e1000_phy_bm: | ||
1536 | case e1000_phy_82578: | 1604 | case e1000_phy_82578: |
1537 | case e1000_phy_82577: | ||
1538 | offset = M88E1000_PHY_SPEC_STATUS; | 1605 | offset = M88E1000_PHY_SPEC_STATUS; |
1539 | mask = M88E1000_PSSR_DOWNSHIFT; | 1606 | mask = M88E1000_PSSR_DOWNSHIFT; |
1540 | break; | 1607 | break; |
@@ -1545,7 +1612,7 @@ s32 e1000e_check_downshift(struct e1000_hw *hw) | |||
1545 | break; | 1612 | break; |
1546 | default: | 1613 | default: |
1547 | /* speed downshift not supported */ | 1614 | /* speed downshift not supported */ |
1548 | phy->speed_downgraded = 0; | 1615 | phy->speed_downgraded = false; |
1549 | return 0; | 1616 | return 0; |
1550 | } | 1617 | } |
1551 | 1618 | ||
@@ -1565,7 +1632,7 @@ s32 e1000e_check_downshift(struct e1000_hw *hw) | |||
1565 | * | 1632 | * |
1566 | * Polarity is determined based on the PHY specific status register. | 1633 | * Polarity is determined based on the PHY specific status register. |
1567 | **/ | 1634 | **/ |
1568 | static s32 e1000_check_polarity_m88(struct e1000_hw *hw) | 1635 | s32 e1000_check_polarity_m88(struct e1000_hw *hw) |
1569 | { | 1636 | { |
1570 | struct e1000_phy_info *phy = &hw->phy; | 1637 | struct e1000_phy_info *phy = &hw->phy; |
1571 | s32 ret_val; | 1638 | s32 ret_val; |
@@ -1590,7 +1657,7 @@ static s32 e1000_check_polarity_m88(struct e1000_hw *hw) | |||
1590 | * Polarity is determined based on the PHY port status register, and the | 1657 | * Polarity is determined based on the PHY port status register, and the |
1591 | * current speed (since there is no polarity at 100Mbps). | 1658 | * current speed (since there is no polarity at 100Mbps). |
1592 | **/ | 1659 | **/ |
1593 | static s32 e1000_check_polarity_igp(struct e1000_hw *hw) | 1660 | s32 e1000_check_polarity_igp(struct e1000_hw *hw) |
1594 | { | 1661 | { |
1595 | struct e1000_phy_info *phy = &hw->phy; | 1662 | struct e1000_phy_info *phy = &hw->phy; |
1596 | s32 ret_val; | 1663 | s32 ret_val; |
@@ -1628,6 +1695,39 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw) | |||
1628 | } | 1695 | } |
1629 | 1696 | ||
1630 | /** | 1697 | /** |
1698 | * e1000_check_polarity_ife - Check cable polarity for IFE PHY | ||
1699 | * @hw: pointer to the HW structure | ||
1700 | * | ||
1701 | * Polarity is determined based on whether polarity reversal is enabled. | ||
1702 | **/ | ||
1703 | s32 e1000_check_polarity_ife(struct e1000_hw *hw) | ||
1704 | { | ||
1705 | struct e1000_phy_info *phy = &hw->phy; | ||
1706 | s32 ret_val; | ||
1707 | u16 phy_data, offset, mask; | ||
1708 | |||
1709 | /* | ||
1710 | * Polarity is determined based on the reversal feature being enabled. | ||
1711 | */ | ||
1712 | if (phy->polarity_correction) { | ||
1713 | offset = IFE_PHY_EXTENDED_STATUS_CONTROL; | ||
1714 | mask = IFE_PESC_POLARITY_REVERSED; | ||
1715 | } else { | ||
1716 | offset = IFE_PHY_SPECIAL_CONTROL; | ||
1717 | mask = IFE_PSC_FORCE_POLARITY; | ||
1718 | } | ||
1719 | |||
1720 | ret_val = e1e_rphy(hw, offset, &phy_data); | ||
1721 | |||
1722 | if (!ret_val) | ||
1723 | phy->cable_polarity = (phy_data & mask) | ||
1724 | ? e1000_rev_polarity_reversed | ||
1725 | : e1000_rev_polarity_normal; | ||
1726 | |||
1727 | return ret_val; | ||
1728 | } | ||
1729 | |||
1730 | /** | ||
1631 | * e1000_wait_autoneg - Wait for auto-neg completion | 1731 | * e1000_wait_autoneg - Wait for auto-neg completion |
1632 | * @hw: pointer to the HW structure | 1732 | * @hw: pointer to the HW structure |
1633 | * | 1733 | * |
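The new e1000_check_polarity_ife() above picks which register and mask to test depending on whether automatic polarity correction is active. Here is a standalone sketch of that selection logic, separate from the patch, with placeholder register offsets and masks:

/* Sketch of the register/mask selection; offsets and masks are assumed. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10    /* assumed offset */
#define IFE_PHY_SPECIAL_CONTROL         0x11    /* assumed offset */
#define IFE_PESC_POLARITY_REVERSED      0x0100  /* assumed mask   */
#define IFE_PSC_FORCE_POLARITY          0x0020  /* assumed mask   */

int main(void)
{
        bool polarity_correction = true;        /* pretend phy->polarity_correction */
        uint16_t offset, mask;

        if (polarity_correction) {
                /* auto polarity is on: read the latched reversal status bit */
                offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
                mask = IFE_PESC_POLARITY_REVERSED;
        } else {
                /* polarity is forced: report whatever the force bit says */
                offset = IFE_PHY_SPECIAL_CONTROL;
                mask = IFE_PSC_FORCE_POLARITY;
        }

        printf("read PHY reg 0x%02X, test mask 0x%04X\n",
               (unsigned)offset, (unsigned)mask);
        return 0;
}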
@@ -1727,15 +1827,21 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) | |||
1727 | 1827 | ||
1728 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); | 1828 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); |
1729 | if (ret_val) | 1829 | if (ret_val) |
1730 | return ret_val; | 1830 | goto out; |
1731 | 1831 | ||
1732 | index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> | 1832 | index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> |
1733 | M88E1000_PSSR_CABLE_LENGTH_SHIFT; | 1833 | M88E1000_PSSR_CABLE_LENGTH_SHIFT; |
1834 | if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { | ||
1835 | ret_val = -E1000_ERR_PHY; | ||
1836 | goto out; | ||
1837 | } | ||
1838 | |||
1734 | phy->min_cable_length = e1000_m88_cable_length_table[index]; | 1839 | phy->min_cable_length = e1000_m88_cable_length_table[index]; |
1735 | phy->max_cable_length = e1000_m88_cable_length_table[index+1]; | 1840 | phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; |
1736 | 1841 | ||
1737 | phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; | 1842 | phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; |
1738 | 1843 | ||
1844 | out: | ||
1739 | return ret_val; | 1845 | return ret_val; |
1740 | } | 1846 | } |
1741 | 1847 | ||
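The hunk above adds a bounds check before indexing the M88 cable-length table. The following compilable sketch, separate from the patch, shows the same guarded lookup with an illustrative table and assumed mask/shift values:

/* Bounds-checked table lookup sketch; table, mask and shift are assumed. */
#include <stdint.h>
#include <stdio.h>

#define M88E1000_PSSR_CABLE_LENGTH       0x0380 /* assumed mask  */
#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7      /* assumed shift */

static const uint16_t cable_length_table[] = { 0, 50, 80, 110, 140, 140 };
#define CABLE_LENGTH_TABLE_SIZE \
        (sizeof(cable_length_table) / sizeof(cable_length_table[0]))

int main(void)
{
        uint16_t phy_data = 0x0180;     /* pretend PHY specific status readout */
        uint16_t index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
                         M88E1000_PSSR_CABLE_LENGTH_SHIFT;

        if (index >= CABLE_LENGTH_TABLE_SIZE - 1) {
                fprintf(stderr, "cable length index out of range\n");
                return 1;
        }

        /* each index maps to a [min, max] bracket from the table */
        printf("cable length %u-%u m\n",
               (unsigned)cable_length_table[index],
               (unsigned)cable_length_table[index + 1]);
        return 0;
}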
@@ -1746,7 +1852,7 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) | |||
1746 | * The automatic gain control (agc) normalizes the amplitude of the | 1852 | * The automatic gain control (agc) normalizes the amplitude of the |
1747 | * received signal, adjusting for the attenuation produced by the | 1853 | * received signal, adjusting for the attenuation produced by the |
1748 | * cable. By reading the AGC registers, which represent the | 1854 | * cable. By reading the AGC registers, which represent the |
1749 | * combination of course and fine gain value, the value can be put | 1855 | * combination of coarse and fine gain value, the value can be put |
1750 | * into a lookup table to obtain the approximate cable length | 1856 | * into a lookup table to obtain the approximate cable length |
1751 | * for each channel. | 1857 | * for each channel. |
1752 | **/ | 1858 | **/ |
@@ -1771,7 +1877,7 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) | |||
1771 | 1877 | ||
1772 | /* | 1878 | /* |
1773 | * Getting bits 15:9, which represent the combination of | 1879 | * Getting bits 15:9, which represent the combination of |
1774 | * course and fine gain values. The result is a number | 1880 | * coarse and fine gain values. The result is a number |
1775 | * that can be put into the lookup table to obtain the | 1881 | * that can be put into the lookup table to obtain the |
1776 | * approximate cable length. | 1882 | * approximate cable length. |
1777 | */ | 1883 | */ |
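As a concrete example of the "bits 15:9" extraction the comment describes, the sketch below (separate from the patch) pulls the combined coarse and fine gain field out of a sample AGC register value; the shift and mask are assumptions for illustration:

/* Coarse+fine gain field extraction sketch; shift and mask are assumed. */
#include <stdint.h>
#include <stdio.h>

#define AGC_LENGTH_SHIFT 9      /* assumed: gain field occupies bits 15:9 */
#define AGC_LENGTH_MASK  0x7F   /* 7 bits remain after the shift          */

int main(void)
{
        uint16_t agc_reg = 0x5A00;      /* pretend per-channel AGC readout */
        uint16_t cur_agc_index = (agc_reg >> AGC_LENGTH_SHIFT) & AGC_LENGTH_MASK;

        /* cur_agc_index would then index a cable-length lookup table */
        printf("AGC index = %u\n", (unsigned)cur_agc_index);
        return 0;
}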
@@ -1825,8 +1931,8 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) | |||
1825 | u16 phy_data; | 1931 | u16 phy_data; |
1826 | bool link; | 1932 | bool link; |
1827 | 1933 | ||
1828 | if (hw->phy.media_type != e1000_media_type_copper) { | 1934 | if (phy->media_type != e1000_media_type_copper) { |
1829 | hw_dbg(hw, "Phy info is only valid for copper media\n"); | 1935 | e_dbg("Phy info is only valid for copper media\n"); |
1830 | return -E1000_ERR_CONFIG; | 1936 | return -E1000_ERR_CONFIG; |
1831 | } | 1937 | } |
1832 | 1938 | ||
@@ -1835,7 +1941,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) | |||
1835 | return ret_val; | 1941 | return ret_val; |
1836 | 1942 | ||
1837 | if (!link) { | 1943 | if (!link) { |
1838 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | 1944 | e_dbg("Phy info is only valid if link is up\n"); |
1839 | return -E1000_ERR_CONFIG; | 1945 | return -E1000_ERR_CONFIG; |
1840 | } | 1946 | } |
1841 | 1947 | ||
@@ -1903,11 +2009,11 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) | |||
1903 | return ret_val; | 2009 | return ret_val; |
1904 | 2010 | ||
1905 | if (!link) { | 2011 | if (!link) { |
1906 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | 2012 | e_dbg("Phy info is only valid if link is up\n"); |
1907 | return -E1000_ERR_CONFIG; | 2013 | return -E1000_ERR_CONFIG; |
1908 | } | 2014 | } |
1909 | 2015 | ||
1910 | phy->polarity_correction = 1; | 2016 | phy->polarity_correction = true; |
1911 | 2017 | ||
1912 | ret_val = e1000_check_polarity_igp(hw); | 2018 | ret_val = e1000_check_polarity_igp(hw); |
1913 | if (ret_val) | 2019 | if (ret_val) |
@@ -1946,6 +2052,61 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) | |||
1946 | } | 2052 | } |
1947 | 2053 | ||
1948 | /** | 2054 | /** |
2055 | * e1000_get_phy_info_ife - Retrieves various IFE PHY states | ||
2056 | * @hw: pointer to the HW structure | ||
2057 | * | ||
2058 | * Populates "phy" structure with various feature states. | ||
2059 | **/ | ||
2060 | s32 e1000_get_phy_info_ife(struct e1000_hw *hw) | ||
2061 | { | ||
2062 | struct e1000_phy_info *phy = &hw->phy; | ||
2063 | s32 ret_val; | ||
2064 | u16 data; | ||
2065 | bool link; | ||
2066 | |||
2067 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | ||
2068 | if (ret_val) | ||
2069 | goto out; | ||
2070 | |||
2071 | if (!link) { | ||
2072 | e_dbg("Phy info is only valid if link is up\n"); | ||
2073 | ret_val = -E1000_ERR_CONFIG; | ||
2074 | goto out; | ||
2075 | } | ||
2076 | |||
2077 | ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); | ||
2078 | if (ret_val) | ||
2079 | goto out; | ||
2080 | phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE) | ||
2081 | ? false : true; | ||
2082 | |||
2083 | if (phy->polarity_correction) { | ||
2084 | ret_val = e1000_check_polarity_ife(hw); | ||
2085 | if (ret_val) | ||
2086 | goto out; | ||
2087 | } else { | ||
2088 | /* Polarity is forced */ | ||
2089 | phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) | ||
2090 | ? e1000_rev_polarity_reversed | ||
2091 | : e1000_rev_polarity_normal; | ||
2092 | } | ||
2093 | |||
2094 | ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); | ||
2095 | if (ret_val) | ||
2096 | goto out; | ||
2097 | |||
2098 | phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false; | ||
2099 | |||
2100 | /* The following parameters are undefined for 10/100 operation. */ | ||
2101 | phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; | ||
2102 | phy->local_rx = e1000_1000t_rx_status_undefined; | ||
2103 | phy->remote_rx = e1000_1000t_rx_status_undefined; | ||
2104 | |||
2105 | out: | ||
2106 | return ret_val; | ||
2107 | } | ||
2108 | |||
2109 | /** | ||
1949 | * e1000e_phy_sw_reset - PHY software reset | 2110 | * e1000e_phy_sw_reset - PHY software reset |
1950 | * @hw: pointer to the HW structure | 2111 | * @hw: pointer to the HW structure |
1951 | * | 2112 | * |
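Two details of the new e1000_get_phy_info_ife() above are easy to miss: the polarity-correction flag is the inverse of a "disable" bit, and MDI-X state is a plain status bit. A short sketch, separate from the patch, with assumed bit masks:

/* Flag decoding sketch; bit masks are assumed, not the driver's. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* assumed: 1 = auto polarity off */
#define IFE_PMC_MDIX_STATUS           0x0020 /* assumed: 1 = MDI-X, 0 = MDI    */

int main(void)
{
        uint16_t special_ctrl = 0x0000; /* pretend special control readout */
        uint16_t mdix_ctrl = 0x0020;    /* pretend MDI-X control readout   */

        /* note the inversion: the register bit disables auto polarity */
        bool polarity_correction = !(special_ctrl & IFE_PSC_AUTO_POLARITY_DISABLE);
        bool is_mdix = !!(mdix_ctrl & IFE_PMC_MDIX_STATUS);

        printf("polarity_correction=%d is_mdix=%d\n",
               polarity_correction, is_mdix);
        return 0;
}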
@@ -1990,7 +2151,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) | |||
1990 | if (ret_val) | 2151 | if (ret_val) |
1991 | return 0; | 2152 | return 0; |
1992 | 2153 | ||
1993 | ret_val = phy->ops.acquire_phy(hw); | 2154 | ret_val = phy->ops.acquire(hw); |
1994 | if (ret_val) | 2155 | if (ret_val) |
1995 | return ret_val; | 2156 | return ret_val; |
1996 | 2157 | ||
@@ -2005,7 +2166,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) | |||
2005 | 2166 | ||
2006 | udelay(150); | 2167 | udelay(150); |
2007 | 2168 | ||
2008 | phy->ops.release_phy(hw); | 2169 | phy->ops.release(hw); |
2009 | 2170 | ||
2010 | return e1000_get_phy_cfg_done(hw); | 2171 | return e1000_get_phy_cfg_done(hw); |
2011 | } | 2172 | } |
@@ -2031,7 +2192,7 @@ s32 e1000e_get_cfg_done(struct e1000_hw *hw) | |||
2031 | **/ | 2192 | **/ |
2032 | s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) | 2193 | s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) |
2033 | { | 2194 | { |
2034 | hw_dbg(hw, "Running IGP 3 PHY init script\n"); | 2195 | e_dbg("Running IGP 3 PHY init script\n"); |
2035 | 2196 | ||
2036 | /* PHY init IGP 3 */ | 2197 | /* PHY init IGP 3 */ |
2037 | /* Enable rise/fall, 10-mode work in class-A */ | 2198 | /* Enable rise/fall, 10-mode work in class-A */ |
@@ -2199,28 +2360,34 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) | |||
2199 | s32 e1000e_determine_phy_address(struct e1000_hw *hw) | 2360 | s32 e1000e_determine_phy_address(struct e1000_hw *hw) |
2200 | { | 2361 | { |
2201 | s32 ret_val = -E1000_ERR_PHY_TYPE; | 2362 | s32 ret_val = -E1000_ERR_PHY_TYPE; |
2202 | u32 phy_addr= 0; | 2363 | u32 phy_addr = 0; |
2203 | u32 i = 0; | 2364 | u32 i; |
2204 | enum e1000_phy_type phy_type = e1000_phy_unknown; | 2365 | enum e1000_phy_type phy_type = e1000_phy_unknown; |
2205 | 2366 | ||
2206 | do { | 2367 | hw->phy.id = phy_type; |
2207 | for (phy_addr = 0; phy_addr < 4; phy_addr++) { | 2368 | |
2208 | hw->phy.addr = phy_addr; | 2369 | for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { |
2370 | hw->phy.addr = phy_addr; | ||
2371 | i = 0; | ||
2372 | |||
2373 | do { | ||
2209 | e1000e_get_phy_id(hw); | 2374 | e1000e_get_phy_id(hw); |
2210 | phy_type = e1000e_get_phy_type_from_id(hw->phy.id); | 2375 | phy_type = e1000e_get_phy_type_from_id(hw->phy.id); |
2211 | 2376 | ||
2212 | /* | 2377 | /* |
2213 | * If phy_type is valid, break - we found our | 2378 | * If phy_type is valid, break - we found our |
2214 | * PHY address | 2379 | * PHY address |
2215 | */ | 2380 | */ |
2216 | if (phy_type != e1000_phy_unknown) { | 2381 | if (phy_type != e1000_phy_unknown) { |
2217 | ret_val = 0; | 2382 | ret_val = 0; |
2218 | break; | 2383 | goto out; |
2219 | } | 2384 | } |
2220 | } | 2385 | msleep(1); |
2221 | i++; | 2386 | i++; |
2222 | } while ((ret_val != 0) && (i < 100)); | 2387 | } while (i < 10); |
2388 | } | ||
2223 | 2389 | ||
2390 | out: | ||
2224 | return ret_val; | 2391 | return ret_val; |
2225 | } | 2392 | } |
2226 | 2393 | ||
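The rework above changes the probe strategy in e1000e_determine_phy_address(): instead of rescanning four addresses up to 100 times, it retries each address up to ten times with a 1 ms sleep between attempts. A simplified sketch of the new loop shape follows, separate from the patch; get_phy_type() stands in for the e1000e_get_phy_id()/e1000e_get_phy_type_from_id() pair:

/* Probe-loop sketch: outer loop over addresses, inner retry loop. */
#include <stdio.h>

#define MAX_PHY_ADDR 4
#define PHY_UNKNOWN  0

static int get_phy_type(unsigned int addr)
{
        return (addr == 2) ? 7 : PHY_UNKNOWN;   /* pretend address 2 answers */
}

int main(void)
{
        unsigned int phy_addr, i;

        for (phy_addr = 0; phy_addr < MAX_PHY_ADDR; phy_addr++) {
                for (i = 0; i < 10; i++) {
                        if (get_phy_type(phy_addr) != PHY_UNKNOWN) {
                                printf("found PHY at address %u\n", phy_addr);
                                return 0;
                        }
                        /* the driver sleeps 1 ms here before retrying */
                }
        }
        printf("no PHY found\n");
        return 1;
}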
@@ -2256,7 +2423,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) | |||
2256 | u32 page = offset >> IGP_PAGE_SHIFT; | 2423 | u32 page = offset >> IGP_PAGE_SHIFT; |
2257 | u32 page_shift = 0; | 2424 | u32 page_shift = 0; |
2258 | 2425 | ||
2259 | ret_val = hw->phy.ops.acquire_phy(hw); | 2426 | ret_val = hw->phy.ops.acquire(hw); |
2260 | if (ret_val) | 2427 | if (ret_val) |
2261 | return ret_val; | 2428 | return ret_val; |
2262 | 2429 | ||
@@ -2294,7 +2461,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) | |||
2294 | data); | 2461 | data); |
2295 | 2462 | ||
2296 | out: | 2463 | out: |
2297 | hw->phy.ops.release_phy(hw); | 2464 | hw->phy.ops.release(hw); |
2298 | return ret_val; | 2465 | return ret_val; |
2299 | } | 2466 | } |
2300 | 2467 | ||
@@ -2315,7 +2482,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2315 | u32 page = offset >> IGP_PAGE_SHIFT; | 2482 | u32 page = offset >> IGP_PAGE_SHIFT; |
2316 | u32 page_shift = 0; | 2483 | u32 page_shift = 0; |
2317 | 2484 | ||
2318 | ret_val = hw->phy.ops.acquire_phy(hw); | 2485 | ret_val = hw->phy.ops.acquire(hw); |
2319 | if (ret_val) | 2486 | if (ret_val) |
2320 | return ret_val; | 2487 | return ret_val; |
2321 | 2488 | ||
@@ -2352,7 +2519,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2352 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 2519 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
2353 | data); | 2520 | data); |
2354 | out: | 2521 | out: |
2355 | hw->phy.ops.release_phy(hw); | 2522 | hw->phy.ops.release(hw); |
2356 | return ret_val; | 2523 | return ret_val; |
2357 | } | 2524 | } |
2358 | 2525 | ||
@@ -2371,7 +2538,7 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2371 | s32 ret_val; | 2538 | s32 ret_val; |
2372 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); | 2539 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); |
2373 | 2540 | ||
2374 | ret_val = hw->phy.ops.acquire_phy(hw); | 2541 | ret_val = hw->phy.ops.acquire(hw); |
2375 | if (ret_val) | 2542 | if (ret_val) |
2376 | return ret_val; | 2543 | return ret_val; |
2377 | 2544 | ||
@@ -2397,7 +2564,7 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2397 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 2564 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
2398 | data); | 2565 | data); |
2399 | out: | 2566 | out: |
2400 | hw->phy.ops.release_phy(hw); | 2567 | hw->phy.ops.release(hw); |
2401 | return ret_val; | 2568 | return ret_val; |
2402 | } | 2569 | } |
2403 | 2570 | ||
@@ -2415,7 +2582,7 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) | |||
2415 | s32 ret_val; | 2582 | s32 ret_val; |
2416 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); | 2583 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); |
2417 | 2584 | ||
2418 | ret_val = hw->phy.ops.acquire_phy(hw); | 2585 | ret_val = hw->phy.ops.acquire(hw); |
2419 | if (ret_val) | 2586 | if (ret_val) |
2420 | return ret_val; | 2587 | return ret_val; |
2421 | 2588 | ||
@@ -2441,7 +2608,7 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) | |||
2441 | data); | 2608 | data); |
2442 | 2609 | ||
2443 | out: | 2610 | out: |
2444 | hw->phy.ops.release_phy(hw); | 2611 | hw->phy.ops.release(hw); |
2445 | return ret_val; | 2612 | return ret_val; |
2446 | } | 2613 | } |
2447 | 2614 | ||
@@ -2474,7 +2641,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | |||
2474 | /* Gig must be disabled for MDIO accesses to page 800 */ | 2641 | /* Gig must be disabled for MDIO accesses to page 800 */ |
2475 | if ((hw->mac.type == e1000_pchlan) && | 2642 | if ((hw->mac.type == e1000_pchlan) && |
2476 | (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) | 2643 | (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) |
2477 | hw_dbg(hw, "Attempting to access page 800 while gig enabled\n"); | 2644 | e_dbg("Attempting to access page 800 while gig enabled.\n"); |
2478 | 2645 | ||
2479 | /* All operations in this function are phy address 1 */ | 2646 | /* All operations in this function are phy address 1 */ |
2480 | hw->phy.addr = 1; | 2647 | hw->phy.addr = 1; |
@@ -2484,20 +2651,26 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | |||
2484 | (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); | 2651 | (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); |
2485 | 2652 | ||
2486 | ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg); | 2653 | ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg); |
2487 | if (ret_val) | 2654 | if (ret_val) { |
2655 | e_dbg("Could not read PHY page 769\n"); | ||
2488 | goto out; | 2656 | goto out; |
2657 | } | ||
2489 | 2658 | ||
2490 | /* First clear bit 4 to avoid a power state change */ | 2659 | /* First clear bit 4 to avoid a power state change */ |
2491 | phy_reg &= ~(BM_WUC_HOST_WU_BIT); | 2660 | phy_reg &= ~(BM_WUC_HOST_WU_BIT); |
2492 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); | 2661 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); |
2493 | if (ret_val) | 2662 | if (ret_val) { |
2663 | e_dbg("Could not clear PHY page 769 bit 4\n"); | ||
2494 | goto out; | 2664 | goto out; |
2665 | } | ||
2495 | 2666 | ||
2496 | /* Write bit 2 = 1, and clear bit 4 to 769_17 */ | 2667 | /* Write bit 2 = 1, and clear bit 4 to 769_17 */ |
2497 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, | 2668 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, |
2498 | phy_reg | BM_WUC_ENABLE_BIT); | 2669 | phy_reg | BM_WUC_ENABLE_BIT); |
2499 | if (ret_val) | 2670 | if (ret_val) { |
2671 | e_dbg("Could not write PHY page 769 bit 2\n"); | ||
2500 | goto out; | 2672 | goto out; |
2673 | } | ||
2501 | 2674 | ||
2502 | /* Select page 800 */ | 2675 | /* Select page 800 */ |
2503 | ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, | 2676 | ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, |
@@ -2505,21 +2678,25 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | |||
2505 | 2678 | ||
2506 | /* Write the page 800 offset value using opcode 0x11 */ | 2679 | /* Write the page 800 offset value using opcode 0x11 */ |
2507 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); | 2680 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); |
2508 | if (ret_val) | 2681 | if (ret_val) { |
2682 | e_dbg("Could not write address opcode to page 800\n"); | ||
2509 | goto out; | 2683 | goto out; |
2684 | } | ||
2510 | 2685 | ||
2511 | if (read) { | 2686 | if (read) { |
2512 | /* Read the page 800 value using opcode 0x12 */ | 2687 | /* Read the page 800 value using opcode 0x12 */ |
2513 | ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, | 2688 | ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, |
2514 | data); | 2689 | data); |
2515 | } else { | 2690 | } else { |
2516 | /* Read the page 800 value using opcode 0x12 */ | 2691 | /* Write the page 800 value using opcode 0x12 */ |
2517 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, | 2692 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, |
2518 | *data); | 2693 | *data); |
2519 | } | 2694 | } |
2520 | 2695 | ||
2521 | if (ret_val) | 2696 | if (ret_val) { |
2697 | e_dbg("Could not access data value from page 800\n"); | ||
2522 | goto out; | 2698 | goto out; |
2699 | } | ||
2523 | 2700 | ||
2524 | /* | 2701 | /* |
2525 | * Restore 769_17.2 to its original value | 2702 | * Restore 769_17.2 to its original value |
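For context on the error messages added above, the page 769/800 wakeup-register access is a fixed sequence of MDIC reads and writes, and each step can now report its own failure. Below is a condensed userspace sketch of the front half of that sequence, separate from the patch, with mock mdic helpers and assumed register/bit values:

/* Condensed sketch of the wakeup-register enable steps; values are assumed. */
#include <stdint.h>
#include <stdio.h>

#define BM_WUC_ENABLE_REG  17       /* assumed: PHY page 769, register 17 */
#define BM_WUC_ENABLE_BIT  0x0004   /* assumed: bit 2 */
#define BM_WUC_HOST_WU_BIT 0x0010   /* assumed: bit 4 */

static int mdic_read(int reg, uint16_t *val)
{
        (void)reg;
        *val = 0x0010;  /* pretend page 769, register 17 readout */
        return 0;
}

static int mdic_write(int reg, uint16_t val)
{
        (void)reg; (void)val;
        return 0;
}

int main(void)
{
        uint16_t phy_reg;

        if (mdic_read(BM_WUC_ENABLE_REG, &phy_reg)) {
                fprintf(stderr, "Could not read PHY page 769\n");
                return 1;
        }

        /* clear bit 4 first to avoid a power-state change, then set bit 2 */
        phy_reg &= ~BM_WUC_HOST_WU_BIT;
        if (mdic_write(BM_WUC_ENABLE_REG, phy_reg) ||
            mdic_write(BM_WUC_ENABLE_REG, phy_reg | BM_WUC_ENABLE_BIT)) {
                fprintf(stderr, "Could not set up PHY page 769\n");
                return 1;
        }

        /* ...the page 800 offset/data opcode accesses would follow here... */
        printf("wakeup register window enabled\n");
        return 0;
}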
@@ -2530,12 +2707,53 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | |||
2530 | 2707 | ||
2531 | /* Clear 769_17.2 */ | 2708 | /* Clear 769_17.2 */ |
2532 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); | 2709 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); |
2710 | if (ret_val) { | ||
2711 | e_dbg("Could not clear PHY page 769 bit 2\n"); | ||
2712 | goto out; | ||
2713 | } | ||
2533 | 2714 | ||
2534 | out: | 2715 | out: |
2535 | return ret_val; | 2716 | return ret_val; |
2536 | } | 2717 | } |
2537 | 2718 | ||
2538 | /** | 2719 | /** |
2720 | * e1000_power_up_phy_copper - Restore copper link in case of PHY power down | ||
2721 | * @hw: pointer to the HW structure | ||
2722 | * | ||
2723 | * In the case of a PHY power down to save power, to turn off link during a | ||
2724 | * driver unload, or when wake on LAN is not enabled, restore the link to its | ||
2725 | * previous settings. | ||

2726 | **/ | ||
2727 | void e1000_power_up_phy_copper(struct e1000_hw *hw) | ||
2728 | { | ||
2729 | u16 mii_reg = 0; | ||
2730 | |||
2731 | /* The PHY will retain its settings across a power down/up cycle */ | ||
2732 | e1e_rphy(hw, PHY_CONTROL, &mii_reg); | ||
2733 | mii_reg &= ~MII_CR_POWER_DOWN; | ||
2734 | e1e_wphy(hw, PHY_CONTROL, mii_reg); | ||
2735 | } | ||
2736 | |||
2737 | /** | ||
2738 | * e1000_power_down_phy_copper - Power down copper PHY | ||
2739 | * @hw: pointer to the HW structure | ||
2740 | * | ||
2741 | * Power down the PHY to save power, to turn off link during a driver unload, | ||
2742 | * or when wake on LAN is not enabled. | ||
2743 | * | ||
2744 | **/ | ||
2745 | void e1000_power_down_phy_copper(struct e1000_hw *hw) | ||
2746 | { | ||
2747 | u16 mii_reg = 0; | ||
2748 | |||
2749 | /* The PHY will retain its settings across a power down/up cycle */ | ||
2750 | e1e_rphy(hw, PHY_CONTROL, &mii_reg); | ||
2751 | mii_reg |= MII_CR_POWER_DOWN; | ||
2752 | e1e_wphy(hw, PHY_CONTROL, mii_reg); | ||
2753 | msleep(1); | ||
2754 | } | ||
2755 | |||
2756 | /** | ||
2539 | * e1000e_commit_phy - Soft PHY reset | 2757 | * e1000e_commit_phy - Soft PHY reset |
2540 | * @hw: pointer to the HW structure | 2758 | * @hw: pointer to the HW structure |
2541 | * | 2759 | * |
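The two new power helpers above differ only in whether they set or clear one bit of PHY_CONTROL. A tiny sketch, separate from the patch, with the power-down bit value assumed for illustration:

/* Power up/down bit toggle sketch; the bit value is assumed. */
#include <stdint.h>
#include <stdio.h>

#define MII_CR_POWER_DOWN 0x0800    /* assumed PHY_CONTROL power-down bit */

int main(void)
{
        uint16_t mii_reg = 0x1940;  /* pretend PHY_CONTROL readout */

        /* power down: set the bit; power up: clear it again */
        uint16_t down = mii_reg | MII_CR_POWER_DOWN;
        uint16_t up = down & ~MII_CR_POWER_DOWN;

        printf("down=%04X up=%04X\n", (unsigned)down, (unsigned)up);
        return 0;
}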
@@ -2544,8 +2762,8 @@ out: | |||
2544 | **/ | 2762 | **/ |
2545 | s32 e1000e_commit_phy(struct e1000_hw *hw) | 2763 | s32 e1000e_commit_phy(struct e1000_hw *hw) |
2546 | { | 2764 | { |
2547 | if (hw->phy.ops.commit_phy) | 2765 | if (hw->phy.ops.commit) |
2548 | return hw->phy.ops.commit_phy(hw); | 2766 | return hw->phy.ops.commit(hw); |
2549 | 2767 | ||
2550 | return 0; | 2768 | return 0; |
2551 | } | 2769 | } |
@@ -2624,7 +2842,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, | |||
2624 | bool in_slow_mode = false; | 2842 | bool in_slow_mode = false; |
2625 | 2843 | ||
2626 | if (!locked) { | 2844 | if (!locked) { |
2627 | ret_val = hw->phy.ops.acquire_phy(hw); | 2845 | ret_val = hw->phy.ops.acquire(hw); |
2628 | if (ret_val) | 2846 | if (ret_val) |
2629 | return ret_val; | 2847 | return ret_val; |
2630 | } | 2848 | } |
@@ -2658,19 +2876,18 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, | |||
2658 | page = 0; | 2876 | page = 0; |
2659 | 2877 | ||
2660 | if (reg > MAX_PHY_MULTI_PAGE_REG) { | 2878 | if (reg > MAX_PHY_MULTI_PAGE_REG) { |
2661 | if ((hw->phy.type != e1000_phy_82578) || | 2879 | u32 phy_addr = hw->phy.addr; |
2662 | ((reg != I82578_ADDR_REG) && | 2880 | |
2663 | (reg != I82578_ADDR_REG + 1))) { | 2881 | hw->phy.addr = 1; |
2664 | u32 phy_addr = hw->phy.addr; | 2882 | |
2665 | 2883 | /* Page is shifted left, PHY expects (page x 32) */ | |
2666 | hw->phy.addr = 1; | 2884 | ret_val = e1000e_write_phy_reg_mdic(hw, |
2667 | 2885 | IGP01E1000_PHY_PAGE_SELECT, | |
2668 | /* Page is shifted left, PHY expects (page x 32) */ | 2886 | (page << IGP_PAGE_SHIFT)); |
2669 | ret_val = e1000e_write_phy_reg_mdic(hw, | 2887 | hw->phy.addr = phy_addr; |
2670 | IGP01E1000_PHY_PAGE_SELECT, | 2888 | |
2671 | (page << IGP_PAGE_SHIFT)); | 2889 | if (ret_val) |
2672 | hw->phy.addr = phy_addr; | 2890 | goto out; |
2673 | } | ||
2674 | } | 2891 | } |
2675 | 2892 | ||
2676 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, | 2893 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, |
@@ -2678,10 +2895,10 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, | |||
2678 | out: | 2895 | out: |
2679 | /* Revert to MDIO fast mode, if applicable */ | 2896 | /* Revert to MDIO fast mode, if applicable */ |
2680 | if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) | 2897 | if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) |
2681 | ret_val = e1000_set_mdio_slow_mode_hv(hw, false); | 2898 | ret_val |= e1000_set_mdio_slow_mode_hv(hw, false); |
2682 | 2899 | ||
2683 | if (!locked) | 2900 | if (!locked) |
2684 | hw->phy.ops.release_phy(hw); | 2901 | hw->phy.ops.release(hw); |
2685 | 2902 | ||
2686 | return ret_val; | 2903 | return ret_val; |
2687 | } | 2904 | } |
@@ -2734,7 +2951,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, | |||
2734 | bool in_slow_mode = false; | 2951 | bool in_slow_mode = false; |
2735 | 2952 | ||
2736 | if (!locked) { | 2953 | if (!locked) { |
2737 | ret_val = hw->phy.ops.acquire_phy(hw); | 2954 | ret_val = hw->phy.ops.acquire(hw); |
2738 | if (ret_val) | 2955 | if (ret_val) |
2739 | return ret_val; | 2956 | return ret_val; |
2740 | } | 2957 | } |
@@ -2784,19 +3001,18 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, | |||
2784 | } | 3001 | } |
2785 | 3002 | ||
2786 | if (reg > MAX_PHY_MULTI_PAGE_REG) { | 3003 | if (reg > MAX_PHY_MULTI_PAGE_REG) { |
2787 | if ((hw->phy.type != e1000_phy_82578) || | 3004 | u32 phy_addr = hw->phy.addr; |
2788 | ((reg != I82578_ADDR_REG) && | 3005 | |
2789 | (reg != I82578_ADDR_REG + 1))) { | 3006 | hw->phy.addr = 1; |
2790 | u32 phy_addr = hw->phy.addr; | 3007 | |
2791 | 3008 | /* Page is shifted left, PHY expects (page x 32) */ | |
2792 | hw->phy.addr = 1; | 3009 | ret_val = e1000e_write_phy_reg_mdic(hw, |
2793 | 3010 | IGP01E1000_PHY_PAGE_SELECT, | |
2794 | /* Page is shifted left, PHY expects (page x 32) */ | 3011 | (page << IGP_PAGE_SHIFT)); |
2795 | ret_val = e1000e_write_phy_reg_mdic(hw, | 3012 | hw->phy.addr = phy_addr; |
2796 | IGP01E1000_PHY_PAGE_SELECT, | 3013 | |
2797 | (page << IGP_PAGE_SHIFT)); | 3014 | if (ret_val) |
2798 | hw->phy.addr = phy_addr; | 3015 | goto out; |
2799 | } | ||
2800 | } | 3016 | } |
2801 | 3017 | ||
2802 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, | 3018 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, |
@@ -2805,10 +3021,10 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, | |||
2805 | out: | 3021 | out: |
2806 | /* Revert to MDIO fast mode, if applicable */ | 3022 | /* Revert to MDIO fast mode, if applicable */ |
2807 | if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) | 3023 | if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) |
2808 | ret_val = e1000_set_mdio_slow_mode_hv(hw, false); | 3024 | ret_val |= e1000_set_mdio_slow_mode_hv(hw, false); |
2809 | 3025 | ||
2810 | if (!locked) | 3026 | if (!locked) |
2811 | hw->phy.ops.release_phy(hw); | 3027 | hw->phy.ops.release(hw); |
2812 | 3028 | ||
2813 | return ret_val; | 3029 | return ret_val; |
2814 | } | 3030 | } |
@@ -2884,7 +3100,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | |||
2884 | /* masking with 0x3F to remove the page from offset */ | 3100 | /* masking with 0x3F to remove the page from offset */ |
2885 | ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); | 3101 | ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); |
2886 | if (ret_val) { | 3102 | if (ret_val) { |
2887 | hw_dbg(hw, "Could not write PHY the HV address register\n"); | 3103 | e_dbg("Could not write PHY the HV address register\n"); |
2888 | goto out; | 3104 | goto out; |
2889 | } | 3105 | } |
2890 | 3106 | ||
@@ -2895,7 +3111,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | |||
2895 | ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); | 3111 | ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); |
2896 | 3112 | ||
2897 | if (ret_val) { | 3113 | if (ret_val) { |
2898 | hw_dbg(hw, "Could not read data value from HV data register\n"); | 3114 | e_dbg("Could not read data value from HV data register\n"); |
2899 | goto out; | 3115 | goto out; |
2900 | } | 3116 | } |
2901 | 3117 | ||
@@ -2923,12 +3139,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) | |||
2923 | goto out; | 3139 | goto out; |
2924 | 3140 | ||
2925 | /* Do not apply workaround if in PHY loopback bit 14 set */ | 3141 | /* Do not apply workaround if in PHY loopback bit 14 set */ |
2926 | hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &data); | 3142 | hw->phy.ops.read_reg(hw, PHY_CONTROL, &data); |
2927 | if (data & PHY_CONTROL_LB) | 3143 | if (data & PHY_CONTROL_LB) |
2928 | goto out; | 3144 | goto out; |
2929 | 3145 | ||
2930 | /* check if link is up and at 1Gbps */ | 3146 | /* check if link is up and at 1Gbps */ |
2931 | ret_val = hw->phy.ops.read_phy_reg(hw, BM_CS_STATUS, &data); | 3147 | ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data); |
2932 | if (ret_val) | 3148 | if (ret_val) |
2933 | goto out; | 3149 | goto out; |
2934 | 3150 | ||
@@ -2944,13 +3160,13 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) | |||
2944 | mdelay(200); | 3160 | mdelay(200); |
2945 | 3161 | ||
2946 | /* flush the packets in the fifo buffer */ | 3162 | /* flush the packets in the fifo buffer */ |
2947 | ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL, | 3163 | ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, |
2948 | HV_MUX_DATA_CTRL_GEN_TO_MAC | | 3164 | HV_MUX_DATA_CTRL_GEN_TO_MAC | |
2949 | HV_MUX_DATA_CTRL_FORCE_SPEED); | 3165 | HV_MUX_DATA_CTRL_FORCE_SPEED); |
2950 | if (ret_val) | 3166 | if (ret_val) |
2951 | goto out; | 3167 | goto out; |
2952 | 3168 | ||
2953 | ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL, | 3169 | ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, |
2954 | HV_MUX_DATA_CTRL_GEN_TO_MAC); | 3170 | HV_MUX_DATA_CTRL_GEN_TO_MAC); |
2955 | 3171 | ||
2956 | out: | 3172 | out: |
@@ -2971,7 +3187,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw) | |||
2971 | s32 ret_val; | 3187 | s32 ret_val; |
2972 | u16 data; | 3188 | u16 data; |
2973 | 3189 | ||
2974 | ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data); | 3190 | ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); |
2975 | 3191 | ||
2976 | if (!ret_val) | 3192 | if (!ret_val) |
2977 | phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) | 3193 | phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) |
@@ -2996,13 +3212,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) | |||
2996 | u16 phy_data; | 3212 | u16 phy_data; |
2997 | bool link; | 3213 | bool link; |
2998 | 3214 | ||
2999 | ret_val = phy->ops.read_phy_reg(hw, PHY_CONTROL, &phy_data); | 3215 | ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); |
3000 | if (ret_val) | 3216 | if (ret_val) |
3001 | goto out; | 3217 | goto out; |
3002 | 3218 | ||
3003 | e1000e_phy_force_speed_duplex_setup(hw, &phy_data); | 3219 | e1000e_phy_force_speed_duplex_setup(hw, &phy_data); |
3004 | 3220 | ||
3005 | ret_val = phy->ops.write_phy_reg(hw, PHY_CONTROL, phy_data); | 3221 | ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); |
3006 | if (ret_val) | 3222 | if (ret_val) |
3007 | goto out; | 3223 | goto out; |
3008 | 3224 | ||
@@ -3010,23 +3226,23 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) | |||
3010 | * Clear Auto-Crossover to force MDI manually. 82577 requires MDI | 3226 | * Clear Auto-Crossover to force MDI manually. 82577 requires MDI |
3011 | * forced whenever speed and duplex are forced. | 3227 | * forced whenever speed and duplex are forced. |
3012 | */ | 3228 | */ |
3013 | ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_CTRL_2, &phy_data); | 3229 | ret_val = phy->ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data); |
3014 | if (ret_val) | 3230 | if (ret_val) |
3015 | goto out; | 3231 | goto out; |
3016 | 3232 | ||
3017 | phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX; | 3233 | phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX; |
3018 | phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX; | 3234 | phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX; |
3019 | 3235 | ||
3020 | ret_val = phy->ops.write_phy_reg(hw, I82577_PHY_CTRL_2, phy_data); | 3236 | ret_val = phy->ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data); |
3021 | if (ret_val) | 3237 | if (ret_val) |
3022 | goto out; | 3238 | goto out; |
3023 | 3239 | ||
3024 | hw_dbg(hw, "I82577_PHY_CTRL_2: %X\n", phy_data); | 3240 | e_dbg("I82577_PHY_CTRL_2: %X\n", phy_data); |
3025 | 3241 | ||
3026 | udelay(1); | 3242 | udelay(1); |
3027 | 3243 | ||
3028 | if (phy->autoneg_wait_to_complete) { | 3244 | if (phy->autoneg_wait_to_complete) { |
3029 | hw_dbg(hw, "Waiting for forced speed/duplex link on 82577 phy\n"); | 3245 | e_dbg("Waiting for forced speed/duplex link on 82577 phy\n"); |
3030 | 3246 | ||
3031 | ret_val = e1000e_phy_has_link_generic(hw, | 3247 | ret_val = e1000e_phy_has_link_generic(hw, |
3032 | PHY_FORCE_LIMIT, | 3248 | PHY_FORCE_LIMIT, |
@@ -3036,7 +3252,7 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) | |||
3036 | goto out; | 3252 | goto out; |
3037 | 3253 | ||
3038 | if (!link) | 3254 | if (!link) |
3039 | hw_dbg(hw, "Link taking longer than expected.\n"); | 3255 | e_dbg("Link taking longer than expected.\n"); |
3040 | 3256 | ||
3041 | /* Try once more */ | 3257 | /* Try once more */ |
3042 | ret_val = e1000e_phy_has_link_generic(hw, | 3258 | ret_val = e1000e_phy_has_link_generic(hw, |
@@ -3072,7 +3288,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) | |||
3072 | goto out; | 3288 | goto out; |
3073 | 3289 | ||
3074 | if (!link) { | 3290 | if (!link) { |
3075 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | 3291 | e_dbg("Phy info is only valid if link is up\n"); |
3076 | ret_val = -E1000_ERR_CONFIG; | 3292 | ret_val = -E1000_ERR_CONFIG; |
3077 | goto out; | 3293 | goto out; |
3078 | } | 3294 | } |
@@ -3083,7 +3299,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) | |||
3083 | if (ret_val) | 3299 | if (ret_val) |
3084 | goto out; | 3300 | goto out; |
3085 | 3301 | ||
3086 | ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data); | 3302 | ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); |
3087 | if (ret_val) | 3303 | if (ret_val) |
3088 | goto out; | 3304 | goto out; |
3089 | 3305 | ||
@@ -3095,7 +3311,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) | |||
3095 | if (ret_val) | 3311 | if (ret_val) |
3096 | goto out; | 3312 | goto out; |
3097 | 3313 | ||
3098 | ret_val = phy->ops.read_phy_reg(hw, PHY_1000T_STATUS, &data); | 3314 | ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); |
3099 | if (ret_val) | 3315 | if (ret_val) |
3100 | goto out; | 3316 | goto out; |
3101 | 3317 | ||
@@ -3129,7 +3345,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) | |||
3129 | s32 ret_val; | 3345 | s32 ret_val; |
3130 | u16 phy_data, length; | 3346 | u16 phy_data, length; |
3131 | 3347 | ||
3132 | ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); | 3348 | ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); |
3133 | if (ret_val) | 3349 | if (ret_val) |
3134 | goto out; | 3350 | goto out; |
3135 | 3351 | ||
@@ -3137,7 +3353,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) | |||
3137 | I82577_DSTATUS_CABLE_LENGTH_SHIFT; | 3353 | I82577_DSTATUS_CABLE_LENGTH_SHIFT; |
3138 | 3354 | ||
3139 | if (length == E1000_CABLE_LENGTH_UNDEFINED) | 3355 | if (length == E1000_CABLE_LENGTH_UNDEFINED) |
3140 | ret_val = E1000_ERR_PHY; | 3356 | ret_val = -E1000_ERR_PHY; |
3141 | 3357 | ||
3142 | phy->cable_length = length; | 3358 | phy->cable_length = length; |
3143 | 3359 | ||
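Finally, the last hunk fixes the sign of the error code returned when the 82577 diagnostic register reports an undefined cable length. A sketch of the field decode and the check, separate from the patch, with mask, shift and sentinel values assumed for illustration:

/* 82577 cable-length decode sketch; constants are assumed. */
#include <stdint.h>
#include <stdio.h>

#define I82577_DSTATUS_CABLE_LENGTH       0x03FC /* assumed mask  */
#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2      /* assumed shift */
#define E1000_CABLE_LENGTH_UNDEFINED      0xFF   /* assumed value */

int main(void)
{
        uint16_t phy_data = 0x01A4;     /* pretend diagnostic status readout */
        uint16_t length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
                          I82577_DSTATUS_CABLE_LENGTH_SHIFT;

        if (length == E1000_CABLE_LENGTH_UNDEFINED) {
                /* the driver now reports this as a negative error code */
                fprintf(stderr, "cable length undefined\n");
                return 1;
        }

        printf("cable length %u m\n", (unsigned)length);
        return 0;
}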