Diffstat (limited to 'drivers/net/e1000e')
-rw-r--r--  drivers/net/e1000e/82571.c   | 209
-rw-r--r--  drivers/net/e1000e/defines.h |   2
-rw-r--r--  drivers/net/e1000e/e1000.h   |  26
-rw-r--r--  drivers/net/e1000e/es2lan.c  | 170
-rw-r--r--  drivers/net/e1000e/ethtool.c |  78
-rw-r--r--  drivers/net/e1000e/hw.h      |  45
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 291
-rw-r--r--  drivers/net/e1000e/lib.c     | 235
-rw-r--r--  drivers/net/e1000e/netdev.c  | 331
-rw-r--r--  drivers/net/e1000e/param.c   |   2
-rw-r--r--  drivers/net/e1000e/phy.c     | 208
11 files changed, 785 insertions, 812 deletions
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index d1e0563a67df..62bbc6e0a76a 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
 /*******************************************************************************

 Intel PRO/1000 Linux driver
-Copyright(c) 1999 - 2008 Intel Corporation.
+Copyright(c) 1999 - 2009 Intel Corporation.

 This program is free software; you can redistribute it and/or modify it
 under the terms and conditions of the GNU General Public License,
@@ -43,10 +43,6 @@
 * 82583V Gigabit Network Connection
 */

-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-
 #include "e1000.h"

 #define ID_LED_RESERVED_F746 0xF746
@@ -76,8 +72,6 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
 /**
 * e1000_init_phy_params_82571 - Init PHY func ptrs.
 * @hw: pointer to the HW structure
-*
-* This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 {
@@ -140,8 +134,6 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 /**
 * e1000_init_nvm_params_82571 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
-*
-* This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
 {
@@ -205,8 +197,6 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
 /**
 * e1000_init_mac_params_82571 - Init MAC func ptrs.
 * @hw: pointer to the HW structure
-*
-* This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 {
@@ -240,7 +230,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 /* Set rar entry count */
 mac->rar_entry_count = E1000_RAR_ENTRIES;
 /* Set if manageability features are enabled. */
-mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
+mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
+? true : false;

 /* check for link */
 switch (hw->phy.media_type) {
@@ -313,7 +304,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 * indicates that the bootagent or EFI code has
 * improperly left this bit enabled
 */
-hw_dbg(hw, "Please update your 82571 Bootagent\n");
+e_dbg("Please update your 82571 Bootagent\n");
 }
 ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
 }
@@ -487,7 +478,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
 }

 if (i == sw_timeout) {
-hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
+e_dbg("Driver can't access device - SMBI bit is set.\n");
 hw->dev_spec.e82571.smb_counter++;
 }
 /* Get the FW semaphore. */
@@ -505,7 +496,7 @@
 if (i == fw_timeout) {
 /* Release semaphores */
 e1000_put_hw_semaphore_82571(hw);
-hw_dbg(hw, "Driver can't access the NVM\n");
+e_dbg("Driver can't access the NVM\n");
 return -E1000_ERR_NVM;
 }

@@ -702,8 +693,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
 u16 words, u16 *data)
 {
 struct e1000_nvm_info *nvm = &hw->nvm;
-u32 i;
-u32 eewr = 0;
+u32 i, eewr = 0;
 s32 ret_val = 0;

 /*
@@ -712,7 +702,7 @@
 */
 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
 (words == 0)) {
-hw_dbg(hw, "nvm parameter(s) out of bounds\n");
+e_dbg("nvm parameter(s) out of bounds\n");
 return -E1000_ERR_NVM;
 }

@@ -753,7 +743,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
 timeout--;
 }
 if (!timeout) {
-hw_dbg(hw, "MNG configuration cycle has not completed.\n");
+e_dbg("MNG configuration cycle has not completed.\n");
 return -E1000_ERR_RESET;
 }

@@ -763,7 +753,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
 /**
 * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
-* @active: TRUE to enable LPLU, FALSE to disable
+* @active: true to enable LPLU, false to disable
 *
 * Sets the LPLU D0 state according to the active flag. When activating LPLU
 * this function also disables smart speed and vice versa. LPLU will not be
@@ -834,15 +824,11 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
 * e1000_reset_hw_82571 - Reset hardware
 * @hw: pointer to the HW structure
 *
-* This resets the hardware into a known state. This is a
-* function pointer entry point called by the api module.
+* This resets the hardware into a known state.
 **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-u32 ctrl;
-u32 extcnf_ctrl;
-u32 ctrl_ext;
-u32 icr;
+u32 ctrl, extcnf_ctrl, ctrl_ext, icr;
 s32 ret_val;
 u16 i = 0;

@@ -852,9 +838,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 */
 ret_val = e1000e_disable_pcie_master(hw);
 if (ret_val)
-hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+e_dbg("PCI-E Master disable polling has failed.\n");

-hw_dbg(hw, "Masking off all interrupts\n");
+e_dbg("Masking off all interrupts\n");
 ew32(IMC, 0xffffffff);

 ew32(RCTL, 0);
@@ -893,7 +879,7 @@

 ctrl = er32(CTRL);

-hw_dbg(hw, "Issuing a global reset to MAC\n");
+e_dbg("Issuing a global reset to MAC\n");
 ew32(CTRL, ctrl | E1000_CTRL_RST);

 if (hw->nvm.type == e1000_nvm_flash_hw) {
@@ -951,20 +937,18 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 struct e1000_mac_info *mac = &hw->mac;
 u32 reg_data;
 s32 ret_val;
-u16 i;
-u16 rar_count = mac->rar_entry_count;
+u16 i, rar_count = mac->rar_entry_count;

 e1000_initialize_hw_bits_82571(hw);

 /* Initialize identification LED */
 ret_val = e1000e_id_led_init(hw);
-if (ret_val) {
-hw_dbg(hw, "Error initializing identification LED\n");
-return ret_val;
-}
+if (ret_val)
+e_dbg("Error initializing identification LED\n");
+/* This is not fatal and we should not stop init due to this */

 /* Disabling VLAN filtering */
-hw_dbg(hw, "Initializing the IEEE VLAN\n");
+e_dbg("Initializing the IEEE VLAN\n");
 e1000e_clear_vfta(hw);

 /* Setup the receive address. */
@@ -978,7 +962,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 e1000e_init_rx_addrs(hw, rar_count);

 /* Zero out the Multicast HASH table */
-hw_dbg(hw, "Zeroing the MTA\n");
+e_dbg("Zeroing the MTA\n");
 for (i = 0; i < mac->mta_reg_count; i++)
 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

@@ -1125,6 +1109,13 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
 reg |= (1 << 22);
 ew32(GCR, reg);

+/*
+* Workaround for hardware errata.
+* apply workaround for hardware errata documented in errata
+* docs Fixes issue where some error prone or unreliable PCIe
+* completions are occurring, particularly with ASPM enabled.
+* Without fix, issue can cause tx timeouts.
+*/
 reg = er32(GCR2);
 reg |= 1;
 ew32(GCR2, reg);
@@ -1387,7 +1378,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 */
 mac->serdes_link_state =
 e1000_serdes_link_autoneg_progress;
-hw_dbg(hw, "AN_UP -> AN_PROG\n");
+e_dbg("AN_UP -> AN_PROG\n");
 }
 break;

@@ -1405,7 +1396,7 @@
 (ctrl & ~E1000_CTRL_SLU));
 mac->serdes_link_state =
 e1000_serdes_link_autoneg_progress;
-hw_dbg(hw, "FORCED_UP -> AN_PROG\n");
+e_dbg("FORCED_UP -> AN_PROG\n");
 }
 break;

@@ -1419,7 +1410,7 @@
 if (status & E1000_STATUS_LU) {
 mac->serdes_link_state =
 e1000_serdes_link_autoneg_complete;
-hw_dbg(hw, "AN_PROG -> AN_UP\n");
+e_dbg("AN_PROG -> AN_UP\n");
 } else {
 /*
 * Disable autoneg, force link up and
@@ -1434,12 +1425,12 @@
 ret_val =
 e1000e_config_fc_after_link_up(hw);
 if (ret_val) {
-hw_dbg(hw, "Error config flow control\n");
+e_dbg("Error config flow control\n");
 break;
 }
 mac->serdes_link_state =
 e1000_serdes_link_forced_up;
-hw_dbg(hw, "AN_PROG -> FORCED_UP\n");
+e_dbg("AN_PROG -> FORCED_UP\n");
 }
 mac->serdes_has_link = true;
 break;
@@ -1454,14 +1445,14 @@
 (ctrl & ~E1000_CTRL_SLU));
 mac->serdes_link_state =
 e1000_serdes_link_autoneg_progress;
-hw_dbg(hw, "DOWN -> AN_PROG\n");
+e_dbg("DOWN -> AN_PROG\n");
 break;
 }
 } else {
 if (!(rxcw & E1000_RXCW_SYNCH)) {
 mac->serdes_has_link = false;
 mac->serdes_link_state = e1000_serdes_link_down;
-hw_dbg(hw, "ANYSTATE -> DOWN\n");
+e_dbg("ANYSTATE -> DOWN\n");
 } else {
 /*
 * We have sync, and can tolerate one
@@ -1473,7 +1464,7 @@
 if (rxcw & E1000_RXCW_IV) {
 mac->serdes_link_state = e1000_serdes_link_down;
 mac->serdes_has_link = false;
-hw_dbg(hw, "ANYSTATE -> DOWN\n");
+e_dbg("ANYSTATE -> DOWN\n");
 }
 }
 }
@@ -1495,7 +1486,7 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)

 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
 if (ret_val) {
-hw_dbg(hw, "NVM Read Error\n");
+e_dbg("NVM Read Error\n");
 return ret_val;
 }

@@ -1525,7 +1516,7 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
 bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
 {
 if (hw->mac.type != e1000_82571)
-return 0;
+return false;

 return hw->dev_spec.e82571.laa_is_present;
 }
@@ -1616,44 +1607,42 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
 **/
 static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
 {
-u32 temp;
-
 e1000e_clear_hw_cntrs_base(hw);

-temp = er32(PRC64);
-temp = er32(PRC127);
-temp = er32(PRC255);
-temp = er32(PRC511);
-temp = er32(PRC1023);
-temp = er32(PRC1522);
-temp = er32(PTC64);
-temp = er32(PTC127);
-temp = er32(PTC255);
-temp = er32(PTC511);
-temp = er32(PTC1023);
-temp = er32(PTC1522);
+er32(PRC64);
+er32(PRC127);
+er32(PRC255);
+er32(PRC511);
+er32(PRC1023);
+er32(PRC1522);
+er32(PTC64);
+er32(PTC127);
+er32(PTC255);
+er32(PTC511);
+er32(PTC1023);
+er32(PTC1522);

-temp = er32(ALGNERRC);
-temp = er32(RXERRC);
-temp = er32(TNCRS);
-temp = er32(CEXTERR);
-temp = er32(TSCTC);
-temp = er32(TSCTFC);
+er32(ALGNERRC);
+er32(RXERRC);
+er32(TNCRS);
+er32(CEXTERR);
+er32(TSCTC);
+er32(TSCTFC);

-temp = er32(MGTPRC);
-temp = er32(MGTPDC);
-temp = er32(MGTPTC);
+er32(MGTPRC);
+er32(MGTPDC);
+er32(MGTPTC);

-temp = er32(IAC);
-temp = er32(ICRXOC);
+er32(IAC);
+er32(ICRXOC);

-temp = er32(ICRXPTC);
-temp = er32(ICRXATC);
-temp = er32(ICTXPTC);
-temp = er32(ICTXATC);
-temp = er32(ICTXQEC);
-temp = er32(ICTXQMTC);
-temp = er32(ICRXDMTC);
+er32(ICRXPTC);
+er32(ICRXATC);
+er32(ICTXPTC);
+er32(ICTXATC);
+er32(ICTXQEC);
+er32(ICTXQMTC);
+er32(ICRXDMTC);
 }

 static struct e1000_mac_operations e82571_mac_ops = {
@@ -1675,64 +1664,64 @@ static struct e1000_mac_operations e82571_mac_ops = {
 };

 static struct e1000_phy_operations e82_phy_ops_igp = {
-.acquire_phy = e1000_get_hw_semaphore_82571,
+.acquire = e1000_get_hw_semaphore_82571,
 .check_reset_block = e1000e_check_reset_block_generic,
-.commit_phy = NULL,
+.commit = NULL,
 .force_speed_duplex = e1000e_phy_force_speed_duplex_igp,
 .get_cfg_done = e1000_get_cfg_done_82571,
 .get_cable_length = e1000e_get_cable_length_igp_2,
-.get_phy_info = e1000e_get_phy_info_igp,
-.read_phy_reg = e1000e_read_phy_reg_igp,
-.release_phy = e1000_put_hw_semaphore_82571,
-.reset_phy = e1000e_phy_hw_reset_generic,
+.get_info = e1000e_get_phy_info_igp,
+.read_reg = e1000e_read_phy_reg_igp,
+.release = e1000_put_hw_semaphore_82571,
+.reset = e1000e_phy_hw_reset_generic,
 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
-.write_phy_reg = e1000e_write_phy_reg_igp,
+.write_reg = e1000e_write_phy_reg_igp,
 .cfg_on_link_up = NULL,
 };

 static struct e1000_phy_operations e82_phy_ops_m88 = {
-.acquire_phy = e1000_get_hw_semaphore_82571,
+.acquire = e1000_get_hw_semaphore_82571,
 .check_reset_block = e1000e_check_reset_block_generic,
-.commit_phy = e1000e_phy_sw_reset,
+.commit = e1000e_phy_sw_reset,
 .force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
 .get_cfg_done = e1000e_get_cfg_done,
 .get_cable_length = e1000e_get_cable_length_m88,
-.get_phy_info = e1000e_get_phy_info_m88,
-.read_phy_reg = e1000e_read_phy_reg_m88,
-.release_phy = e1000_put_hw_semaphore_82571,
-.reset_phy = e1000e_phy_hw_reset_generic,
+.get_info = e1000e_get_phy_info_m88,
+.read_reg = e1000e_read_phy_reg_m88,
+.release = e1000_put_hw_semaphore_82571,
+.reset = e1000e_phy_hw_reset_generic,
 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
-.write_phy_reg = e1000e_write_phy_reg_m88,
+.write_reg = e1000e_write_phy_reg_m88,
 .cfg_on_link_up = NULL,
 };

 static struct e1000_phy_operations e82_phy_ops_bm = {
-.acquire_phy = e1000_get_hw_semaphore_82571,
+.acquire = e1000_get_hw_semaphore_82571,
 .check_reset_block = e1000e_check_reset_block_generic,
-.commit_phy = e1000e_phy_sw_reset,
+.commit = e1000e_phy_sw_reset,
 .force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
 .get_cfg_done = e1000e_get_cfg_done,
 .get_cable_length = e1000e_get_cable_length_m88,
-.get_phy_info = e1000e_get_phy_info_m88,
-.read_phy_reg = e1000e_read_phy_reg_bm2,
-.release_phy = e1000_put_hw_semaphore_82571,
-.reset_phy = e1000e_phy_hw_reset_generic,
+.get_info = e1000e_get_phy_info_m88,
+.read_reg = e1000e_read_phy_reg_bm2,
+.release = e1000_put_hw_semaphore_82571,
+.reset = e1000e_phy_hw_reset_generic,
 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
-.write_phy_reg = e1000e_write_phy_reg_bm2,
+.write_reg = e1000e_write_phy_reg_bm2,
 .cfg_on_link_up = NULL,
 };

 static struct e1000_nvm_operations e82571_nvm_ops = {
-.acquire_nvm = e1000_acquire_nvm_82571,
-.read_nvm = e1000e_read_nvm_eerd,
-.release_nvm = e1000_release_nvm_82571,
-.update_nvm = e1000_update_nvm_checksum_82571,
+.acquire = e1000_acquire_nvm_82571,
+.read = e1000e_read_nvm_eerd,
+.release = e1000_release_nvm_82571,
+.update = e1000_update_nvm_checksum_82571,
 .valid_led_default = e1000_valid_led_default_82571,
-.validate_nvm = e1000_validate_nvm_checksum_82571,
-.write_nvm = e1000_write_nvm_82571,
+.validate = e1000_validate_nvm_checksum_82571,
+.write = e1000_write_nvm_82571,
 };

 struct e1000_info e1000_82571_info = {
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 1190167a8b3d..86d2809763c3 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
 /*******************************************************************************

 Intel PRO/1000 Linux driver
-Copyright(c) 1999 - 2008 Intel Corporation.
+Copyright(c) 1999 - 2009 Intel Corporation.

 This program is free software; you can redistribute it and/or modify it
 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 3e187b0e4203..3102d738cfd1 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
 /*******************************************************************************

 Intel PRO/1000 Linux driver
-Copyright(c) 1999 - 2008 Intel Corporation.
+Copyright(c) 1999 - 2009 Intel Corporation.

 This program is free software; you can redistribute it and/or modify it
 under the terms and conditions of the GNU General Public License,
@@ -36,6 +36,7 @@
 #include <linux/workqueue.h>
 #include <linux/io.h>
 #include <linux/netdevice.h>
+#include <linux/pci.h>

 #include "hw.h"

@@ -47,9 +48,9 @@ struct e1000_info;

 #ifdef DEBUG
 #define e_dbg(format, arg...) \
-e_printk(KERN_DEBUG , adapter, format, ## arg)
+e_printk(KERN_DEBUG , hw->adapter, format, ## arg)
 #else
-#define e_dbg(format, arg...) do { (void)(adapter); } while (0)
+#define e_dbg(format, arg...) do { (void)(hw); } while (0)
 #endif

 #define e_err(format, arg...) \
@@ -331,7 +332,6 @@ struct e1000_adapter {
 /* OS defined structs */
 struct net_device *netdev;
 struct pci_dev *pdev;
-struct net_device_stats net_stats;

 /* structs defined in e1000_hw.h */
 struct e1000_hw hw;
@@ -366,6 +366,7 @@ struct e1000_adapter {
 struct work_struct downshift_task;
 struct work_struct update_phy_task;
 struct work_struct led_blink_task;
+struct work_struct print_hang_task;
 };

 struct e1000_info {
@@ -488,6 +489,7 @@ extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
 extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
 extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
 extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
+extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);

 extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
 extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
@@ -585,7 +587,7 @@ extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);

 static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
 {
-return hw->phy.ops.reset_phy(hw);
+return hw->phy.ops.reset(hw);
 }

 static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
@@ -595,12 +597,12 @@ static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
 }

 static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
 {
-return hw->phy.ops.read_phy_reg(hw, offset, data);
+return hw->phy.ops.read_reg(hw, offset, data);
 }

 static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
 {
-return hw->phy.ops.write_phy_reg(hw, offset, data);
+return hw->phy.ops.write_reg(hw, offset, data);
 }

 static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
@@ -620,27 +622,27 @@ extern s32 e1000e_read_mac_addr(struct e1000_hw *hw);

 static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
 {
-return hw->nvm.ops.validate_nvm(hw);
+return hw->nvm.ops.validate(hw);
 }

 static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
 {
-return hw->nvm.ops.update_nvm(hw);
+return hw->nvm.ops.update(hw);
 }

 static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 {
-return hw->nvm.ops.read_nvm(hw, offset, words, data);
+return hw->nvm.ops.read(hw, offset, words, data);
 }

 static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 {
-return hw->nvm.ops.write_nvm(hw, offset, words, data);
+return hw->nvm.ops.write(hw, offset, words, data);
 }

 static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
 {
-return hw->phy.ops.get_phy_info(hw);
+return hw->phy.ops.get_info(hw);
 }

 static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index ae5d73689353..e50579859e06 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
 /*******************************************************************************

 Intel PRO/1000 Linux driver
-Copyright(c) 1999 - 2008 Intel Corporation.
+Copyright(c) 1999 - 2009 Intel Corporation.

 This program is free software; you can redistribute it and/or modify it
 under the terms and conditions of the GNU General Public License,
@@ -31,11 +31,6 @@
 * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
 */

-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-
 #include "e1000.h"

 #define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
@@ -121,8 +116,6 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
 /**
 * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
 * @hw: pointer to the HW structure
-*
-* This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
 {
@@ -152,8 +145,6 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
 /**
 * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
 * @hw: pointer to the HW structure
-*
-* This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
 {
@@ -200,8 +191,6 @@
 /**
 * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
 * @hw: pointer to the HW structure
-*
-* This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
 {
@@ -224,7 +213,8 @@
 /* Set rar entry count */
 mac->rar_entry_count = E1000_RAR_ENTRIES;
 /* Set if manageability features are enabled. */
-mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
+mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
+? true : false;

 /* check for link */
 switch (hw->phy.media_type) {
@@ -272,8 +262,7 @@ static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
 * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
 * @hw: pointer to the HW structure
 *
-* A wrapper to acquire access rights to the correct PHY. This is a
-* function pointer entry point called by the api module.
+* A wrapper to acquire access rights to the correct PHY.
 **/
 static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
 {
@@ -287,8 +276,7 @@
 * e1000_release_phy_80003es2lan - Release rights to access PHY
 * @hw: pointer to the HW structure
 *
-* A wrapper to release access rights to the correct PHY. This is a
-* function pointer entry point called by the api module.
+* A wrapper to release access rights to the correct PHY.
 **/
 static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
 {
@@ -333,8 +321,7 @@ static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
 * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
 * @hw: pointer to the HW structure
 *
-* Acquire the semaphore to access the EEPROM. This is a function
-* pointer entry point called by the api module.
+* Acquire the semaphore to access the EEPROM.
 **/
 static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
 {
@@ -356,8 +343,7 @@
 * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
 * @hw: pointer to the HW structure
 *
-* Release the semaphore used to access the EEPROM. This is a
-* function pointer entry point called by the api module.
+* Release the semaphore used to access the EEPROM.
 **/
 static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
 {
@@ -399,8 +385,7 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
 }

 if (i == timeout) {
-hw_dbg(hw,
-"Driver can't access resource, SW_FW_SYNC timeout.\n");
+e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
 return -E1000_ERR_SWFW_SYNC;
 }

@@ -440,8 +425,7 @@ static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
 * @offset: offset of the register to read
 * @data: pointer to the data returned from the operation
 *
-* Read the GG82563 PHY register. This is a function pointer entry
-* point called by the api module.
+* Read the GG82563 PHY register.
 **/
 static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 u32 offset, u16 *data)
@@ -505,8 +489,7 @@
 * @offset: offset of the register to read
 * @data: value to write to the register
 *
-* Write to the GG82563 PHY register. This is a function pointer entry
-* point called by the api module.
+* Write to the GG82563 PHY register.
 **/
 static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 u32 offset, u16 data)
@@ -571,8 +554,7 @@
 * @words: number of words to write
 * @data: buffer of data to write to the NVM
 *
-* Write "words" of data to the ESB2 NVM. This is a function
-* pointer entry point called by the api module.
+* Write "words" of data to the ESB2 NVM.
 **/
 static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
 u16 words, u16 *data)
@@ -602,7 +584,7 @@ static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
 timeout--;
 }
 if (!timeout) {
-hw_dbg(hw, "MNG configuration cycle has not completed.\n");
+e_dbg("MNG configuration cycle has not completed.\n");
 return -E1000_ERR_RESET;
 }

@@ -635,7 +617,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 if (ret_val)
 return ret_val;

-hw_dbg(hw, "GG82563 PSCR: %X\n", phy_data);
+e_dbg("GG82563 PSCR: %X\n", phy_data);

 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
 if (ret_val)
@@ -653,7 +635,7 @@
 udelay(1);

 if (hw->phy.autoneg_wait_to_complete) {
-hw_dbg(hw, "Waiting for forced speed/duplex link "
+e_dbg("Waiting for forced speed/duplex link "
 "on GG82563 phy.\n");

 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
@@ -713,8 +695,7 @@ static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
 {
 struct e1000_phy_info *phy = &hw->phy;
 s32 ret_val;
-u16 phy_data;
-u16 index;
+u16 phy_data, index;

 ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
 if (ret_val)
@@ -736,7 +717,6 @@
 * @duplex: pointer to duplex buffer
 *
 * Retrieve the current speed and duplex configuration.
-* This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
 u16 *duplex)
@@ -762,12 +742,10 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
 * @hw: pointer to the HW structure
 *
 * Perform a global reset to the ESB2 controller.
-* This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 {
-u32 ctrl;
-u32 icr;
+u32 ctrl, icr;
 s32 ret_val;

 /*
@@ -776,9 +754,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 */
 ret_val = e1000e_disable_pcie_master(hw);
 if (ret_val)
-hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+e_dbg("PCI-E Master disable polling has failed.\n");

-hw_dbg(hw, "Masking off all interrupts\n");
+e_dbg("Masking off all interrupts\n");
 ew32(IMC, 0xffffffff);

 ew32(RCTL, 0);
@@ -790,7 +768,7 @@
 ctrl = er32(CTRL);

 ret_val = e1000_acquire_phy_80003es2lan(hw);
-hw_dbg(hw, "Issuing a global reset to MAC\n");
+e_dbg("Issuing a global reset to MAC\n");
 ew32(CTRL, ctrl | E1000_CTRL_RST);
 e1000_release_phy_80003es2lan(hw);

@@ -811,7 +789,6 @@
 * @hw: pointer to the HW structure
 *
 * Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
-* This is a function pointer entry point called by the api module.
 **/
 static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
 {
@@ -824,20 +801,19 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)

 /* Initialize identification LED */
 ret_val = e1000e_id_led_init(hw);
-if (ret_val) {
-hw_dbg(hw, "Error initializing identification LED\n");
-return ret_val;
-}
+if (ret_val)
+e_dbg("Error initializing identification LED\n");
+/* This is not fatal and we should not stop init due to this */

 /* Disabling VLAN filtering */
-hw_dbg(hw, "Initializing the IEEE VLAN\n");
+e_dbg("Initializing the IEEE VLAN\n");
 e1000e_clear_vfta(hw);

 /* Setup the receive address. */
 e1000e_init_rx_addrs(hw, mac->rar_entry_count);

 /* Zero out the Multicast HASH table */
-hw_dbg(hw, "Zeroing the MTA\n");
+e_dbg("Zeroing the MTA\n");
 for (i = 0; i < mac->mta_reg_count; i++)
 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

@@ -994,7 +970,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
 /* SW Reset the PHY so all changes take effect */
 ret_val = e1000e_commit_phy(hw);
 if (ret_val) {
-hw_dbg(hw, "Error Resetting the PHY\n");
+e_dbg("Error Resetting the PHY\n");
 return ret_val;
 }

@@ -1325,44 +1301,42 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
 **/
 static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
 {
-u32 temp;
-
 e1000e_clear_hw_cntrs_base(hw);

-temp = er32(PRC64);
-temp = er32(PRC127);
-temp = er32(PRC255);
-temp = er32(PRC511);
-temp = er32(PRC1023);
-temp = er32(PRC1522);
-temp = er32(PTC64);
-temp = er32(PTC127);
-temp = er32(PTC255);
-temp = er32(PTC511);
-temp = er32(PTC1023);
-temp = er32(PTC1522);
+er32(PRC64);
+er32(PRC127);
+er32(PRC255);
+er32(PRC511);
+er32(PRC1023);
+er32(PRC1522);
+er32(PTC64);
+er32(PTC127);
+er32(PTC255);
+er32(PTC511);
+er32(PTC1023);
+er32(PTC1522);

-temp = er32(ALGNERRC);
-temp = er32(RXERRC);
-temp = er32(TNCRS);
-temp = er32(CEXTERR);
-temp = er32(TSCTC);
-temp = er32(TSCTFC);
+er32(ALGNERRC);
+er32(RXERRC);
+er32(TNCRS);
+er32(CEXTERR);
+er32(TSCTC);
+er32(TSCTFC);

-temp = er32(MGTPRC);
-temp = er32(MGTPDC);
-temp = er32(MGTPTC);
+er32(MGTPRC);
+er32(MGTPDC);
+er32(MGTPTC);

-temp = er32(IAC);
-temp = er32(ICRXOC);
+er32(IAC);
+er32(ICRXOC);

-temp = er32(ICRXPTC);
-temp = er32(ICRXATC);
-temp = er32(ICTXPTC);
-temp = er32(ICTXATC);
-temp = er32(ICTXQEC);
-temp = er32(ICTXQMTC);
-temp = er32(ICRXDMTC);
+er32(ICRXPTC);
+er32(ICRXATC);
+er32(ICTXPTC);
+er32(ICTXATC);
+er32(ICTXQEC);
+er32(ICTXQMTC);
+er32(ICRXDMTC);
 }

 static struct e1000_mac_operations es2_mac_ops = {
@@ -1384,30 +1358,30 @@ static struct e1000_mac_operations es2_mac_ops = {
 };

 static struct e1000_phy_operations es2_phy_ops = {
-.acquire_phy = e1000_acquire_phy_80003es2lan,
+.acquire = e1000_acquire_phy_80003es2lan,
 .check_reset_block = e1000e_check_reset_block_generic,
-.commit_phy = e1000e_phy_sw_reset,
+.commit = e1000e_phy_sw_reset,
 .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan,
 .get_cfg_done = e1000_get_cfg_done_80003es2lan,
 .get_cable_length = e1000_get_cable_length_80003es2lan,
-.get_phy_info = e1000e_get_phy_info_m88,
-.read_phy_reg = e1000_read_phy_reg_gg82563_80003es2lan,
-.release_phy = e1000_release_phy_80003es2lan,
-.reset_phy = e1000e_phy_hw_reset_generic,
+.get_info = e1000e_get_phy_info_m88,
+.read_reg = e1000_read_phy_reg_gg82563_80003es2lan,
+.release = e1000_release_phy_80003es2lan,
+.reset = e1000e_phy_hw_reset_generic,
 .set_d0_lplu_state = NULL,
 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
-.write_phy_reg = e1000_write_phy_reg_gg82563_80003es2lan,
+.write_reg = e1000_write_phy_reg_gg82563_80003es2lan,
 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
 };

 static struct e1000_nvm_operations es2_nvm_ops = {
-.acquire_nvm = e1000_acquire_nvm_80003es2lan,
-.read_nvm = e1000e_read_nvm_eerd,
-.release_nvm = e1000_release_nvm_80003es2lan,
-.update_nvm = e1000e_update_nvm_checksum_generic,
+.acquire = e1000_acquire_nvm_80003es2lan,
+.read = e1000e_read_nvm_eerd,
+.release = e1000_release_nvm_80003es2lan,
+.update = e1000e_update_nvm_checksum_generic,
 .valid_led_default = e1000e_valid_led_default,
-.validate_nvm = e1000e_validate_nvm_checksum_generic,
-.write_nvm = e1000_write_nvm_80003es2lan,
+.validate = e1000e_validate_nvm_checksum_generic,
+.write = e1000_write_nvm_80003es2lan,
 };

 struct e1000_info e1000_es2_info = {
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index e82638ecae88..b6243cad3103 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************

 Intel PRO/1000 Linux driver
-Copyright(c) 1999 - 2008 Intel Corporation.
+Copyright(c) 1999 - 2009 Intel Corporation.

 This program is free software; you can redistribute it and/or modify it
 under the terms and conditions of the GNU General Public License,
@@ -35,14 +35,22 @@

 #include "e1000.h"

+enum {NETDEV_STATS, E1000_STATS};
+
 struct e1000_stats {
 char stat_string[ETH_GSTRING_LEN];
+int type;
 int sizeof_stat;
 int stat_offset;
 };

-#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \
-offsetof(struct e1000_adapter, m)
+#define E1000_STAT(m) E1000_STATS, \
+sizeof(((struct e1000_adapter *)0)->m), \
+offsetof(struct e1000_adapter, m)
+#define E1000_NETDEV_STAT(m) NETDEV_STATS, \
+sizeof(((struct net_device *)0)->m), \
+offsetof(struct net_device, m)
+
 static const struct e1000_stats e1000_gstrings_stats[] = {
 { "rx_packets", E1000_STAT(stats.gprc) },
 { "tx_packets", E1000_STAT(stats.gptc) },
@@ -52,21 +60,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
 { "tx_broadcast", E1000_STAT(stats.bptc) },
 { "rx_multicast", E1000_STAT(stats.mprc) },
 { "tx_multicast", E1000_STAT(stats.mptc) },
-{ "rx_errors", E1000_STAT(net_stats.rx_errors) },
-{ "tx_errors", E1000_STAT(net_stats.tx_errors) },
-{ "tx_dropped", E1000_STAT(net_stats.tx_dropped) },
+{ "rx_errors", E1000_NETDEV_STAT(stats.rx_errors) },
+{ "tx_errors", E1000_NETDEV_STAT(stats.tx_errors) },
+{ "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
 { "multicast", E1000_STAT(stats.mprc) },
 { "collisions", E1000_STAT(stats.colc) },
-{ "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) },
-{ "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) },
+{ "rx_length_errors", E1000_NETDEV_STAT(stats.rx_length_errors) },
+{ "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
 { "rx_crc_errors", E1000_STAT(stats.crcerrs) },
-{ "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
+{ "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
 { "rx_missed_errors", E1000_STAT(stats.mpc) },
 { "tx_aborted_errors", E1000_STAT(stats.ecol) },
 { "tx_carrier_errors", E1000_STAT(stats.tncrs) },
-{ "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) },
-{ "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) },
+{ "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
+{ "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
 { "tx_window_errors", E1000_STAT(stats.latecol) },
 { "tx_abort_late_coll", E1000_STAT(stats.latecol) },
 { "tx_deferred_ok", E1000_STAT(stats.dc) },
@@ -182,6 +190,17 @@ static int e1000_get_settings(struct net_device *netdev, | |||
182 | static u32 e1000_get_link(struct net_device *netdev) | 190 | static u32 e1000_get_link(struct net_device *netdev) |
183 | { | 191 | { |
184 | struct e1000_adapter *adapter = netdev_priv(netdev); | 192 | struct e1000_adapter *adapter = netdev_priv(netdev); |
193 | struct e1000_mac_info *mac = &adapter->hw.mac; | ||
194 | |||
195 | /* | ||
196 | * If the link is not reported up to netdev, interrupts are disabled, | ||
197 | * and so the physical link state may have changed since we last | ||
198 | * looked. Set get_link_status to make sure that the true link | ||
199 | * state is interrogated, rather than pulling a cached and possibly | ||
200 | * stale link state from the driver. | ||
201 | */ | ||
202 | if (!netif_carrier_ok(netdev)) | ||
203 | mac->get_link_status = 1; | ||
185 | 204 | ||
186 | return e1000_has_link(adapter); | 205 | return e1000_has_link(adapter); |
187 | } | 206 | } |
@@ -596,7 +615,9 @@ static int e1000_set_eeprom(struct net_device *netdev, | |||
596 | * and flush shadow RAM for applicable controllers | 615 | * and flush shadow RAM for applicable controllers |
597 | */ | 616 | */ |
598 | if ((first_word <= NVM_CHECKSUM_REG) || | 617 | if ((first_word <= NVM_CHECKSUM_REG) || |
599 | (hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82573)) | 618 | (hw->mac.type == e1000_82583) || |
619 | (hw->mac.type == e1000_82574) || | ||
620 | (hw->mac.type == e1000_82573)) | ||
600 | ret_val = e1000e_update_nvm_checksum(hw); | 621 | ret_val = e1000e_update_nvm_checksum(hw); |
601 | 622 | ||
602 | out: | 623 | out: |
@@ -929,10 +950,10 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
929 | e1000e_set_interrupt_capability(adapter); | 950 | e1000e_set_interrupt_capability(adapter); |
930 | } | 951 | } |
931 | /* Hook up test interrupt handler just for this test */ | 952 | /* Hook up test interrupt handler just for this test */ |
932 | if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, | 953 | if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, |
933 | netdev)) { | 954 | netdev)) { |
934 | shared_int = 0; | 955 | shared_int = 0; |
935 | } else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, | 956 | } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, |
936 | netdev->name, netdev)) { | 957 | netdev->name, netdev)) { |
937 | *data = 1; | 958 | *data = 1; |
938 | ret_val = -1; | 959 | ret_val = -1; |
@@ -1239,6 +1260,10 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1239 | 1260 | ||
1240 | hw->mac.autoneg = 0; | 1261 | hw->mac.autoneg = 0; |
1241 | 1262 | ||
1263 | /* Workaround: K1 must be disabled for stable 1Gbps operation */ | ||
1264 | if (hw->mac.type == e1000_pchlan) | ||
1265 | e1000_configure_k1_ich8lan(hw, false); | ||
1266 | |||
1242 | if (hw->phy.type == e1000_phy_m88) { | 1267 | if (hw->phy.type == e1000_phy_m88) { |
1243 | /* Auto-MDI/MDIX Off */ | 1268 | /* Auto-MDI/MDIX Off */ |
1244 | e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); | 1269 | e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); |
@@ -1769,12 +1794,11 @@ static int e1000_set_wol(struct net_device *netdev, | |||
1769 | { | 1794 | { |
1770 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1795 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1771 | 1796 | ||
1772 | if (wol->wolopts & WAKE_MAGICSECURE) | ||
1773 | return -EOPNOTSUPP; | ||
1774 | |||
1775 | if (!(adapter->flags & FLAG_HAS_WOL) || | 1797 | if (!(adapter->flags & FLAG_HAS_WOL) || |
1776 | !device_can_wakeup(&adapter->pdev->dev)) | 1798 | !device_can_wakeup(&adapter->pdev->dev) || |
1777 | return wol->wolopts ? -EOPNOTSUPP : 0; | 1799 | (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | |
1800 | WAKE_MAGIC | WAKE_PHY | WAKE_ARP))) | ||
1801 | return -EOPNOTSUPP; | ||
1778 | 1802 | ||
1779 | /* these settings will always override what we currently have */ | 1803 | /* these settings will always override what we currently have */ |
1780 | adapter->wol = 0; | 1804 | adapter->wol = 0; |
@@ -1832,6 +1856,7 @@ static int e1000_phys_id(struct net_device *netdev, u32 data) | |||
1832 | 1856 | ||
1833 | if ((hw->phy.type == e1000_phy_ife) || | 1857 | if ((hw->phy.type == e1000_phy_ife) || |
1834 | (hw->mac.type == e1000_pchlan) || | 1858 | (hw->mac.type == e1000_pchlan) || |
1859 | (hw->mac.type == e1000_82583) || | ||
1835 | (hw->mac.type == e1000_82574)) { | 1860 | (hw->mac.type == e1000_82574)) { |
1836 | INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task); | 1861 | INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task); |
1837 | if (!adapter->blink_timer.function) { | 1862 | if (!adapter->blink_timer.function) { |
@@ -1912,10 +1937,21 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, | |||
1912 | { | 1937 | { |
1913 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1938 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1914 | int i; | 1939 | int i; |
1940 | char *p = NULL; | ||
1915 | 1941 | ||
1916 | e1000e_update_stats(adapter); | 1942 | e1000e_update_stats(adapter); |
1917 | for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { | 1943 | for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { |
1918 | char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; | 1944 | switch (e1000_gstrings_stats[i].type) { |
1945 | case NETDEV_STATS: | ||
1946 | p = (char *) netdev + | ||
1947 | e1000_gstrings_stats[i].stat_offset; | ||
1948 | break; | ||
1949 | case E1000_STATS: | ||
1950 | p = (char *) adapter + | ||
1951 | e1000_gstrings_stats[i].stat_offset; | ||
1952 | break; | ||
1953 | } | ||
1954 | |||
1919 | data[i] = (e1000_gstrings_stats[i].sizeof_stat == | 1955 | data[i] = (e1000_gstrings_stats[i].sizeof_stat == |
1920 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; | 1956 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; |
1921 | } | 1957 | } |
@@ -1975,6 +2011,8 @@ static const struct ethtool_ops e1000_ethtool_ops = { | |||
1975 | .get_sset_count = e1000e_get_sset_count, | 2011 | .get_sset_count = e1000e_get_sset_count, |
1976 | .get_coalesce = e1000_get_coalesce, | 2012 | .get_coalesce = e1000_get_coalesce, |
1977 | .set_coalesce = e1000_set_coalesce, | 2013 | .set_coalesce = e1000_set_coalesce, |
2014 | .get_flags = ethtool_op_get_flags, | ||
2015 | .set_flags = ethtool_op_set_flags, | ||
1978 | }; | 2016 | }; |
1979 | 2017 | ||
1980 | void e1000e_set_ethtool_ops(struct net_device *netdev) | 2018 | void e1000e_set_ethtool_ops(struct net_device *netdev) |
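The ethtool statistics hunks above split the string table by source: counters kept in the adapter remain E1000_STAT entries, while counters that actually live in struct net_device become E1000_NETDEV_STAT entries, and e1000_get_ethtool_stats() now picks the base pointer per entry before applying the stored offset. A minimal standalone sketch of that offsetof dispatch follows; the demo_netdev/demo_adapter structures, their field names and the main() driver are invented for illustration and are not part of the patch.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for struct net_device and struct e1000_adapter. */
    struct demo_netdev  { uint64_t rx_errors; };
    struct demo_adapter { uint64_t good_packets; };

    enum { SRC_NETDEV, SRC_ADAPTER };           /* which structure owns the counter */

    struct demo_stat {
    	const char *name;
    	int type;                               /* SRC_NETDEV or SRC_ADAPTER */
    	size_t size;                            /* sizeof the counter field */
    	size_t offset;                          /* offsetof() into the owning struct */
    };

    #define NETDEV_STAT(m)  { #m, SRC_NETDEV, \
    	sizeof(((struct demo_netdev *)0)->m), offsetof(struct demo_netdev, m) }
    #define ADAPTER_STAT(m) { #m, SRC_ADAPTER, \
    	sizeof(((struct demo_adapter *)0)->m), offsetof(struct demo_adapter, m) }

    static const struct demo_stat demo_stats[] = {
    	NETDEV_STAT(rx_errors),
    	ADAPTER_STAT(good_packets),
    };

    static uint64_t demo_read_stat(const struct demo_stat *s,
    			       struct demo_netdev *nd, struct demo_adapter *ad)
    {
    	/* Select the owning structure from the entry type, then read by offset. */
    	char *base = (s->type == SRC_NETDEV) ? (char *)nd : (char *)ad;
    	char *p = base + s->offset;

    	return (s->size == sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
    }

    int main(void)
    {
    	struct demo_netdev  nd = { .rx_errors = 3 };
    	struct demo_adapter ad = { .good_packets = 42 };
    	size_t i;

    	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++)
    		printf("%s = %llu\n", demo_stats[i].name,
    		       (unsigned long long)demo_read_stat(&demo_stats[i], &nd, &ad));
    	return 0;
    }

The benefit of this layout is that one flat table can describe counters spread across two unrelated structures, at the cost of a per-entry type tag and a small switch when reading.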
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index aaea41ef794d..426155c15cef 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -755,34 +755,34 @@ struct e1000_mac_operations { | |||
755 | 755 | ||
756 | /* Function pointers for the PHY. */ | 756 | /* Function pointers for the PHY. */ |
757 | struct e1000_phy_operations { | 757 | struct e1000_phy_operations { |
758 | s32 (*acquire_phy)(struct e1000_hw *); | 758 | s32 (*acquire)(struct e1000_hw *); |
759 | s32 (*cfg_on_link_up)(struct e1000_hw *); | ||
759 | s32 (*check_polarity)(struct e1000_hw *); | 760 | s32 (*check_polarity)(struct e1000_hw *); |
760 | s32 (*check_reset_block)(struct e1000_hw *); | 761 | s32 (*check_reset_block)(struct e1000_hw *); |
761 | s32 (*commit_phy)(struct e1000_hw *); | 762 | s32 (*commit)(struct e1000_hw *); |
762 | s32 (*force_speed_duplex)(struct e1000_hw *); | 763 | s32 (*force_speed_duplex)(struct e1000_hw *); |
763 | s32 (*get_cfg_done)(struct e1000_hw *hw); | 764 | s32 (*get_cfg_done)(struct e1000_hw *hw); |
764 | s32 (*get_cable_length)(struct e1000_hw *); | 765 | s32 (*get_cable_length)(struct e1000_hw *); |
765 | s32 (*get_phy_info)(struct e1000_hw *); | 766 | s32 (*get_info)(struct e1000_hw *); |
766 | s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *); | 767 | s32 (*read_reg)(struct e1000_hw *, u32, u16 *); |
767 | s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *); | 768 | s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); |
768 | void (*release_phy)(struct e1000_hw *); | 769 | void (*release)(struct e1000_hw *); |
769 | s32 (*reset_phy)(struct e1000_hw *); | 770 | s32 (*reset)(struct e1000_hw *); |
770 | s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); | 771 | s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); |
771 | s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); | 772 | s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); |
772 | s32 (*write_phy_reg)(struct e1000_hw *, u32, u16); | 773 | s32 (*write_reg)(struct e1000_hw *, u32, u16); |
773 | s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16); | 774 | s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); |
774 | s32 (*cfg_on_link_up)(struct e1000_hw *); | ||
775 | }; | 775 | }; |
776 | 776 | ||
777 | /* Function pointers for the NVM. */ | 777 | /* Function pointers for the NVM. */ |
778 | struct e1000_nvm_operations { | 778 | struct e1000_nvm_operations { |
779 | s32 (*acquire_nvm)(struct e1000_hw *); | 779 | s32 (*acquire)(struct e1000_hw *); |
780 | s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *); | 780 | s32 (*read)(struct e1000_hw *, u16, u16, u16 *); |
781 | void (*release_nvm)(struct e1000_hw *); | 781 | void (*release)(struct e1000_hw *); |
782 | s32 (*update_nvm)(struct e1000_hw *); | 782 | s32 (*update)(struct e1000_hw *); |
783 | s32 (*valid_led_default)(struct e1000_hw *, u16 *); | 783 | s32 (*valid_led_default)(struct e1000_hw *, u16 *); |
784 | s32 (*validate_nvm)(struct e1000_hw *); | 784 | s32 (*validate)(struct e1000_hw *); |
785 | s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *); | 785 | s32 (*write)(struct e1000_hw *, u16, u16, u16 *); |
786 | }; | 786 | }; |
787 | 787 | ||
788 | struct e1000_mac_info { | 788 | struct e1000_mac_info { |
@@ -925,15 +925,4 @@ struct e1000_hw { | |||
925 | } dev_spec; | 925 | } dev_spec; |
926 | }; | 926 | }; |
927 | 927 | ||
928 | #ifdef DEBUG | ||
929 | #define hw_dbg(hw, format, arg...) \ | ||
930 | printk(KERN_DEBUG "%s: " format, e1000e_get_hw_dev_name(hw), ##arg) | ||
931 | #else | ||
932 | static inline int __attribute__ ((format (printf, 2, 3))) | ||
933 | hw_dbg(struct e1000_hw *hw, const char *format, ...) | ||
934 | { | ||
935 | return 0; | ||
936 | } | ||
937 | #endif | ||
938 | |||
939 | #endif | 928 | #endif |
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index eff3f4783655..7530fc5d81c3 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -54,11 +54,6 @@ | |||
54 | * 82578DC Gigabit Network Connection | 54 | * 82578DC Gigabit Network Connection |
55 | */ | 55 | */ |
56 | 56 | ||
57 | #include <linux/netdevice.h> | ||
58 | #include <linux/ethtool.h> | ||
59 | #include <linux/delay.h> | ||
60 | #include <linux/pci.h> | ||
61 | |||
62 | #include "e1000.h" | 57 | #include "e1000.h" |
63 | 58 | ||
64 | #define ICH_FLASH_GFPREG 0x0000 | 59 | #define ICH_FLASH_GFPREG 0x0000 |
@@ -224,7 +219,6 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw); | |||
224 | static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); | 219 | static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); |
225 | static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); | 220 | static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); |
226 | static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); | 221 | static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); |
227 | static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); | ||
228 | 222 | ||
229 | static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) | 223 | static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) |
230 | { | 224 | { |
@@ -266,12 +260,12 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) | |||
266 | phy->reset_delay_us = 100; | 260 | phy->reset_delay_us = 100; |
267 | 261 | ||
268 | phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan; | 262 | phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan; |
269 | phy->ops.read_phy_reg = e1000_read_phy_reg_hv; | 263 | phy->ops.read_reg = e1000_read_phy_reg_hv; |
270 | phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked; | 264 | phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; |
271 | phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; | 265 | phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; |
272 | phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; | 266 | phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; |
273 | phy->ops.write_phy_reg = e1000_write_phy_reg_hv; | 267 | phy->ops.write_reg = e1000_write_phy_reg_hv; |
274 | phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked; | 268 | phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; |
275 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | 269 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; |
276 | 270 | ||
277 | phy->id = e1000_phy_unknown; | 271 | phy->id = e1000_phy_unknown; |
@@ -283,8 +277,8 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) | |||
283 | phy->ops.force_speed_duplex = | 277 | phy->ops.force_speed_duplex = |
284 | e1000_phy_force_speed_duplex_82577; | 278 | e1000_phy_force_speed_duplex_82577; |
285 | phy->ops.get_cable_length = e1000_get_cable_length_82577; | 279 | phy->ops.get_cable_length = e1000_get_cable_length_82577; |
286 | phy->ops.get_phy_info = e1000_get_phy_info_82577; | 280 | phy->ops.get_info = e1000_get_phy_info_82577; |
287 | phy->ops.commit_phy = e1000e_phy_sw_reset; | 281 | phy->ops.commit = e1000e_phy_sw_reset; |
288 | } | 282 | } |
289 | 283 | ||
290 | return ret_val; | 284 | return ret_val; |
@@ -311,8 +305,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) | |||
311 | */ | 305 | */ |
312 | ret_val = e1000e_determine_phy_address(hw); | 306 | ret_val = e1000e_determine_phy_address(hw); |
313 | if (ret_val) { | 307 | if (ret_val) { |
314 | hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; | 308 | phy->ops.write_reg = e1000e_write_phy_reg_bm; |
315 | hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; | 309 | phy->ops.read_reg = e1000e_read_phy_reg_bm; |
316 | ret_val = e1000e_determine_phy_address(hw); | 310 | ret_val = e1000e_determine_phy_address(hw); |
317 | if (ret_val) | 311 | if (ret_val) |
318 | return ret_val; | 312 | return ret_val; |
@@ -332,8 +326,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) | |||
332 | case IGP03E1000_E_PHY_ID: | 326 | case IGP03E1000_E_PHY_ID: |
333 | phy->type = e1000_phy_igp_3; | 327 | phy->type = e1000_phy_igp_3; |
334 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | 328 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; |
335 | phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked; | 329 | phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked; |
336 | phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked; | 330 | phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked; |
337 | break; | 331 | break; |
338 | case IFE_E_PHY_ID: | 332 | case IFE_E_PHY_ID: |
339 | case IFE_PLUS_E_PHY_ID: | 333 | case IFE_PLUS_E_PHY_ID: |
@@ -344,9 +338,9 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) | |||
344 | case BME1000_E_PHY_ID: | 338 | case BME1000_E_PHY_ID: |
345 | phy->type = e1000_phy_bm; | 339 | phy->type = e1000_phy_bm; |
346 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | 340 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; |
347 | hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; | 341 | phy->ops.read_reg = e1000e_read_phy_reg_bm; |
348 | hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; | 342 | phy->ops.write_reg = e1000e_write_phy_reg_bm; |
349 | hw->phy.ops.commit_phy = e1000e_phy_sw_reset; | 343 | phy->ops.commit = e1000e_phy_sw_reset; |
350 | break; | 344 | break; |
351 | default: | 345 | default: |
352 | return -E1000_ERR_PHY; | 346 | return -E1000_ERR_PHY; |
@@ -374,7 +368,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) | |||
374 | 368 | ||
375 | /* Can't read flash registers if the register set isn't mapped. */ | 369 | /* Can't read flash registers if the register set isn't mapped. */ |
376 | if (!hw->flash_address) { | 370 | if (!hw->flash_address) { |
377 | hw_dbg(hw, "ERROR: Flash registers not mapped\n"); | 371 | e_dbg("ERROR: Flash registers not mapped\n"); |
378 | return -E1000_ERR_CONFIG; | 372 | return -E1000_ERR_CONFIG; |
379 | } | 373 | } |
380 | 374 | ||
@@ -407,7 +401,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) | |||
407 | 401 | ||
408 | /* Clear shadow ram */ | 402 | /* Clear shadow ram */ |
409 | for (i = 0; i < nvm->word_size; i++) { | 403 | for (i = 0; i < nvm->word_size; i++) { |
410 | dev_spec->shadow_ram[i].modified = 0; | 404 | dev_spec->shadow_ram[i].modified = false; |
411 | dev_spec->shadow_ram[i].value = 0xFFFF; | 405 | dev_spec->shadow_ram[i].value = 0xFFFF; |
412 | } | 406 | } |
413 | 407 | ||
@@ -436,7 +430,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) | |||
436 | if (mac->type == e1000_ich8lan) | 430 | if (mac->type == e1000_ich8lan) |
437 | mac->rar_entry_count--; | 431 | mac->rar_entry_count--; |
438 | /* Set if manageability features are enabled. */ | 432 | /* Set if manageability features are enabled. */ |
439 | mac->arc_subsystem_valid = 1; | 433 | mac->arc_subsystem_valid = true; |
440 | 434 | ||
441 | /* LED operations */ | 435 | /* LED operations */ |
442 | switch (mac->type) { | 436 | switch (mac->type) { |
@@ -470,7 +464,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) | |||
470 | 464 | ||
471 | /* Enable PCS Lock-loss workaround for ICH8 */ | 465 | /* Enable PCS Lock-loss workaround for ICH8 */ |
472 | if (mac->type == e1000_ich8lan) | 466 | if (mac->type == e1000_ich8lan) |
473 | e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, 1); | 467 | e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); |
474 | 468 | ||
475 | return 0; | 469 | return 0; |
476 | } | 470 | } |
@@ -556,7 +550,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
556 | */ | 550 | */ |
557 | ret_val = e1000e_config_fc_after_link_up(hw); | 551 | ret_val = e1000e_config_fc_after_link_up(hw); |
558 | if (ret_val) | 552 | if (ret_val) |
559 | hw_dbg(hw, "Error configuring flow control\n"); | 553 | e_dbg("Error configuring flow control\n"); |
560 | 554 | ||
561 | out: | 555 | out: |
562 | return ret_val; | 556 | return ret_val; |
@@ -636,8 +630,6 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | |||
636 | u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; | 630 | u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; |
637 | s32 ret_val = 0; | 631 | s32 ret_val = 0; |
638 | 632 | ||
639 | might_sleep(); | ||
640 | |||
641 | mutex_lock(&swflag_mutex); | 633 | mutex_lock(&swflag_mutex); |
642 | 634 | ||
643 | while (timeout) { | 635 | while (timeout) { |
@@ -650,7 +642,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | |||
650 | } | 642 | } |
651 | 643 | ||
652 | if (!timeout) { | 644 | if (!timeout) { |
653 | hw_dbg(hw, "SW/FW/HW has locked the resource for too long.\n"); | 645 | e_dbg("SW/FW/HW has locked the resource for too long.\n"); |
654 | ret_val = -E1000_ERR_CONFIG; | 646 | ret_val = -E1000_ERR_CONFIG; |
655 | goto out; | 647 | goto out; |
656 | } | 648 | } |
@@ -670,7 +662,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | |||
670 | } | 662 | } |
671 | 663 | ||
672 | if (!timeout) { | 664 | if (!timeout) { |
673 | hw_dbg(hw, "Failed to acquire the semaphore.\n"); | 665 | e_dbg("Failed to acquire the semaphore.\n"); |
674 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; | 666 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; |
675 | ew32(EXTCNF_CTRL, extcnf_ctrl); | 667 | ew32(EXTCNF_CTRL, extcnf_ctrl); |
676 | ret_val = -E1000_ERR_CONFIG; | 668 | ret_val = -E1000_ERR_CONFIG; |
@@ -714,7 +706,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) | |||
714 | **/ | 706 | **/ |
715 | static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) | 707 | static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) |
716 | { | 708 | { |
717 | u32 fwsm = er32(FWSM); | 709 | u32 fwsm; |
710 | |||
711 | fwsm = er32(FWSM); | ||
718 | 712 | ||
719 | return (fwsm & E1000_FWSM_MODE_MASK) == | 713 | return (fwsm & E1000_FWSM_MODE_MASK) == |
720 | (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); | 714 | (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); |
@@ -779,12 +773,12 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw) | |||
779 | if (ret_val) | 773 | if (ret_val) |
780 | return ret_val; | 774 | return ret_val; |
781 | 775 | ||
782 | hw_dbg(hw, "IFE PMC: %X\n", data); | 776 | e_dbg("IFE PMC: %X\n", data); |
783 | 777 | ||
784 | udelay(1); | 778 | udelay(1); |
785 | 779 | ||
786 | if (phy->autoneg_wait_to_complete) { | 780 | if (phy->autoneg_wait_to_complete) { |
787 | hw_dbg(hw, "Waiting for forced speed/duplex link on IFE phy.\n"); | 781 | e_dbg("Waiting for forced speed/duplex link on IFE phy.\n"); |
788 | 782 | ||
789 | ret_val = e1000e_phy_has_link_generic(hw, | 783 | ret_val = e1000e_phy_has_link_generic(hw, |
790 | PHY_FORCE_LIMIT, | 784 | PHY_FORCE_LIMIT, |
@@ -794,7 +788,7 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw) | |||
794 | return ret_val; | 788 | return ret_val; |
795 | 789 | ||
796 | if (!link) | 790 | if (!link) |
797 | hw_dbg(hw, "Link taking longer than expected.\n"); | 791 | e_dbg("Link taking longer than expected.\n"); |
798 | 792 | ||
799 | /* Try once more */ | 793 | /* Try once more */ |
800 | ret_val = e1000e_phy_has_link_generic(hw, | 794 | ret_val = e1000e_phy_has_link_generic(hw, |
@@ -822,7 +816,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) | |||
822 | s32 ret_val; | 816 | s32 ret_val; |
823 | u16 word_addr, reg_data, reg_addr, phy_page = 0; | 817 | u16 word_addr, reg_data, reg_addr, phy_page = 0; |
824 | 818 | ||
825 | ret_val = hw->phy.ops.acquire_phy(hw); | 819 | ret_val = hw->phy.ops.acquire(hw); |
826 | if (ret_val) | 820 | if (ret_val) |
827 | return ret_val; | 821 | return ret_val; |
828 | 822 | ||
@@ -918,7 +912,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) | |||
918 | reg_addr &= PHY_REG_MASK; | 912 | reg_addr &= PHY_REG_MASK; |
919 | reg_addr |= phy_page; | 913 | reg_addr |= phy_page; |
920 | 914 | ||
921 | ret_val = phy->ops.write_phy_reg_locked(hw, | 915 | ret_val = phy->ops.write_reg_locked(hw, |
922 | (u32)reg_addr, | 916 | (u32)reg_addr, |
923 | reg_data); | 917 | reg_data); |
924 | if (ret_val) | 918 | if (ret_val) |
@@ -927,7 +921,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) | |||
927 | } | 921 | } |
928 | 922 | ||
929 | out: | 923 | out: |
930 | hw->phy.ops.release_phy(hw); | 924 | hw->phy.ops.release(hw); |
931 | return ret_val; | 925 | return ret_val; |
932 | } | 926 | } |
933 | 927 | ||
@@ -951,15 +945,14 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) | |||
951 | goto out; | 945 | goto out; |
952 | 946 | ||
953 | /* Wrap the whole flow with the sw flag */ | 947 | /* Wrap the whole flow with the sw flag */ |
954 | ret_val = hw->phy.ops.acquire_phy(hw); | 948 | ret_val = hw->phy.ops.acquire(hw); |
955 | if (ret_val) | 949 | if (ret_val) |
956 | goto out; | 950 | goto out; |
957 | 951 | ||
958 | /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ | 952 | /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ |
959 | if (link) { | 953 | if (link) { |
960 | if (hw->phy.type == e1000_phy_82578) { | 954 | if (hw->phy.type == e1000_phy_82578) { |
961 | ret_val = hw->phy.ops.read_phy_reg_locked(hw, | 955 | ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, |
962 | BM_CS_STATUS, | ||
963 | &status_reg); | 956 | &status_reg); |
964 | if (ret_val) | 957 | if (ret_val) |
965 | goto release; | 958 | goto release; |
@@ -975,8 +968,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) | |||
975 | } | 968 | } |
976 | 969 | ||
977 | if (hw->phy.type == e1000_phy_82577) { | 970 | if (hw->phy.type == e1000_phy_82577) { |
978 | ret_val = hw->phy.ops.read_phy_reg_locked(hw, | 971 | ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, |
979 | HV_M_STATUS, | ||
980 | &status_reg); | 972 | &status_reg); |
981 | if (ret_val) | 973 | if (ret_val) |
982 | goto release; | 974 | goto release; |
@@ -992,14 +984,14 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) | |||
992 | } | 984 | } |
993 | 985 | ||
994 | /* Link stall fix for link up */ | 986 | /* Link stall fix for link up */ |
995 | ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19), | 987 | ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), |
996 | 0x0100); | 988 | 0x0100); |
997 | if (ret_val) | 989 | if (ret_val) |
998 | goto release; | 990 | goto release; |
999 | 991 | ||
1000 | } else { | 992 | } else { |
1001 | /* Link stall fix for link down */ | 993 | /* Link stall fix for link down */ |
1002 | ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19), | 994 | ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), |
1003 | 0x4100); | 995 | 0x4100); |
1004 | if (ret_val) | 996 | if (ret_val) |
1005 | goto release; | 997 | goto release; |
@@ -1008,7 +1000,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) | |||
1008 | ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); | 1000 | ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); |
1009 | 1001 | ||
1010 | release: | 1002 | release: |
1011 | hw->phy.ops.release_phy(hw); | 1003 | hw->phy.ops.release(hw); |
1012 | out: | 1004 | out: |
1013 | return ret_val; | 1005 | return ret_val; |
1014 | } | 1006 | } |
@@ -1023,7 +1015,7 @@ out: | |||
1023 | * | 1015 | * |
1024 | * Success returns 0, Failure returns -E1000_ERR_PHY (-2) | 1016 | * Success returns 0, Failure returns -E1000_ERR_PHY (-2) |
1025 | **/ | 1017 | **/ |
1026 | static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) | 1018 | s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) |
1027 | { | 1019 | { |
1028 | s32 ret_val = 0; | 1020 | s32 ret_val = 0; |
1029 | u32 ctrl_reg = 0; | 1021 | u32 ctrl_reg = 0; |
@@ -1084,7 +1076,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) | |||
1084 | if (hw->mac.type != e1000_pchlan) | 1076 | if (hw->mac.type != e1000_pchlan) |
1085 | return ret_val; | 1077 | return ret_val; |
1086 | 1078 | ||
1087 | ret_val = hw->phy.ops.acquire_phy(hw); | 1079 | ret_val = hw->phy.ops.acquire(hw); |
1088 | if (ret_val) | 1080 | if (ret_val) |
1089 | return ret_val; | 1081 | return ret_val; |
1090 | 1082 | ||
@@ -1098,7 +1090,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) | |||
1098 | 1090 | ||
1099 | mac_reg = er32(PHY_CTRL); | 1091 | mac_reg = er32(PHY_CTRL); |
1100 | 1092 | ||
1101 | ret_val = hw->phy.ops.read_phy_reg_locked(hw, HV_OEM_BITS, &oem_reg); | 1093 | ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); |
1102 | if (ret_val) | 1094 | if (ret_val) |
1103 | goto out; | 1095 | goto out; |
1104 | 1096 | ||
@@ -1120,10 +1112,10 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) | |||
1120 | /* Restart auto-neg to activate the bits */ | 1112 | /* Restart auto-neg to activate the bits */ |
1121 | if (!e1000_check_reset_block(hw)) | 1113 | if (!e1000_check_reset_block(hw)) |
1122 | oem_reg |= HV_OEM_BITS_RESTART_AN; | 1114 | oem_reg |= HV_OEM_BITS_RESTART_AN; |
1123 | ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg); | 1115 | ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); |
1124 | 1116 | ||
1125 | out: | 1117 | out: |
1126 | hw->phy.ops.release_phy(hw); | 1118 | hw->phy.ops.release(hw); |
1127 | 1119 | ||
1128 | return ret_val; | 1120 | return ret_val; |
1129 | } | 1121 | } |
@@ -1166,7 +1158,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) | |||
1166 | } | 1158 | } |
1167 | 1159 | ||
1168 | /* Select page 0 */ | 1160 | /* Select page 0 */ |
1169 | ret_val = hw->phy.ops.acquire_phy(hw); | 1161 | ret_val = hw->phy.ops.acquire(hw); |
1170 | if (ret_val) | 1162 | if (ret_val) |
1171 | return ret_val; | 1163 | return ret_val; |
1172 | 1164 | ||
@@ -1174,7 +1166,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) | |||
1174 | ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); | 1166 | ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); |
1175 | if (ret_val) | 1167 | if (ret_val) |
1176 | goto out; | 1168 | goto out; |
1177 | hw->phy.ops.release_phy(hw); | 1169 | hw->phy.ops.release(hw); |
1178 | 1170 | ||
1179 | /* | 1171 | /* |
1180 | * Configure the K1 Si workaround during phy reset assuming there is | 1172 | * Configure the K1 Si workaround during phy reset assuming there is |
@@ -1210,7 +1202,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) | |||
1210 | * leave the PHY in a bad state possibly resulting in no link. | 1202 | * leave the PHY in a bad state possibly resulting in no link. |
1211 | */ | 1203 | */ |
1212 | if (loop == 0) | 1204 | if (loop == 0) |
1213 | hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n"); | 1205 | e_dbg("LAN_INIT_DONE not set, increase timeout\n"); |
1214 | 1206 | ||
1215 | /* Clear the Init Done bit for the next init event */ | 1207 | /* Clear the Init Done bit for the next init event */ |
1216 | data = er32(STATUS); | 1208 | data = er32(STATUS); |
@@ -1281,7 +1273,7 @@ static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw) | |||
1281 | return ret_val; | 1273 | return ret_val; |
1282 | 1274 | ||
1283 | if (!link) { | 1275 | if (!link) { |
1284 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | 1276 | e_dbg("Phy info is only valid if link is up\n"); |
1285 | return -E1000_ERR_CONFIG; | 1277 | return -E1000_ERR_CONFIG; |
1286 | } | 1278 | } |
1287 | 1279 | ||
@@ -1412,7 +1404,7 @@ out: | |||
1412 | /** | 1404 | /** |
1413 | * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state | 1405 | * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state |
1414 | * @hw: pointer to the HW structure | 1406 | * @hw: pointer to the HW structure |
1415 | * @active: TRUE to enable LPLU, FALSE to disable | 1407 | * @active: true to enable LPLU, false to disable |
1416 | * | 1408 | * |
1417 | * Sets the LPLU D0 state according to the active flag. When | 1409 | * Sets the LPLU D0 state according to the active flag. When |
1418 | * activating LPLU this function also disables smart speed | 1410 | * activating LPLU this function also disables smart speed |
@@ -1498,7 +1490,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) | |||
1498 | /** | 1490 | /** |
1499 | * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state | 1491 | * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state |
1500 | * @hw: pointer to the HW structure | 1492 | * @hw: pointer to the HW structure |
1501 | * @active: TRUE to enable LPLU, FALSE to disable | 1493 | * @active: true to enable LPLU, false to disable |
1502 | * | 1494 | * |
1503 | * Sets the LPLU D3 state according to the active flag. When | 1495 | * Sets the LPLU D3 state according to the active flag. When |
1504 | * activating LPLU this function also disables smart speed | 1496 | * activating LPLU this function also disables smart speed |
@@ -1611,7 +1603,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) | |||
1611 | 1603 | ||
1612 | return 0; | 1604 | return 0; |
1613 | } | 1605 | } |
1614 | hw_dbg(hw, "Unable to determine valid NVM bank via EEC - " | 1606 | e_dbg("Unable to determine valid NVM bank via EEC - " |
1615 | "reading flash signature\n"); | 1607 | "reading flash signature\n"); |
1616 | /* fall-thru */ | 1608 | /* fall-thru */ |
1617 | default: | 1609 | default: |
@@ -1641,7 +1633,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) | |||
1641 | return 0; | 1633 | return 0; |
1642 | } | 1634 | } |
1643 | 1635 | ||
1644 | hw_dbg(hw, "ERROR: No valid NVM bank present\n"); | 1636 | e_dbg("ERROR: No valid NVM bank present\n"); |
1645 | return -E1000_ERR_NVM; | 1637 | return -E1000_ERR_NVM; |
1646 | } | 1638 | } |
1647 | 1639 | ||
@@ -1669,16 +1661,16 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
1669 | 1661 | ||
1670 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || | 1662 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || |
1671 | (words == 0)) { | 1663 | (words == 0)) { |
1672 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1664 | e_dbg("nvm parameter(s) out of bounds\n"); |
1673 | ret_val = -E1000_ERR_NVM; | 1665 | ret_val = -E1000_ERR_NVM; |
1674 | goto out; | 1666 | goto out; |
1675 | } | 1667 | } |
1676 | 1668 | ||
1677 | nvm->ops.acquire_nvm(hw); | 1669 | nvm->ops.acquire(hw); |
1678 | 1670 | ||
1679 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); | 1671 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); |
1680 | if (ret_val) { | 1672 | if (ret_val) { |
1681 | hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n"); | 1673 | e_dbg("Could not detect valid bank, assuming bank 0\n"); |
1682 | bank = 0; | 1674 | bank = 0; |
1683 | } | 1675 | } |
1684 | 1676 | ||
@@ -1700,11 +1692,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
1700 | } | 1692 | } |
1701 | } | 1693 | } |
1702 | 1694 | ||
1703 | nvm->ops.release_nvm(hw); | 1695 | nvm->ops.release(hw); |
1704 | 1696 | ||
1705 | out: | 1697 | out: |
1706 | if (ret_val) | 1698 | if (ret_val) |
1707 | hw_dbg(hw, "NVM read error: %d\n", ret_val); | 1699 | e_dbg("NVM read error: %d\n", ret_val); |
1708 | 1700 | ||
1709 | return ret_val; | 1701 | return ret_val; |
1710 | } | 1702 | } |
@@ -1726,7 +1718,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
1726 | 1718 | ||
1727 | /* Check if the flash descriptor is valid */ | 1719 | /* Check if the flash descriptor is valid */ |
1728 | if (hsfsts.hsf_status.fldesvalid == 0) { | 1720 | if (hsfsts.hsf_status.fldesvalid == 0) { |
1729 | hw_dbg(hw, "Flash descriptor invalid. " | 1721 | e_dbg("Flash descriptor invalid. " |
1730 | "SW Sequencing must be used."); | 1722 | "SW Sequencing must be used."); |
1731 | return -E1000_ERR_NVM; | 1723 | return -E1000_ERR_NVM; |
1732 | } | 1724 | } |
@@ -1776,7 +1768,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
1776 | hsfsts.hsf_status.flcdone = 1; | 1768 | hsfsts.hsf_status.flcdone = 1; |
1777 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | 1769 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); |
1778 | } else { | 1770 | } else { |
1779 | hw_dbg(hw, "Flash controller busy, cannot get access"); | 1771 | e_dbg("Flash controller busy, cannot get access"); |
1780 | } | 1772 | } |
1781 | } | 1773 | } |
1782 | 1774 | ||
@@ -1926,7 +1918,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |||
1926 | /* Repeat for some time before giving up. */ | 1918 | /* Repeat for some time before giving up. */ |
1927 | continue; | 1919 | continue; |
1928 | } else if (hsfsts.hsf_status.flcdone == 0) { | 1920 | } else if (hsfsts.hsf_status.flcdone == 0) { |
1929 | hw_dbg(hw, "Timeout error - flash cycle " | 1921 | e_dbg("Timeout error - flash cycle " |
1930 | "did not complete."); | 1922 | "did not complete."); |
1931 | break; | 1923 | break; |
1932 | } | 1924 | } |
@@ -1954,18 +1946,18 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
1954 | 1946 | ||
1955 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || | 1947 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || |
1956 | (words == 0)) { | 1948 | (words == 0)) { |
1957 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1949 | e_dbg("nvm parameter(s) out of bounds\n"); |
1958 | return -E1000_ERR_NVM; | 1950 | return -E1000_ERR_NVM; |
1959 | } | 1951 | } |
1960 | 1952 | ||
1961 | nvm->ops.acquire_nvm(hw); | 1953 | nvm->ops.acquire(hw); |
1962 | 1954 | ||
1963 | for (i = 0; i < words; i++) { | 1955 | for (i = 0; i < words; i++) { |
1964 | dev_spec->shadow_ram[offset+i].modified = 1; | 1956 | dev_spec->shadow_ram[offset+i].modified = true; |
1965 | dev_spec->shadow_ram[offset+i].value = data[i]; | 1957 | dev_spec->shadow_ram[offset+i].value = data[i]; |
1966 | } | 1958 | } |
1967 | 1959 | ||
1968 | nvm->ops.release_nvm(hw); | 1960 | nvm->ops.release(hw); |
1969 | 1961 | ||
1970 | return 0; | 1962 | return 0; |
1971 | } | 1963 | } |
@@ -1996,7 +1988,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
1996 | if (nvm->type != e1000_nvm_flash_sw) | 1988 | if (nvm->type != e1000_nvm_flash_sw) |
1997 | goto out; | 1989 | goto out; |
1998 | 1990 | ||
1999 | nvm->ops.acquire_nvm(hw); | 1991 | nvm->ops.acquire(hw); |
2000 | 1992 | ||
2001 | /* | 1993 | /* |
2002 | * We're writing to the opposite bank so if we're on bank 1, | 1994 | * We're writing to the opposite bank so if we're on bank 1, |
@@ -2005,7 +1997,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2005 | */ | 1997 | */ |
2006 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); | 1998 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); |
2007 | if (ret_val) { | 1999 | if (ret_val) { |
2008 | hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n"); | 2000 | e_dbg("Could not detect valid bank, assuming bank 0\n"); |
2009 | bank = 0; | 2001 | bank = 0; |
2010 | } | 2002 | } |
2011 | 2003 | ||
@@ -2014,7 +2006,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2014 | old_bank_offset = 0; | 2006 | old_bank_offset = 0; |
2015 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); | 2007 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); |
2016 | if (ret_val) { | 2008 | if (ret_val) { |
2017 | nvm->ops.release_nvm(hw); | 2009 | nvm->ops.release(hw); |
2018 | goto out; | 2010 | goto out; |
2019 | } | 2011 | } |
2020 | } else { | 2012 | } else { |
@@ -2022,7 +2014,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2022 | new_bank_offset = 0; | 2014 | new_bank_offset = 0; |
2023 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); | 2015 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); |
2024 | if (ret_val) { | 2016 | if (ret_val) { |
2025 | nvm->ops.release_nvm(hw); | 2017 | nvm->ops.release(hw); |
2026 | goto out; | 2018 | goto out; |
2027 | } | 2019 | } |
2028 | } | 2020 | } |
@@ -2079,8 +2071,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2079 | */ | 2071 | */ |
2080 | if (ret_val) { | 2072 | if (ret_val) { |
2081 | /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ | 2073 | /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ |
2082 | hw_dbg(hw, "Flash commit failed.\n"); | 2074 | e_dbg("Flash commit failed.\n"); |
2083 | nvm->ops.release_nvm(hw); | 2075 | nvm->ops.release(hw); |
2084 | goto out; | 2076 | goto out; |
2085 | } | 2077 | } |
2086 | 2078 | ||
@@ -2093,7 +2085,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2093 | act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; | 2085 | act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; |
2094 | ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); | 2086 | ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); |
2095 | if (ret_val) { | 2087 | if (ret_val) { |
2096 | nvm->ops.release_nvm(hw); | 2088 | nvm->ops.release(hw); |
2097 | goto out; | 2089 | goto out; |
2098 | } | 2090 | } |
2099 | data &= 0xBFFF; | 2091 | data &= 0xBFFF; |
@@ -2101,7 +2093,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2101 | act_offset * 2 + 1, | 2093 | act_offset * 2 + 1, |
2102 | (u8)(data >> 8)); | 2094 | (u8)(data >> 8)); |
2103 | if (ret_val) { | 2095 | if (ret_val) { |
2104 | nvm->ops.release_nvm(hw); | 2096 | nvm->ops.release(hw); |
2105 | goto out; | 2097 | goto out; |
2106 | } | 2098 | } |
2107 | 2099 | ||
@@ -2114,17 +2106,17 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2114 | act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; | 2106 | act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; |
2115 | ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); | 2107 | ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); |
2116 | if (ret_val) { | 2108 | if (ret_val) { |
2117 | nvm->ops.release_nvm(hw); | 2109 | nvm->ops.release(hw); |
2118 | goto out; | 2110 | goto out; |
2119 | } | 2111 | } |
2120 | 2112 | ||
2121 | /* Great! Everything worked, we can now clear the cached entries. */ | 2113 | /* Great! Everything worked, we can now clear the cached entries. */ |
2122 | for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { | 2114 | for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { |
2123 | dev_spec->shadow_ram[i].modified = 0; | 2115 | dev_spec->shadow_ram[i].modified = false; |
2124 | dev_spec->shadow_ram[i].value = 0xFFFF; | 2116 | dev_spec->shadow_ram[i].value = 0xFFFF; |
2125 | } | 2117 | } |
2126 | 2118 | ||
2127 | nvm->ops.release_nvm(hw); | 2119 | nvm->ops.release(hw); |
2128 | 2120 | ||
2129 | /* | 2121 | /* |
2130 | * Reload the EEPROM, or else modifications will not appear | 2122 | * Reload the EEPROM, or else modifications will not appear |
@@ -2135,7 +2127,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2135 | 2127 | ||
2136 | out: | 2128 | out: |
2137 | if (ret_val) | 2129 | if (ret_val) |
2138 | hw_dbg(hw, "NVM update error: %d\n", ret_val); | 2130 | e_dbg("NVM update error: %d\n", ret_val); |
2139 | 2131 | ||
2140 | return ret_val; | 2132 | return ret_val; |
2141 | } | 2133 | } |
@@ -2193,7 +2185,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) | |||
2193 | union ich8_hws_flash_status hsfsts; | 2185 | union ich8_hws_flash_status hsfsts; |
2194 | u32 gfpreg; | 2186 | u32 gfpreg; |
2195 | 2187 | ||
2196 | nvm->ops.acquire_nvm(hw); | 2188 | nvm->ops.acquire(hw); |
2197 | 2189 | ||
2198 | gfpreg = er32flash(ICH_FLASH_GFPREG); | 2190 | gfpreg = er32flash(ICH_FLASH_GFPREG); |
2199 | 2191 | ||
@@ -2214,7 +2206,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) | |||
2214 | hsfsts.hsf_status.flockdn = true; | 2206 | hsfsts.hsf_status.flockdn = true; |
2215 | ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); | 2207 | ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); |
2216 | 2208 | ||
2217 | nvm->ops.release_nvm(hw); | 2209 | nvm->ops.release(hw); |
2218 | } | 2210 | } |
2219 | 2211 | ||
2220 | /** | 2212 | /** |
@@ -2285,7 +2277,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |||
2285 | /* Repeat for some time before giving up. */ | 2277 | /* Repeat for some time before giving up. */ |
2286 | continue; | 2278 | continue; |
2287 | if (hsfsts.hsf_status.flcdone == 0) { | 2279 | if (hsfsts.hsf_status.flcdone == 0) { |
2288 | hw_dbg(hw, "Timeout error - flash cycle " | 2280 | e_dbg("Timeout error - flash cycle " |
2289 | "did not complete."); | 2281 | "did not complete."); |
2290 | break; | 2282 | break; |
2291 | } | 2283 | } |
@@ -2330,7 +2322,7 @@ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, | |||
2330 | return ret_val; | 2322 | return ret_val; |
2331 | 2323 | ||
2332 | for (program_retries = 0; program_retries < 100; program_retries++) { | 2324 | for (program_retries = 0; program_retries < 100; program_retries++) { |
2333 | hw_dbg(hw, "Retrying Byte %2.2X at offset %u\n", byte, offset); | 2325 | e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); |
2334 | udelay(100); | 2326 | udelay(100); |
2335 | ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); | 2327 | ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); |
2336 | if (!ret_val) | 2328 | if (!ret_val) |
@@ -2360,9 +2352,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) | |||
2360 | u32 flash_bank_size = nvm->flash_bank_size * 2; | 2352 | u32 flash_bank_size = nvm->flash_bank_size * 2; |
2361 | s32 ret_val; | 2353 | s32 ret_val; |
2362 | s32 count = 0; | 2354 | s32 count = 0; |
2363 | s32 iteration; | 2355 | s32 j, iteration, sector_size; |
2364 | s32 sector_size; | ||
2365 | s32 j; | ||
2366 | 2356 | ||
2367 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | 2357 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); |
2368 | 2358 | ||
@@ -2465,7 +2455,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) | |||
2465 | 2455 | ||
2466 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); | 2456 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); |
2467 | if (ret_val) { | 2457 | if (ret_val) { |
2468 | hw_dbg(hw, "NVM Read Error\n"); | 2458 | e_dbg("NVM Read Error\n"); |
2469 | return ret_val; | 2459 | return ret_val; |
2470 | } | 2460 | } |
2471 | 2461 | ||
@@ -2595,10 +2585,10 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
2595 | */ | 2585 | */ |
2596 | ret_val = e1000e_disable_pcie_master(hw); | 2586 | ret_val = e1000e_disable_pcie_master(hw); |
2597 | if (ret_val) { | 2587 | if (ret_val) { |
2598 | hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); | 2588 | e_dbg("PCI-E Master disable polling has failed.\n"); |
2599 | } | 2589 | } |
2600 | 2590 | ||
2601 | hw_dbg(hw, "Masking off all interrupts\n"); | 2591 | e_dbg("Masking off all interrupts\n"); |
2602 | ew32(IMC, 0xffffffff); | 2592 | ew32(IMC, 0xffffffff); |
2603 | 2593 | ||
2604 | /* | 2594 | /* |
@@ -2650,7 +2640,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
2650 | } | 2640 | } |
2651 | ret_val = e1000_acquire_swflag_ich8lan(hw); | 2641 | ret_val = e1000_acquire_swflag_ich8lan(hw); |
2652 | /* Whether or not the swflag was acquired, we need to reset the part */ | 2642 | /* Whether or not the swflag was acquired, we need to reset the part */ |
2653 | hw_dbg(hw, "Issuing a global reset to ich8lan\n"); | 2643 | e_dbg("Issuing a global reset to ich8lan\n"); |
2654 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); | 2644 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); |
2655 | msleep(20); | 2645 | msleep(20); |
2656 | 2646 | ||
@@ -2670,7 +2660,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
2670 | * return with an error. This can happen in situations | 2660 | * return with an error. This can happen in situations |
2671 | * where there is no eeprom and prevents getting link. | 2661 | * where there is no eeprom and prevents getting link. |
2672 | */ | 2662 | */ |
2673 | hw_dbg(hw, "Auto Read Done did not complete\n"); | 2663 | e_dbg("Auto Read Done did not complete\n"); |
2674 | } | 2664 | } |
2675 | } | 2665 | } |
2676 | /* Dummy read to clear the phy wakeup bit after lcd reset */ | 2666 | /* Dummy read to clear the phy wakeup bit after lcd reset */ |
@@ -2731,16 +2721,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) | |||
2731 | 2721 | ||
2732 | /* Initialize identification LED */ | 2722 | /* Initialize identification LED */ |
2733 | ret_val = mac->ops.id_led_init(hw); | 2723 | ret_val = mac->ops.id_led_init(hw); |
2734 | if (ret_val) { | 2724 | if (ret_val) |
2735 | hw_dbg(hw, "Error initializing identification LED\n"); | 2725 | e_dbg("Error initializing identification LED\n"); |
2736 | return ret_val; | 2726 | /* This is not fatal and we should not stop init due to this */ |
2737 | } | ||
2738 | 2727 | ||
2739 | /* Setup the receive address. */ | 2728 | /* Setup the receive address. */ |
2740 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); | 2729 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); |
2741 | 2730 | ||
2742 | /* Zero out the Multicast HASH table */ | 2731 | /* Zero out the Multicast HASH table */ |
2743 | hw_dbg(hw, "Zeroing the MTA\n"); | 2732 | e_dbg("Zeroing the MTA\n"); |
2744 | for (i = 0; i < mac->mta_reg_count; i++) | 2733 | for (i = 0; i < mac->mta_reg_count; i++) |
2745 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); | 2734 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); |
2746 | 2735 | ||
@@ -2750,7 +2739,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) | |||
2750 | * Reset the phy after disabling host wakeup to reset the Rx buffer. | 2739 | * Reset the phy after disabling host wakeup to reset the Rx buffer. |
2751 | */ | 2740 | */ |
2752 | if (hw->phy.type == e1000_phy_82578) { | 2741 | if (hw->phy.type == e1000_phy_82578) { |
2753 | hw->phy.ops.read_phy_reg(hw, BM_WUC, &i); | 2742 | hw->phy.ops.read_reg(hw, BM_WUC, &i); |
2754 | ret_val = e1000_phy_hw_reset_ich8lan(hw); | 2743 | ret_val = e1000_phy_hw_reset_ich8lan(hw); |
2755 | if (ret_val) | 2744 | if (ret_val) |
2756 | return ret_val; | 2745 | return ret_val; |
@@ -2886,7 +2875,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) | |||
2886 | */ | 2875 | */ |
2887 | hw->fc.current_mode = hw->fc.requested_mode; | 2876 | hw->fc.current_mode = hw->fc.requested_mode; |
2888 | 2877 | ||
2889 | hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", | 2878 | e_dbg("After fix-ups FlowControl is now = %x\n", |
2890 | hw->fc.current_mode); | 2879 | hw->fc.current_mode); |
2891 | 2880 | ||
2892 | /* Continue to configure the copper link. */ | 2881 | /* Continue to configure the copper link. */ |
@@ -2897,7 +2886,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) | |||
2897 | ew32(FCTTV, hw->fc.pause_time); | 2886 | ew32(FCTTV, hw->fc.pause_time); |
2898 | if ((hw->phy.type == e1000_phy_82578) || | 2887 | if ((hw->phy.type == e1000_phy_82578) || |
2899 | (hw->phy.type == e1000_phy_82577)) { | 2888 | (hw->phy.type == e1000_phy_82577)) { |
2900 | ret_val = hw->phy.ops.write_phy_reg(hw, | 2889 | ret_val = hw->phy.ops.write_reg(hw, |
2901 | PHY_REG(BM_PORT_CTRL_PAGE, 27), | 2890 | PHY_REG(BM_PORT_CTRL_PAGE, 27), |
2902 | hw->fc.pause_time); | 2891 | hw->fc.pause_time); |
2903 | if (ret_val) | 2892 | if (ret_val) |
@@ -2960,7 +2949,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) | |||
2960 | return ret_val; | 2949 | return ret_val; |
2961 | break; | 2950 | break; |
2962 | case e1000_phy_ife: | 2951 | case e1000_phy_ife: |
2963 | ret_val = hw->phy.ops.read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, | 2952 | ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, |
2964 | ®_data); | 2953 | ®_data); |
2965 | if (ret_val) | 2954 | if (ret_val) |
2966 | return ret_val; | 2955 | return ret_val; |
@@ -2979,7 +2968,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) | |||
2979 | reg_data |= IFE_PMC_AUTO_MDIX; | 2968 | reg_data |= IFE_PMC_AUTO_MDIX; |
2980 | break; | 2969 | break; |
2981 | } | 2970 | } |
2982 | ret_val = hw->phy.ops.write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, | 2971 | ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, |
2983 | reg_data); | 2972 | reg_data); |
2984 | if (ret_val) | 2973 | if (ret_val) |
2985 | return ret_val; | 2974 | return ret_val; |
@@ -3092,8 +3081,8 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) | |||
3092 | * @hw: pointer to the HW structure | 3081 | * @hw: pointer to the HW structure |
3093 | * @state: boolean value used to set the current Kumeran workaround state | 3082 | * @state: boolean value used to set the current Kumeran workaround state |
3094 | * | 3083 | * |
3095 | * If ICH8, set the current Kumeran workaround state (enabled - TRUE | 3084 | * If ICH8, set the current Kumeran workaround state (enabled - true |
3096 | * /disabled - FALSE). | 3085 | * /disabled - false). |
3097 | **/ | 3086 | **/ |
3098 | void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, | 3087 | void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, |
3099 | bool state) | 3088 | bool state) |
@@ -3101,7 +3090,7 @@ void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, | |||
3101 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | 3090 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; |
3102 | 3091 | ||
3103 | if (hw->mac.type != e1000_ich8lan) { | 3092 | if (hw->mac.type != e1000_ich8lan) { |
3104 | hw_dbg(hw, "Workaround applies to ICH8 only.\n"); | 3093 | e_dbg("Workaround applies to ICH8 only.\n"); |
3105 | return; | 3094 | return; |
3106 | } | 3095 | } |
3107 | 3096 | ||
@@ -3281,7 +3270,7 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) | |||
3281 | **/ | 3270 | **/ |
3282 | static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) | 3271 | static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) |
3283 | { | 3272 | { |
3284 | return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, | 3273 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, |
3285 | (u16)hw->mac.ledctl_mode1); | 3274 | (u16)hw->mac.ledctl_mode1); |
3286 | } | 3275 | } |
3287 | 3276 | ||
@@ -3293,7 +3282,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) | |||
3293 | **/ | 3282 | **/ |
3294 | static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) | 3283 | static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) |
3295 | { | 3284 | { |
3296 | return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, | 3285 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, |
3297 | (u16)hw->mac.ledctl_default); | 3286 | (u16)hw->mac.ledctl_default); |
3298 | } | 3287 | } |
3299 | 3288 | ||
@@ -3325,7 +3314,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw) | |||
3325 | } | 3314 | } |
3326 | } | 3315 | } |
3327 | 3316 | ||
3328 | return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data); | 3317 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); |
3329 | } | 3318 | } |
3330 | 3319 | ||
3331 | /** | 3320 | /** |
@@ -3356,7 +3345,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw) | |||
3356 | } | 3345 | } |
3357 | } | 3346 | } |
3358 | 3347 | ||
3359 | return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data); | 3348 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); |
3360 | } | 3349 | } |
3361 | 3350 | ||
3362 | /** | 3351 | /** |
@@ -3379,8 +3368,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) | |||
3379 | if (status & E1000_STATUS_PHYRA) | 3368 | if (status & E1000_STATUS_PHYRA) |
3380 | ew32(STATUS, status & ~E1000_STATUS_PHYRA); | 3369 | ew32(STATUS, status & ~E1000_STATUS_PHYRA); |
3381 | else | 3370 | else |
3382 | hw_dbg(hw, | 3371 | e_dbg("PHY Reset Asserted not set - needs delay\n"); |
3383 | "PHY Reset Asserted not set - needs delay\n"); | ||
3384 | } | 3372 | } |
3385 | 3373 | ||
3386 | e1000e_get_cfg_done(hw); | 3374 | e1000e_get_cfg_done(hw); |
@@ -3395,7 +3383,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) | |||
3395 | } else { | 3383 | } else { |
3396 | if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { | 3384 | if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { |
3397 | /* Maybe we should do a basic PHY config */ | 3385 | /* Maybe we should do a basic PHY config */ |
3398 | hw_dbg(hw, "EEPROM not present\n"); | 3386 | e_dbg("EEPROM not present\n"); |
3399 | return -E1000_ERR_CONFIG; | 3387 | return -E1000_ERR_CONFIG; |
3400 | } | 3388 | } |
3401 | } | 3389 | } |
@@ -3412,42 +3400,41 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) | |||
3412 | **/ | 3400 | **/ |
3413 | static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) | 3401 | static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) |
3414 | { | 3402 | { |
3415 | u32 temp; | ||
3416 | u16 phy_data; | 3403 | u16 phy_data; |
3417 | 3404 | ||
3418 | e1000e_clear_hw_cntrs_base(hw); | 3405 | e1000e_clear_hw_cntrs_base(hw); |
3419 | 3406 | ||
3420 | temp = er32(ALGNERRC); | 3407 | er32(ALGNERRC); |
3421 | temp = er32(RXERRC); | 3408 | er32(RXERRC); |
3422 | temp = er32(TNCRS); | 3409 | er32(TNCRS); |
3423 | temp = er32(CEXTERR); | 3410 | er32(CEXTERR); |
3424 | temp = er32(TSCTC); | 3411 | er32(TSCTC); |
3425 | temp = er32(TSCTFC); | 3412 | er32(TSCTFC); |
3426 | 3413 | ||
3427 | temp = er32(MGTPRC); | 3414 | er32(MGTPRC); |
3428 | temp = er32(MGTPDC); | 3415 | er32(MGTPDC); |
3429 | temp = er32(MGTPTC); | 3416 | er32(MGTPTC); |
3430 | 3417 | ||
3431 | temp = er32(IAC); | 3418 | er32(IAC); |
3432 | temp = er32(ICRXOC); | 3419 | er32(ICRXOC); |
3433 | 3420 | ||
3434 | /* Clear PHY statistics registers */ | 3421 | /* Clear PHY statistics registers */ |
3435 | if ((hw->phy.type == e1000_phy_82578) || | 3422 | if ((hw->phy.type == e1000_phy_82578) || |
3436 | (hw->phy.type == e1000_phy_82577)) { | 3423 | (hw->phy.type == e1000_phy_82577)) { |
3437 | hw->phy.ops.read_phy_reg(hw, HV_SCC_UPPER, &phy_data); | 3424 | hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data); |
3438 | hw->phy.ops.read_phy_reg(hw, HV_SCC_LOWER, &phy_data); | 3425 | hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data); |
3439 | hw->phy.ops.read_phy_reg(hw, HV_ECOL_UPPER, &phy_data); | 3426 | hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data); |
3440 | hw->phy.ops.read_phy_reg(hw, HV_ECOL_LOWER, &phy_data); | 3427 | hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data); |
3441 | hw->phy.ops.read_phy_reg(hw, HV_MCC_UPPER, &phy_data); | 3428 | hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data); |
3442 | hw->phy.ops.read_phy_reg(hw, HV_MCC_LOWER, &phy_data); | 3429 | hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data); |
3443 | hw->phy.ops.read_phy_reg(hw, HV_LATECOL_UPPER, &phy_data); | 3430 | hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data); |
3444 | hw->phy.ops.read_phy_reg(hw, HV_LATECOL_LOWER, &phy_data); | 3431 | hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data); |
3445 | hw->phy.ops.read_phy_reg(hw, HV_COLC_UPPER, &phy_data); | 3432 | hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data); |
3446 | hw->phy.ops.read_phy_reg(hw, HV_COLC_LOWER, &phy_data); | 3433 | hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data); |
3447 | hw->phy.ops.read_phy_reg(hw, HV_DC_UPPER, &phy_data); | 3434 | hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data); |
3448 | hw->phy.ops.read_phy_reg(hw, HV_DC_LOWER, &phy_data); | 3435 | hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data); |
3449 | hw->phy.ops.read_phy_reg(hw, HV_TNCRS_UPPER, &phy_data); | 3436 | hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data); |
3450 | hw->phy.ops.read_phy_reg(hw, HV_TNCRS_LOWER, &phy_data); | 3437 | hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data); |
3451 | } | 3438 | } |
3452 | } | 3439 | } |
3453 | 3440 | ||
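Note on the hunk above: the write-only temp variable disappears from e1000_clear_hw_cntrs_ich8lan() because every register touched here is clear-on-read -- the MAC counters read through er32() and the HV_* PHY counters read through read_reg() are reset by the read itself, so the returned value can simply be discarded (a later hunk in lib.c gives e1000e_clear_hw_cntrs_base() the same treatment). A minimal sketch of the pattern, using register names from the hunk and assuming only that er32() is the driver's 32-bit MMIO read macro picking up a local 'hw' pointer:

        /*
         * Illustrative sketch: flush clear-on-read statistics counters.
         * The read is issued purely for its side effect of zeroing the
         * hardware counter; no value needs to be stored.
         */
        static void example_flush_ro_counters(struct e1000_hw *hw)
        {
                er32(ALGNERRC);         /* alignment errors        */
                er32(RXERRC);           /* receive errors          */
                er32(TNCRS);            /* tx with no carrier sense */
        }
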
@@ -3470,29 +3457,29 @@ static struct e1000_mac_operations ich8_mac_ops = { | |||
3470 | }; | 3457 | }; |
3471 | 3458 | ||
3472 | static struct e1000_phy_operations ich8_phy_ops = { | 3459 | static struct e1000_phy_operations ich8_phy_ops = { |
3473 | .acquire_phy = e1000_acquire_swflag_ich8lan, | 3460 | .acquire = e1000_acquire_swflag_ich8lan, |
3474 | .check_reset_block = e1000_check_reset_block_ich8lan, | 3461 | .check_reset_block = e1000_check_reset_block_ich8lan, |
3475 | .commit_phy = NULL, | 3462 | .commit = NULL, |
3476 | .force_speed_duplex = e1000_phy_force_speed_duplex_ich8lan, | 3463 | .force_speed_duplex = e1000_phy_force_speed_duplex_ich8lan, |
3477 | .get_cfg_done = e1000_get_cfg_done_ich8lan, | 3464 | .get_cfg_done = e1000_get_cfg_done_ich8lan, |
3478 | .get_cable_length = e1000e_get_cable_length_igp_2, | 3465 | .get_cable_length = e1000e_get_cable_length_igp_2, |
3479 | .get_phy_info = e1000_get_phy_info_ich8lan, | 3466 | .get_info = e1000_get_phy_info_ich8lan, |
3480 | .read_phy_reg = e1000e_read_phy_reg_igp, | 3467 | .read_reg = e1000e_read_phy_reg_igp, |
3481 | .release_phy = e1000_release_swflag_ich8lan, | 3468 | .release = e1000_release_swflag_ich8lan, |
3482 | .reset_phy = e1000_phy_hw_reset_ich8lan, | 3469 | .reset = e1000_phy_hw_reset_ich8lan, |
3483 | .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, | 3470 | .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, |
3484 | .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, | 3471 | .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, |
3485 | .write_phy_reg = e1000e_write_phy_reg_igp, | 3472 | .write_reg = e1000e_write_phy_reg_igp, |
3486 | }; | 3473 | }; |
3487 | 3474 | ||
3488 | static struct e1000_nvm_operations ich8_nvm_ops = { | 3475 | static struct e1000_nvm_operations ich8_nvm_ops = { |
3489 | .acquire_nvm = e1000_acquire_nvm_ich8lan, | 3476 | .acquire = e1000_acquire_nvm_ich8lan, |
3490 | .read_nvm = e1000_read_nvm_ich8lan, | 3477 | .read = e1000_read_nvm_ich8lan, |
3491 | .release_nvm = e1000_release_nvm_ich8lan, | 3478 | .release = e1000_release_nvm_ich8lan, |
3492 | .update_nvm = e1000_update_nvm_checksum_ich8lan, | 3479 | .update = e1000_update_nvm_checksum_ich8lan, |
3493 | .valid_led_default = e1000_valid_led_default_ich8lan, | 3480 | .valid_led_default = e1000_valid_led_default_ich8lan, |
3494 | .validate_nvm = e1000_validate_nvm_checksum_ich8lan, | 3481 | .validate = e1000_validate_nvm_checksum_ich8lan, |
3495 | .write_nvm = e1000_write_nvm_ich8lan, | 3482 | .write = e1000_write_nvm_ich8lan, |
3496 | }; | 3483 | }; |
3497 | 3484 | ||
3498 | struct e1000_info e1000_ich8_info = { | 3485 | struct e1000_info e1000_ich8_info = { |
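Note on the two ops tables above: the function-pointer members lose their redundant suffixes (write_phy_reg -> write_reg, acquire_nvm -> acquire, and so on) because the containing structure already says whether the table is PHY or NVM, so repeating it in every member only lengthens the call sites. A hedged sketch of a typical caller after the rename, reusing names that appear in earlier hunks (the error handling shown is illustrative):

        u16 reg_data;
        s32 ret_val;

        /* read-modify-write of a PHY register through the ops table */
        ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
        if (ret_val)
                return ret_val;
        reg_data |= IFE_PMC_AUTO_MDIX;
        ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, reg_data);
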
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 99ba2b8a2a05..f690a1055b41 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -26,11 +26,6 @@ | |||
26 | 26 | ||
27 | *******************************************************************************/ | 27 | *******************************************************************************/ |
28 | 28 | ||
29 | #include <linux/netdevice.h> | ||
30 | #include <linux/ethtool.h> | ||
31 | #include <linux/delay.h> | ||
32 | #include <linux/pci.h> | ||
33 | |||
34 | #include "e1000.h" | 29 | #include "e1000.h" |
35 | 30 | ||
36 | enum e1000_mng_mode { | 31 | enum e1000_mng_mode { |
@@ -115,12 +110,12 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) | |||
115 | u32 i; | 110 | u32 i; |
116 | 111 | ||
117 | /* Setup the receive address */ | 112 | /* Setup the receive address */ |
118 | hw_dbg(hw, "Programming MAC Address into RAR[0]\n"); | 113 | e_dbg("Programming MAC Address into RAR[0]\n"); |
119 | 114 | ||
120 | e1000e_rar_set(hw, hw->mac.addr, 0); | 115 | e1000e_rar_set(hw, hw->mac.addr, 0); |
121 | 116 | ||
122 | /* Zero out the other (rar_entry_count - 1) receive addresses */ | 117 | /* Zero out the other (rar_entry_count - 1) receive addresses */ |
123 | hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1); | 118 | e_dbg("Clearing RAR[1-%u]\n", rar_count-1); |
124 | for (i = 1; i < rar_count; i++) { | 119 | for (i = 1; i < rar_count; i++) { |
125 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); | 120 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); |
126 | e1e_flush(); | 121 | e1e_flush(); |
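Note: here and throughout the rest of the diff, hw_dbg(hw, ...) calls become e_dbg(...). The new macro no longer takes the hardware pointer as an argument; it picks up a variable named hw from the surrounding scope, which is why a later netdev.c hunk adds a local 'struct e1000_hw *hw = &adapter->hw;' to e1000_clean_rx_irq() and why the DEBUG-only e1000e_get_hw_dev_name() helper can be dropped from netdev.c further down. The macro's real definition lives in e1000.h and is not part of this diff; purely as an illustration of the idea, such a macro could be built on dev_dbg() along these lines:

        /*
         * Hypothetical sketch only -- not the driver's actual definition.
         * Requires a 'struct e1000_hw *hw' to be visible at the call site.
         */
        #define e_dbg_sketch(fmt, args...) \
                dev_dbg(&hw->adapter->pdev->dev, fmt, ##args)
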
@@ -276,7 +271,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, | |||
276 | for (; mc_addr_count > 0; mc_addr_count--) { | 271 | for (; mc_addr_count > 0; mc_addr_count--) { |
277 | u32 hash_value, hash_reg, hash_bit, mta; | 272 | u32 hash_value, hash_reg, hash_bit, mta; |
278 | hash_value = e1000_hash_mc_addr(hw, mc_addr_list); | 273 | hash_value = e1000_hash_mc_addr(hw, mc_addr_list); |
279 | hw_dbg(hw, "Hash value = 0x%03X\n", hash_value); | 274 | e_dbg("Hash value = 0x%03X\n", hash_value); |
280 | hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); | 275 | hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); |
281 | hash_bit = hash_value & 0x1F; | 276 | hash_bit = hash_value & 0x1F; |
282 | mta = (1 << hash_bit); | 277 | mta = (1 << hash_bit); |
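Note on the multicast hunk above: each multicast address is hashed, the upper bits of the hash select one of the mta_reg_count 32-bit Multicast Table Array registers, and the low five bits select a bit within it. Because this is a hash filter rather than an exact match, the hardware may still pass some unwanted multicast frames, which the stack filters in software. Restating the mapping compactly:

        /*
         * Sketch of the hash-to-MTA mapping (mta_reg_count is a power of
         * two).  The resulting bit is OR-ed into MTA[hash_reg] so frames
         * whose destination hashes to this bucket are accepted.
         */
        hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
        hash_bit = hash_value & 0x1F;
        mta = (1 << hash_bit);
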
@@ -300,45 +295,43 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, | |||
300 | **/ | 295 | **/ |
301 | void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) | 296 | void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) |
302 | { | 297 | { |
303 | u32 temp; | 298 | er32(CRCERRS); |
304 | 299 | er32(SYMERRS); | |
305 | temp = er32(CRCERRS); | 300 | er32(MPC); |
306 | temp = er32(SYMERRS); | 301 | er32(SCC); |
307 | temp = er32(MPC); | 302 | er32(ECOL); |
308 | temp = er32(SCC); | 303 | er32(MCC); |
309 | temp = er32(ECOL); | 304 | er32(LATECOL); |
310 | temp = er32(MCC); | 305 | er32(COLC); |
311 | temp = er32(LATECOL); | 306 | er32(DC); |
312 | temp = er32(COLC); | 307 | er32(SEC); |
313 | temp = er32(DC); | 308 | er32(RLEC); |
314 | temp = er32(SEC); | 309 | er32(XONRXC); |
315 | temp = er32(RLEC); | 310 | er32(XONTXC); |
316 | temp = er32(XONRXC); | 311 | er32(XOFFRXC); |
317 | temp = er32(XONTXC); | 312 | er32(XOFFTXC); |
318 | temp = er32(XOFFRXC); | 313 | er32(FCRUC); |
319 | temp = er32(XOFFTXC); | 314 | er32(GPRC); |
320 | temp = er32(FCRUC); | 315 | er32(BPRC); |
321 | temp = er32(GPRC); | 316 | er32(MPRC); |
322 | temp = er32(BPRC); | 317 | er32(GPTC); |
323 | temp = er32(MPRC); | 318 | er32(GORCL); |
324 | temp = er32(GPTC); | 319 | er32(GORCH); |
325 | temp = er32(GORCL); | 320 | er32(GOTCL); |
326 | temp = er32(GORCH); | 321 | er32(GOTCH); |
327 | temp = er32(GOTCL); | 322 | er32(RNBC); |
328 | temp = er32(GOTCH); | 323 | er32(RUC); |
329 | temp = er32(RNBC); | 324 | er32(RFC); |
330 | temp = er32(RUC); | 325 | er32(ROC); |
331 | temp = er32(RFC); | 326 | er32(RJC); |
332 | temp = er32(ROC); | 327 | er32(TORL); |
333 | temp = er32(RJC); | 328 | er32(TORH); |
334 | temp = er32(TORL); | 329 | er32(TOTL); |
335 | temp = er32(TORH); | 330 | er32(TOTH); |
336 | temp = er32(TOTL); | 331 | er32(TPR); |
337 | temp = er32(TOTH); | 332 | er32(TPT); |
338 | temp = er32(TPR); | 333 | er32(MPTC); |
339 | temp = er32(TPT); | 334 | er32(BPTC); |
340 | temp = er32(MPTC); | ||
341 | temp = er32(BPTC); | ||
342 | } | 335 | } |
343 | 336 | ||
344 | /** | 337 | /** |
@@ -376,7 +369,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) | |||
376 | if (!link) | 369 | if (!link) |
377 | return ret_val; /* No link detected */ | 370 | return ret_val; /* No link detected */ |
378 | 371 | ||
379 | mac->get_link_status = 0; | 372 | mac->get_link_status = false; |
380 | 373 | ||
381 | /* | 374 | /* |
382 | * Check if there was DownShift, must be checked | 375 | * Check if there was DownShift, must be checked |
@@ -408,7 +401,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) | |||
408 | */ | 401 | */ |
409 | ret_val = e1000e_config_fc_after_link_up(hw); | 402 | ret_val = e1000e_config_fc_after_link_up(hw); |
410 | if (ret_val) { | 403 | if (ret_val) { |
411 | hw_dbg(hw, "Error configuring flow control\n"); | 404 | e_dbg("Error configuring flow control\n"); |
412 | } | 405 | } |
413 | 406 | ||
414 | return ret_val; | 407 | return ret_val; |
@@ -448,7 +441,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
448 | mac->autoneg_failed = 1; | 441 | mac->autoneg_failed = 1; |
449 | return 0; | 442 | return 0; |
450 | } | 443 | } |
451 | hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); | 444 | e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); |
452 | 445 | ||
453 | /* Disable auto-negotiation in the TXCW register */ | 446 | /* Disable auto-negotiation in the TXCW register */ |
454 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | 447 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); |
@@ -461,7 +454,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
461 | /* Configure Flow Control after forcing link up. */ | 454 | /* Configure Flow Control after forcing link up. */ |
462 | ret_val = e1000e_config_fc_after_link_up(hw); | 455 | ret_val = e1000e_config_fc_after_link_up(hw); |
463 | if (ret_val) { | 456 | if (ret_val) { |
464 | hw_dbg(hw, "Error configuring flow control\n"); | 457 | e_dbg("Error configuring flow control\n"); |
465 | return ret_val; | 458 | return ret_val; |
466 | } | 459 | } |
467 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | 460 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
@@ -471,7 +464,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
471 | * and disable forced link in the Device Control register | 464 | * and disable forced link in the Device Control register |
472 | * in an attempt to auto-negotiate with our link partner. | 465 | * in an attempt to auto-negotiate with our link partner. |
473 | */ | 466 | */ |
474 | hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); | 467 | e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); |
475 | ew32(TXCW, mac->txcw); | 468 | ew32(TXCW, mac->txcw); |
476 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | 469 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
477 | 470 | ||
@@ -513,7 +506,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
513 | mac->autoneg_failed = 1; | 506 | mac->autoneg_failed = 1; |
514 | return 0; | 507 | return 0; |
515 | } | 508 | } |
516 | hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); | 509 | e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); |
517 | 510 | ||
518 | /* Disable auto-negotiation in the TXCW register */ | 511 | /* Disable auto-negotiation in the TXCW register */ |
519 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | 512 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); |
@@ -526,7 +519,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
526 | /* Configure Flow Control after forcing link up. */ | 519 | /* Configure Flow Control after forcing link up. */ |
527 | ret_val = e1000e_config_fc_after_link_up(hw); | 520 | ret_val = e1000e_config_fc_after_link_up(hw); |
528 | if (ret_val) { | 521 | if (ret_val) { |
529 | hw_dbg(hw, "Error configuring flow control\n"); | 522 | e_dbg("Error configuring flow control\n"); |
530 | return ret_val; | 523 | return ret_val; |
531 | } | 524 | } |
532 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | 525 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
@@ -536,7 +529,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
536 | * and disable forced link in the Device Control register | 529 | * and disable forced link in the Device Control register |
537 | * in an attempt to auto-negotiate with our link partner. | 530 | * in an attempt to auto-negotiate with our link partner. |
538 | */ | 531 | */ |
539 | hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); | 532 | e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); |
540 | ew32(TXCW, mac->txcw); | 533 | ew32(TXCW, mac->txcw); |
541 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | 534 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
542 | 535 | ||
@@ -553,11 +546,11 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
553 | if (rxcw & E1000_RXCW_SYNCH) { | 546 | if (rxcw & E1000_RXCW_SYNCH) { |
554 | if (!(rxcw & E1000_RXCW_IV)) { | 547 | if (!(rxcw & E1000_RXCW_IV)) { |
555 | mac->serdes_has_link = true; | 548 | mac->serdes_has_link = true; |
556 | hw_dbg(hw, "SERDES: Link up - forced.\n"); | 549 | e_dbg("SERDES: Link up - forced.\n"); |
557 | } | 550 | } |
558 | } else { | 551 | } else { |
559 | mac->serdes_has_link = false; | 552 | mac->serdes_has_link = false; |
560 | hw_dbg(hw, "SERDES: Link down - force failed.\n"); | 553 | e_dbg("SERDES: Link down - force failed.\n"); |
561 | } | 554 | } |
562 | } | 555 | } |
563 | 556 | ||
@@ -570,20 +563,20 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
570 | if (rxcw & E1000_RXCW_SYNCH) { | 563 | if (rxcw & E1000_RXCW_SYNCH) { |
571 | if (!(rxcw & E1000_RXCW_IV)) { | 564 | if (!(rxcw & E1000_RXCW_IV)) { |
572 | mac->serdes_has_link = true; | 565 | mac->serdes_has_link = true; |
573 | hw_dbg(hw, "SERDES: Link up - autoneg " | 566 | e_dbg("SERDES: Link up - autoneg " |
574 | "completed successfully.\n"); | 567 | "completed successfully.\n"); |
575 | } else { | 568 | } else { |
576 | mac->serdes_has_link = false; | 569 | mac->serdes_has_link = false; |
577 | hw_dbg(hw, "SERDES: Link down - invalid " | 570 | e_dbg("SERDES: Link down - invalid " |
578 | "codewords detected in autoneg.\n"); | 571 | "codewords detected in autoneg.\n"); |
579 | } | 572 | } |
580 | } else { | 573 | } else { |
581 | mac->serdes_has_link = false; | 574 | mac->serdes_has_link = false; |
582 | hw_dbg(hw, "SERDES: Link down - no sync.\n"); | 575 | e_dbg("SERDES: Link down - no sync.\n"); |
583 | } | 576 | } |
584 | } else { | 577 | } else { |
585 | mac->serdes_has_link = false; | 578 | mac->serdes_has_link = false; |
586 | hw_dbg(hw, "SERDES: Link down - autoneg failed\n"); | 579 | e_dbg("SERDES: Link down - autoneg failed\n"); |
587 | } | 580 | } |
588 | } | 581 | } |
589 | 582 | ||
@@ -614,7 +607,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) | |||
614 | ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); | 607 | ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); |
615 | 608 | ||
616 | if (ret_val) { | 609 | if (ret_val) { |
617 | hw_dbg(hw, "NVM Read Error\n"); | 610 | e_dbg("NVM Read Error\n"); |
618 | return ret_val; | 611 | return ret_val; |
619 | } | 612 | } |
620 | 613 | ||
@@ -667,7 +660,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw) | |||
667 | */ | 660 | */ |
668 | hw->fc.current_mode = hw->fc.requested_mode; | 661 | hw->fc.current_mode = hw->fc.requested_mode; |
669 | 662 | ||
670 | hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", | 663 | e_dbg("After fix-ups FlowControl is now = %x\n", |
671 | hw->fc.current_mode); | 664 | hw->fc.current_mode); |
672 | 665 | ||
673 | /* Call the necessary media_type subroutine to configure the link. */ | 666 | /* Call the necessary media_type subroutine to configure the link. */ |
@@ -681,7 +674,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw) | |||
681 | * control is disabled, because it does not hurt anything to | 674 | * control is disabled, because it does not hurt anything to |
682 | * initialize these registers. | 675 | * initialize these registers. |
683 | */ | 676 | */ |
684 | hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n"); | 677 | e_dbg("Initializing the Flow Control address, type and timer regs\n"); |
685 | ew32(FCT, FLOW_CONTROL_TYPE); | 678 | ew32(FCT, FLOW_CONTROL_TYPE); |
686 | ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); | 679 | ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); |
687 | ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); | 680 | ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); |
@@ -751,7 +744,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) | |||
751 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); | 744 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); |
752 | break; | 745 | break; |
753 | default: | 746 | default: |
754 | hw_dbg(hw, "Flow control param set incorrectly\n"); | 747 | e_dbg("Flow control param set incorrectly\n"); |
755 | return -E1000_ERR_CONFIG; | 748 | return -E1000_ERR_CONFIG; |
756 | break; | 749 | break; |
757 | } | 750 | } |
@@ -789,7 +782,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) | |||
789 | break; | 782 | break; |
790 | } | 783 | } |
791 | if (i == FIBER_LINK_UP_LIMIT) { | 784 | if (i == FIBER_LINK_UP_LIMIT) { |
792 | hw_dbg(hw, "Never got a valid link from auto-neg!!!\n"); | 785 | e_dbg("Never got a valid link from auto-neg!!!\n"); |
793 | mac->autoneg_failed = 1; | 786 | mac->autoneg_failed = 1; |
794 | /* | 787 | /* |
795 | * AutoNeg failed to achieve a link, so we'll call | 788 | * AutoNeg failed to achieve a link, so we'll call |
@@ -799,13 +792,13 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) | |||
799 | */ | 792 | */ |
800 | ret_val = mac->ops.check_for_link(hw); | 793 | ret_val = mac->ops.check_for_link(hw); |
801 | if (ret_val) { | 794 | if (ret_val) { |
802 | hw_dbg(hw, "Error while checking for link\n"); | 795 | e_dbg("Error while checking for link\n"); |
803 | return ret_val; | 796 | return ret_val; |
804 | } | 797 | } |
805 | mac->autoneg_failed = 0; | 798 | mac->autoneg_failed = 0; |
806 | } else { | 799 | } else { |
807 | mac->autoneg_failed = 0; | 800 | mac->autoneg_failed = 0; |
808 | hw_dbg(hw, "Valid Link Found\n"); | 801 | e_dbg("Valid Link Found\n"); |
809 | } | 802 | } |
810 | 803 | ||
811 | return 0; | 804 | return 0; |
@@ -841,7 +834,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
841 | * then the link-up status bit will be set and the flow control enable | 834 | * then the link-up status bit will be set and the flow control enable |
842 | * bits (RFCE and TFCE) will be set according to their negotiated value. | 835 | * bits (RFCE and TFCE) will be set according to their negotiated value. |
843 | */ | 836 | */ |
844 | hw_dbg(hw, "Auto-negotiation enabled\n"); | 837 | e_dbg("Auto-negotiation enabled\n"); |
845 | 838 | ||
846 | ew32(CTRL, ctrl); | 839 | ew32(CTRL, ctrl); |
847 | e1e_flush(); | 840 | e1e_flush(); |
@@ -856,7 +849,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
856 | (er32(CTRL) & E1000_CTRL_SWDPIN1)) { | 849 | (er32(CTRL) & E1000_CTRL_SWDPIN1)) { |
857 | ret_val = e1000_poll_fiber_serdes_link_generic(hw); | 850 | ret_val = e1000_poll_fiber_serdes_link_generic(hw); |
858 | } else { | 851 | } else { |
859 | hw_dbg(hw, "No signal detected\n"); | 852 | e_dbg("No signal detected\n"); |
860 | } | 853 | } |
861 | 854 | ||
862 | return 0; | 855 | return 0; |
@@ -952,7 +945,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) | |||
952 | * 3: Both Rx and Tx flow control (symmetric) is enabled. | 945 | * 3: Both Rx and Tx flow control (symmetric) is enabled. |
953 | * other: No other values should be possible at this point. | 946 | * other: No other values should be possible at this point. |
954 | */ | 947 | */ |
955 | hw_dbg(hw, "hw->fc.current_mode = %u\n", hw->fc.current_mode); | 948 | e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); |
956 | 949 | ||
957 | switch (hw->fc.current_mode) { | 950 | switch (hw->fc.current_mode) { |
958 | case e1000_fc_none: | 951 | case e1000_fc_none: |
@@ -970,7 +963,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) | |||
970 | ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); | 963 | ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); |
971 | break; | 964 | break; |
972 | default: | 965 | default: |
973 | hw_dbg(hw, "Flow control param set incorrectly\n"); | 966 | e_dbg("Flow control param set incorrectly\n"); |
974 | return -E1000_ERR_CONFIG; | 967 | return -E1000_ERR_CONFIG; |
975 | } | 968 | } |
976 | 969 | ||
@@ -1011,7 +1004,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1011 | } | 1004 | } |
1012 | 1005 | ||
1013 | if (ret_val) { | 1006 | if (ret_val) { |
1014 | hw_dbg(hw, "Error forcing flow control settings\n"); | 1007 | e_dbg("Error forcing flow control settings\n"); |
1015 | return ret_val; | 1008 | return ret_val; |
1016 | } | 1009 | } |
1017 | 1010 | ||
@@ -1035,7 +1028,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1035 | return ret_val; | 1028 | return ret_val; |
1036 | 1029 | ||
1037 | if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { | 1030 | if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { |
1038 | hw_dbg(hw, "Copper PHY and Auto Neg " | 1031 | e_dbg("Copper PHY and Auto Neg " |
1039 | "has not completed.\n"); | 1032 | "has not completed.\n"); |
1040 | return ret_val; | 1033 | return ret_val; |
1041 | } | 1034 | } |
@@ -1100,10 +1093,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1100 | */ | 1093 | */ |
1101 | if (hw->fc.requested_mode == e1000_fc_full) { | 1094 | if (hw->fc.requested_mode == e1000_fc_full) { |
1102 | hw->fc.current_mode = e1000_fc_full; | 1095 | hw->fc.current_mode = e1000_fc_full; |
1103 | hw_dbg(hw, "Flow Control = FULL.\r\n"); | 1096 | e_dbg("Flow Control = FULL.\r\n"); |
1104 | } else { | 1097 | } else { |
1105 | hw->fc.current_mode = e1000_fc_rx_pause; | 1098 | hw->fc.current_mode = e1000_fc_rx_pause; |
1106 | hw_dbg(hw, "Flow Control = " | 1099 | e_dbg("Flow Control = " |
1107 | "RX PAUSE frames only.\r\n"); | 1100 | "RX PAUSE frames only.\r\n"); |
1108 | } | 1101 | } |
1109 | } | 1102 | } |
@@ -1121,7 +1114,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1121 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && | 1114 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && |
1122 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { | 1115 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { |
1123 | hw->fc.current_mode = e1000_fc_tx_pause; | 1116 | hw->fc.current_mode = e1000_fc_tx_pause; |
1124 | hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n"); | 1117 | e_dbg("Flow Control = Tx PAUSE frames only.\r\n"); |
1125 | } | 1118 | } |
1126 | /* | 1119 | /* |
1127 | * For transmitting PAUSE frames ONLY. | 1120 | * For transmitting PAUSE frames ONLY. |
@@ -1137,14 +1130,14 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1137 | !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && | 1130 | !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && |
1138 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { | 1131 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { |
1139 | hw->fc.current_mode = e1000_fc_rx_pause; | 1132 | hw->fc.current_mode = e1000_fc_rx_pause; |
1140 | hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n"); | 1133 | e_dbg("Flow Control = Rx PAUSE frames only.\r\n"); |
1141 | } else { | 1134 | } else { |
1142 | /* | 1135 | /* |
1143 | * Per the IEEE spec, at this point flow control | 1136 | * Per the IEEE spec, at this point flow control |
1144 | * should be disabled. | 1137 | * should be disabled. |
1145 | */ | 1138 | */ |
1146 | hw->fc.current_mode = e1000_fc_none; | 1139 | hw->fc.current_mode = e1000_fc_none; |
1147 | hw_dbg(hw, "Flow Control = NONE.\r\n"); | 1140 | e_dbg("Flow Control = NONE.\r\n"); |
1148 | } | 1141 | } |
1149 | 1142 | ||
1150 | /* | 1143 | /* |
@@ -1154,7 +1147,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1154 | */ | 1147 | */ |
1155 | ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); | 1148 | ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); |
1156 | if (ret_val) { | 1149 | if (ret_val) { |
1157 | hw_dbg(hw, "Error getting link speed and duplex\n"); | 1150 | e_dbg("Error getting link speed and duplex\n"); |
1158 | return ret_val; | 1151 | return ret_val; |
1159 | } | 1152 | } |
1160 | 1153 | ||
@@ -1167,7 +1160,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1167 | */ | 1160 | */ |
1168 | ret_val = e1000e_force_mac_fc(hw); | 1161 | ret_val = e1000e_force_mac_fc(hw); |
1169 | if (ret_val) { | 1162 | if (ret_val) { |
1170 | hw_dbg(hw, "Error forcing flow control settings\n"); | 1163 | e_dbg("Error forcing flow control settings\n"); |
1171 | return ret_val; | 1164 | return ret_val; |
1172 | } | 1165 | } |
1173 | } | 1166 | } |
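Note on the flow-control hunks above: the chain of conditions implements the standard IEEE 802.3 pause resolution -- the local PAUSE/ASM_DIR advertisement is compared with the link partner's, hw->fc.current_mode is set to full, rx_pause, tx_pause or none, and the result is then forced into the MAC by e1000e_force_mac_fc(). A condensed sketch of that resolution; the enum type name e1000_fc_mode and the NWAY_AR_* names for the local advertisement bits are assumed here (only the NWAY_LPAR_* partner bits appear in these hunks):

        /* Illustrative pause-resolution helper mirroring the logic above. */
        static enum e1000_fc_mode resolve_fc(u16 mii_nway_adv_reg,
                                             u16 mii_nway_lp_ability_reg,
                                             enum e1000_fc_mode requested)
        {
                if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
                    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE))
                        /* both sides symmetric-capable */
                        return (requested == e1000_fc_full) ?
                               e1000_fc_full : e1000_fc_rx_pause;
                if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
                    (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
                    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
                    (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
                        return e1000_fc_tx_pause;   /* we send PAUSE only  */
                if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
                    (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
                    !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
                    (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
                        return e1000_fc_rx_pause;   /* we honor PAUSE only */
                return e1000_fc_none;
        }
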
@@ -1191,21 +1184,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup | |||
1191 | status = er32(STATUS); | 1184 | status = er32(STATUS); |
1192 | if (status & E1000_STATUS_SPEED_1000) { | 1185 | if (status & E1000_STATUS_SPEED_1000) { |
1193 | *speed = SPEED_1000; | 1186 | *speed = SPEED_1000; |
1194 | hw_dbg(hw, "1000 Mbs, "); | 1187 | e_dbg("1000 Mbs, "); |
1195 | } else if (status & E1000_STATUS_SPEED_100) { | 1188 | } else if (status & E1000_STATUS_SPEED_100) { |
1196 | *speed = SPEED_100; | 1189 | *speed = SPEED_100; |
1197 | hw_dbg(hw, "100 Mbs, "); | 1190 | e_dbg("100 Mbs, "); |
1198 | } else { | 1191 | } else { |
1199 | *speed = SPEED_10; | 1192 | *speed = SPEED_10; |
1200 | hw_dbg(hw, "10 Mbs, "); | 1193 | e_dbg("10 Mbs, "); |
1201 | } | 1194 | } |
1202 | 1195 | ||
1203 | if (status & E1000_STATUS_FD) { | 1196 | if (status & E1000_STATUS_FD) { |
1204 | *duplex = FULL_DUPLEX; | 1197 | *duplex = FULL_DUPLEX; |
1205 | hw_dbg(hw, "Full Duplex\n"); | 1198 | e_dbg("Full Duplex\n"); |
1206 | } else { | 1199 | } else { |
1207 | *duplex = HALF_DUPLEX; | 1200 | *duplex = HALF_DUPLEX; |
1208 | hw_dbg(hw, "Half Duplex\n"); | 1201 | e_dbg("Half Duplex\n"); |
1209 | } | 1202 | } |
1210 | 1203 | ||
1211 | return 0; | 1204 | return 0; |
@@ -1251,7 +1244,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) | |||
1251 | } | 1244 | } |
1252 | 1245 | ||
1253 | if (i == timeout) { | 1246 | if (i == timeout) { |
1254 | hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n"); | 1247 | e_dbg("Driver can't access device - SMBI bit is set.\n"); |
1255 | return -E1000_ERR_NVM; | 1248 | return -E1000_ERR_NVM; |
1256 | } | 1249 | } |
1257 | 1250 | ||
@@ -1270,7 +1263,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) | |||
1270 | if (i == timeout) { | 1263 | if (i == timeout) { |
1271 | /* Release semaphores */ | 1264 | /* Release semaphores */ |
1272 | e1000e_put_hw_semaphore(hw); | 1265 | e1000e_put_hw_semaphore(hw); |
1273 | hw_dbg(hw, "Driver can't access the NVM\n"); | 1266 | e_dbg("Driver can't access the NVM\n"); |
1274 | return -E1000_ERR_NVM; | 1267 | return -E1000_ERR_NVM; |
1275 | } | 1268 | } |
1276 | 1269 | ||
@@ -1310,7 +1303,7 @@ s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) | |||
1310 | } | 1303 | } |
1311 | 1304 | ||
1312 | if (i == AUTO_READ_DONE_TIMEOUT) { | 1305 | if (i == AUTO_READ_DONE_TIMEOUT) { |
1313 | hw_dbg(hw, "Auto read by HW from NVM has not completed.\n"); | 1306 | e_dbg("Auto read by HW from NVM has not completed.\n"); |
1314 | return -E1000_ERR_RESET; | 1307 | return -E1000_ERR_RESET; |
1315 | } | 1308 | } |
1316 | 1309 | ||
@@ -1331,7 +1324,7 @@ s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) | |||
1331 | 1324 | ||
1332 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); | 1325 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); |
1333 | if (ret_val) { | 1326 | if (ret_val) { |
1334 | hw_dbg(hw, "NVM Read Error\n"); | 1327 | e_dbg("NVM Read Error\n"); |
1335 | return ret_val; | 1328 | return ret_val; |
1336 | } | 1329 | } |
1337 | 1330 | ||
@@ -1585,7 +1578,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw) | |||
1585 | } | 1578 | } |
1586 | 1579 | ||
1587 | if (!timeout) { | 1580 | if (!timeout) { |
1588 | hw_dbg(hw, "Master requests are pending.\n"); | 1581 | e_dbg("Master requests are pending.\n"); |
1589 | return -E1000_ERR_MASTER_REQUESTS_PENDING; | 1582 | return -E1000_ERR_MASTER_REQUESTS_PENDING; |
1590 | } | 1583 | } |
1591 | 1584 | ||
@@ -1608,7 +1601,7 @@ void e1000e_reset_adaptive(struct e1000_hw *hw) | |||
1608 | mac->ifs_step_size = IFS_STEP; | 1601 | mac->ifs_step_size = IFS_STEP; |
1609 | mac->ifs_ratio = IFS_RATIO; | 1602 | mac->ifs_ratio = IFS_RATIO; |
1610 | 1603 | ||
1611 | mac->in_ifs_mode = 0; | 1604 | mac->in_ifs_mode = false; |
1612 | ew32(AIT, 0); | 1605 | ew32(AIT, 0); |
1613 | } | 1606 | } |
1614 | 1607 | ||
@@ -1625,7 +1618,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw) | |||
1625 | 1618 | ||
1626 | if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { | 1619 | if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { |
1627 | if (mac->tx_packet_delta > MIN_NUM_XMITS) { | 1620 | if (mac->tx_packet_delta > MIN_NUM_XMITS) { |
1628 | mac->in_ifs_mode = 1; | 1621 | mac->in_ifs_mode = true; |
1629 | if (mac->current_ifs_val < mac->ifs_max_val) { | 1622 | if (mac->current_ifs_val < mac->ifs_max_val) { |
1630 | if (!mac->current_ifs_val) | 1623 | if (!mac->current_ifs_val) |
1631 | mac->current_ifs_val = mac->ifs_min_val; | 1624 | mac->current_ifs_val = mac->ifs_min_val; |
@@ -1639,7 +1632,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw) | |||
1639 | if (mac->in_ifs_mode && | 1632 | if (mac->in_ifs_mode && |
1640 | (mac->tx_packet_delta <= MIN_NUM_XMITS)) { | 1633 | (mac->tx_packet_delta <= MIN_NUM_XMITS)) { |
1641 | mac->current_ifs_val = 0; | 1634 | mac->current_ifs_val = 0; |
1642 | mac->in_ifs_mode = 0; | 1635 | mac->in_ifs_mode = false; |
1643 | ew32(AIT, 0); | 1636 | ew32(AIT, 0); |
1644 | } | 1637 | } |
1645 | } | 1638 | } |
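Note on the two hunks above: the change itself only switches in_ifs_mode from 0/1 to false/true to match its bool declaration, but the surrounding code is the adaptive inter-frame-spacing throttle -- when collisions grow faster than transmitted packets (scaled by ifs_ratio) the driver enters IFS mode and walks the AIT register from ifs_min_val toward ifs_max_val, then backs the extra gap out again (AIT = 0) once traffic falls to MIN_NUM_XMITS or below. A trimmed sketch of the update step, using only fields visible above; the increment by ifs_step_size is inferred from the values initialised in e1000e_reset_adaptive():

        if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
                if (mac->tx_packet_delta > MIN_NUM_XMITS) {
                        mac->in_ifs_mode = true;
                        if (mac->current_ifs_val < mac->ifs_max_val) {
                                if (!mac->current_ifs_val)
                                        mac->current_ifs_val = mac->ifs_min_val;
                                else
                                        mac->current_ifs_val += mac->ifs_step_size;
                                ew32(AIT, mac->current_ifs_val);
                        }
                }
        } else if (mac->in_ifs_mode &&
                   (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
                mac->current_ifs_val = 0;
                mac->in_ifs_mode = false;
                ew32(AIT, 0);
        }
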
@@ -1809,7 +1802,7 @@ s32 e1000e_acquire_nvm(struct e1000_hw *hw) | |||
1809 | if (!timeout) { | 1802 | if (!timeout) { |
1810 | eecd &= ~E1000_EECD_REQ; | 1803 | eecd &= ~E1000_EECD_REQ; |
1811 | ew32(EECD, eecd); | 1804 | ew32(EECD, eecd); |
1812 | hw_dbg(hw, "Could not acquire NVM grant\n"); | 1805 | e_dbg("Could not acquire NVM grant\n"); |
1813 | return -E1000_ERR_NVM; | 1806 | return -E1000_ERR_NVM; |
1814 | } | 1807 | } |
1815 | 1808 | ||
@@ -1914,7 +1907,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) | |||
1914 | } | 1907 | } |
1915 | 1908 | ||
1916 | if (!timeout) { | 1909 | if (!timeout) { |
1917 | hw_dbg(hw, "SPI NVM Status error\n"); | 1910 | e_dbg("SPI NVM Status error\n"); |
1918 | return -E1000_ERR_NVM; | 1911 | return -E1000_ERR_NVM; |
1919 | } | 1912 | } |
1920 | } | 1913 | } |
@@ -1943,7 +1936,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
1943 | */ | 1936 | */ |
1944 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 1937 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
1945 | (words == 0)) { | 1938 | (words == 0)) { |
1946 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1939 | e_dbg("nvm parameter(s) out of bounds\n"); |
1947 | return -E1000_ERR_NVM; | 1940 | return -E1000_ERR_NVM; |
1948 | } | 1941 | } |
1949 | 1942 | ||
@@ -1986,11 +1979,11 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
1986 | */ | 1979 | */ |
1987 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 1980 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
1988 | (words == 0)) { | 1981 | (words == 0)) { |
1989 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1982 | e_dbg("nvm parameter(s) out of bounds\n"); |
1990 | return -E1000_ERR_NVM; | 1983 | return -E1000_ERR_NVM; |
1991 | } | 1984 | } |
1992 | 1985 | ||
1993 | ret_val = nvm->ops.acquire_nvm(hw); | 1986 | ret_val = nvm->ops.acquire(hw); |
1994 | if (ret_val) | 1987 | if (ret_val) |
1995 | return ret_val; | 1988 | return ret_val; |
1996 | 1989 | ||
@@ -2001,7 +1994,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
2001 | 1994 | ||
2002 | ret_val = e1000_ready_nvm_eeprom(hw); | 1995 | ret_val = e1000_ready_nvm_eeprom(hw); |
2003 | if (ret_val) { | 1996 | if (ret_val) { |
2004 | nvm->ops.release_nvm(hw); | 1997 | nvm->ops.release(hw); |
2005 | return ret_val; | 1998 | return ret_val; |
2006 | } | 1999 | } |
2007 | 2000 | ||
@@ -2040,7 +2033,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
2040 | } | 2033 | } |
2041 | 2034 | ||
2042 | msleep(10); | 2035 | msleep(10); |
2043 | nvm->ops.release_nvm(hw); | 2036 | nvm->ops.release(hw); |
2044 | return 0; | 2037 | return 0; |
2045 | } | 2038 | } |
2046 | 2039 | ||
@@ -2066,7 +2059,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw) | |||
2066 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, | 2059 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, |
2067 | &mac_addr_offset); | 2060 | &mac_addr_offset); |
2068 | if (ret_val) { | 2061 | if (ret_val) { |
2069 | hw_dbg(hw, "NVM Read Error\n"); | 2062 | e_dbg("NVM Read Error\n"); |
2070 | return ret_val; | 2063 | return ret_val; |
2071 | } | 2064 | } |
2072 | if (mac_addr_offset == 0xFFFF) | 2065 | if (mac_addr_offset == 0xFFFF) |
@@ -2081,7 +2074,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw) | |||
2081 | ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, | 2074 | ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, |
2082 | &nvm_data); | 2075 | &nvm_data); |
2083 | if (ret_val) { | 2076 | if (ret_val) { |
2084 | hw_dbg(hw, "NVM Read Error\n"); | 2077 | e_dbg("NVM Read Error\n"); |
2085 | return ret_val; | 2078 | return ret_val; |
2086 | } | 2079 | } |
2087 | if (nvm_data & 0x0001) | 2080 | if (nvm_data & 0x0001) |
@@ -2096,7 +2089,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw) | |||
2096 | offset = mac_addr_offset + (i >> 1); | 2089 | offset = mac_addr_offset + (i >> 1); |
2097 | ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); | 2090 | ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); |
2098 | if (ret_val) { | 2091 | if (ret_val) { |
2099 | hw_dbg(hw, "NVM Read Error\n"); | 2092 | e_dbg("NVM Read Error\n"); |
2100 | return ret_val; | 2093 | return ret_val; |
2101 | } | 2094 | } |
2102 | hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); | 2095 | hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); |
@@ -2129,14 +2122,14 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) | |||
2129 | for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { | 2122 | for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { |
2130 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); | 2123 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); |
2131 | if (ret_val) { | 2124 | if (ret_val) { |
2132 | hw_dbg(hw, "NVM Read Error\n"); | 2125 | e_dbg("NVM Read Error\n"); |
2133 | return ret_val; | 2126 | return ret_val; |
2134 | } | 2127 | } |
2135 | checksum += nvm_data; | 2128 | checksum += nvm_data; |
2136 | } | 2129 | } |
2137 | 2130 | ||
2138 | if (checksum != (u16) NVM_SUM) { | 2131 | if (checksum != (u16) NVM_SUM) { |
2139 | hw_dbg(hw, "NVM Checksum Invalid\n"); | 2132 | e_dbg("NVM Checksum Invalid\n"); |
2140 | return -E1000_ERR_NVM; | 2133 | return -E1000_ERR_NVM; |
2141 | } | 2134 | } |
2142 | 2135 | ||
@@ -2160,7 +2153,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) | |||
2160 | for (i = 0; i < NVM_CHECKSUM_REG; i++) { | 2153 | for (i = 0; i < NVM_CHECKSUM_REG; i++) { |
2161 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); | 2154 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); |
2162 | if (ret_val) { | 2155 | if (ret_val) { |
2163 | hw_dbg(hw, "NVM Read Error while updating checksum.\n"); | 2156 | e_dbg("NVM Read Error while updating checksum.\n"); |
2164 | return ret_val; | 2157 | return ret_val; |
2165 | } | 2158 | } |
2166 | checksum += nvm_data; | 2159 | checksum += nvm_data; |
@@ -2168,7 +2161,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) | |||
2168 | checksum = (u16) NVM_SUM - checksum; | 2161 | checksum = (u16) NVM_SUM - checksum; |
2169 | ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); | 2162 | ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); |
2170 | if (ret_val) | 2163 | if (ret_val) |
2171 | hw_dbg(hw, "NVM Write Error while updating checksum.\n"); | 2164 | e_dbg("NVM Write Error while updating checksum.\n"); |
2172 | 2165 | ||
2173 | return ret_val; | 2166 | return ret_val; |
2174 | } | 2167 | } |
@@ -2231,7 +2224,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) | |||
2231 | /* Check that the host interface is enabled. */ | 2224 | /* Check that the host interface is enabled. */ |
2232 | hicr = er32(HICR); | 2225 | hicr = er32(HICR); |
2233 | if ((hicr & E1000_HICR_EN) == 0) { | 2226 | if ((hicr & E1000_HICR_EN) == 0) { |
2234 | hw_dbg(hw, "E1000_HOST_EN bit disabled.\n"); | 2227 | e_dbg("E1000_HOST_EN bit disabled.\n"); |
2235 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | 2228 | return -E1000_ERR_HOST_INTERFACE_COMMAND; |
2236 | } | 2229 | } |
2237 | /* check the previous command is completed */ | 2230 | /* check the previous command is completed */ |
@@ -2243,7 +2236,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) | |||
2243 | } | 2236 | } |
2244 | 2237 | ||
2245 | if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { | 2238 | if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { |
2246 | hw_dbg(hw, "Previous command timeout failed .\n"); | 2239 | e_dbg("Previous command timeout failed .\n"); |
2247 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | 2240 | return -E1000_ERR_HOST_INTERFACE_COMMAND; |
2248 | } | 2241 | } |
2249 | 2242 | ||
@@ -2282,7 +2275,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
2282 | 2275 | ||
2283 | /* No manageability, no filtering */ | 2276 | /* No manageability, no filtering */ |
2284 | if (!e1000e_check_mng_mode(hw)) { | 2277 | if (!e1000e_check_mng_mode(hw)) { |
2285 | hw->mac.tx_pkt_filtering = 0; | 2278 | hw->mac.tx_pkt_filtering = false; |
2286 | return 0; | 2279 | return 0; |
2287 | } | 2280 | } |
2288 | 2281 | ||
@@ -2292,7 +2285,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
2292 | */ | 2285 | */ |
2293 | ret_val = e1000_mng_enable_host_if(hw); | 2286 | ret_val = e1000_mng_enable_host_if(hw); |
2294 | if (ret_val != 0) { | 2287 | if (ret_val != 0) { |
2295 | hw->mac.tx_pkt_filtering = 0; | 2288 | hw->mac.tx_pkt_filtering = false; |
2296 | return ret_val; | 2289 | return ret_val; |
2297 | } | 2290 | } |
2298 | 2291 | ||
@@ -2311,17 +2304,17 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
2311 | * take the safe route of assuming Tx filtering is enabled. | 2304 | * take the safe route of assuming Tx filtering is enabled. |
2312 | */ | 2305 | */ |
2313 | if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { | 2306 | if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { |
2314 | hw->mac.tx_pkt_filtering = 1; | 2307 | hw->mac.tx_pkt_filtering = true; |
2315 | return 1; | 2308 | return 1; |
2316 | } | 2309 | } |
2317 | 2310 | ||
2318 | /* Cookie area is valid, make the final check for filtering. */ | 2311 | /* Cookie area is valid, make the final check for filtering. */ |
2319 | if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { | 2312 | if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { |
2320 | hw->mac.tx_pkt_filtering = 0; | 2313 | hw->mac.tx_pkt_filtering = false; |
2321 | return 0; | 2314 | return 0; |
2322 | } | 2315 | } |
2323 | 2316 | ||
2324 | hw->mac.tx_pkt_filtering = 1; | 2317 | hw->mac.tx_pkt_filtering = true; |
2325 | return 1; | 2318 | return 1; |
2326 | } | 2319 | } |
2327 | 2320 | ||
@@ -2478,7 +2471,7 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) | |||
2478 | { | 2471 | { |
2479 | u32 manc; | 2472 | u32 manc; |
2480 | u32 fwsm, factps; | 2473 | u32 fwsm, factps; |
2481 | bool ret_val = 0; | 2474 | bool ret_val = false; |
2482 | 2475 | ||
2483 | manc = er32(MANC); | 2476 | manc = er32(MANC); |
2484 | 2477 | ||
@@ -2493,13 +2486,13 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) | |||
2493 | if (!(factps & E1000_FACTPS_MNGCG) && | 2486 | if (!(factps & E1000_FACTPS_MNGCG) && |
2494 | ((fwsm & E1000_FWSM_MODE_MASK) == | 2487 | ((fwsm & E1000_FWSM_MODE_MASK) == |
2495 | (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { | 2488 | (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { |
2496 | ret_val = 1; | 2489 | ret_val = true; |
2497 | return ret_val; | 2490 | return ret_val; |
2498 | } | 2491 | } |
2499 | } else { | 2492 | } else { |
2500 | if ((manc & E1000_MANC_SMBUS_EN) && | 2493 | if ((manc & E1000_MANC_SMBUS_EN) && |
2501 | !(manc & E1000_MANC_ASF_EN)) { | 2494 | !(manc & E1000_MANC_ASF_EN)) { |
2502 | ret_val = 1; | 2495 | ret_val = true; |
2503 | return ret_val; | 2496 | return ret_val; |
2504 | } | 2497 | } |
2505 | } | 2498 | } |
@@ -2514,14 +2507,14 @@ s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num) | |||
2514 | 2507 | ||
2515 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); | 2508 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); |
2516 | if (ret_val) { | 2509 | if (ret_val) { |
2517 | hw_dbg(hw, "NVM Read Error\n"); | 2510 | e_dbg("NVM Read Error\n"); |
2518 | return ret_val; | 2511 | return ret_val; |
2519 | } | 2512 | } |
2520 | *pba_num = (u32)(nvm_data << 16); | 2513 | *pba_num = (u32)(nvm_data << 16); |
2521 | 2514 | ||
2522 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); | 2515 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); |
2523 | if (ret_val) { | 2516 | if (ret_val) { |
2524 | hw_dbg(hw, "NVM Read Error\n"); | 2517 | e_dbg("NVM Read Error\n"); |
2525 | return ret_val; | 2518 | return ret_val; |
2526 | } | 2519 | } |
2527 | *pba_num |= nvm_data; | 2520 | *pba_num |= nvm_data; |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index fad8f9ea0043..e546b4ebf155 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -65,17 +65,6 @@ static const struct e1000_info *e1000_info_tbl[] = { | |||
65 | [board_pchlan] = &e1000_pch_info, | 65 | [board_pchlan] = &e1000_pch_info, |
66 | }; | 66 | }; |
67 | 67 | ||
68 | #ifdef DEBUG | ||
69 | /** | ||
70 | * e1000_get_hw_dev_name - return device name string | ||
71 | * used by hardware layer to print debugging information | ||
72 | **/ | ||
73 | char *e1000e_get_hw_dev_name(struct e1000_hw *hw) | ||
74 | { | ||
75 | return hw->adapter->netdev->name; | ||
76 | } | ||
77 | #endif | ||
78 | |||
79 | /** | 68 | /** |
80 | * e1000_desc_unused - calculate if we have unused descriptors | 69 | * e1000_desc_unused - calculate if we have unused descriptors |
81 | **/ | 70 | **/ |
@@ -167,7 +156,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
167 | struct e1000_buffer *buffer_info; | 156 | struct e1000_buffer *buffer_info; |
168 | struct sk_buff *skb; | 157 | struct sk_buff *skb; |
169 | unsigned int i; | 158 | unsigned int i; |
170 | unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; | 159 | unsigned int bufsz = adapter->rx_buffer_len; |
171 | 160 | ||
172 | i = rx_ring->next_to_use; | 161 | i = rx_ring->next_to_use; |
173 | buffer_info = &rx_ring->buffer_info[i]; | 162 | buffer_info = &rx_ring->buffer_info[i]; |
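Note: the NET_IP_ALIGN padding drops out of bufsz here because, as the following hunks show, the allocations switch from netdev_alloc_skb() plus an explicit skb_reserve(skb, NET_IP_ALIGN) to netdev_alloc_skb_ip_align(), which folds both steps together so that the IP header following the 14-byte Ethernet header ends up 16-byte aligned. The helper is roughly equivalent to:

        /* Approximate behaviour of netdev_alloc_skb_ip_align(). */
        static inline struct sk_buff *
        alloc_skb_ip_align_sketch(struct net_device *dev, unsigned int len)
        {
                struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

                if (NET_IP_ALIGN && skb)
                        skb_reserve(skb, NET_IP_ALIGN);
                return skb;
        }
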
@@ -179,20 +168,13 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
179 | goto map_skb; | 168 | goto map_skb; |
180 | } | 169 | } |
181 | 170 | ||
182 | skb = netdev_alloc_skb(netdev, bufsz); | 171 | skb = netdev_alloc_skb_ip_align(netdev, bufsz); |
183 | if (!skb) { | 172 | if (!skb) { |
184 | /* Better luck next round */ | 173 | /* Better luck next round */ |
185 | adapter->alloc_rx_buff_failed++; | 174 | adapter->alloc_rx_buff_failed++; |
186 | break; | 175 | break; |
187 | } | 176 | } |
188 | 177 | ||
189 | /* | ||
190 | * Make buffer alignment 2 beyond a 16 byte boundary | ||
191 | * this will result in a 16 byte aligned IP header after | ||
192 | * the 14 byte MAC header is removed | ||
193 | */ | ||
194 | skb_reserve(skb, NET_IP_ALIGN); | ||
195 | |||
196 | buffer_info->skb = skb; | 178 | buffer_info->skb = skb; |
197 | map_skb: | 179 | map_skb: |
198 | buffer_info->dma = pci_map_single(pdev, skb->data, | 180 | buffer_info->dma = pci_map_single(pdev, skb->data, |
@@ -284,21 +266,14 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
284 | cpu_to_le64(ps_page->dma); | 266 | cpu_to_le64(ps_page->dma); |
285 | } | 267 | } |
286 | 268 | ||
287 | skb = netdev_alloc_skb(netdev, | 269 | skb = netdev_alloc_skb_ip_align(netdev, |
288 | adapter->rx_ps_bsize0 + NET_IP_ALIGN); | 270 | adapter->rx_ps_bsize0); |
289 | 271 | ||
290 | if (!skb) { | 272 | if (!skb) { |
291 | adapter->alloc_rx_buff_failed++; | 273 | adapter->alloc_rx_buff_failed++; |
292 | break; | 274 | break; |
293 | } | 275 | } |
294 | 276 | ||
295 | /* | ||
296 | * Make buffer alignment 2 beyond a 16 byte boundary | ||
297 | * this will result in a 16 byte aligned IP header after | ||
298 | * the 14 byte MAC header is removed | ||
299 | */ | ||
300 | skb_reserve(skb, NET_IP_ALIGN); | ||
301 | |||
302 | buffer_info->skb = skb; | 277 | buffer_info->skb = skb; |
303 | buffer_info->dma = pci_map_single(pdev, skb->data, | 278 | buffer_info->dma = pci_map_single(pdev, skb->data, |
304 | adapter->rx_ps_bsize0, | 279 | adapter->rx_ps_bsize0, |
@@ -359,9 +334,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, | |||
359 | struct e1000_buffer *buffer_info; | 334 | struct e1000_buffer *buffer_info; |
360 | struct sk_buff *skb; | 335 | struct sk_buff *skb; |
361 | unsigned int i; | 336 | unsigned int i; |
362 | unsigned int bufsz = 256 - | 337 | unsigned int bufsz = 256 - 16 /* for skb_reserve */; |
363 | 16 /* for skb_reserve */ - | ||
364 | NET_IP_ALIGN; | ||
365 | 338 | ||
366 | i = rx_ring->next_to_use; | 339 | i = rx_ring->next_to_use; |
367 | buffer_info = &rx_ring->buffer_info[i]; | 340 | buffer_info = &rx_ring->buffer_info[i]; |
@@ -373,19 +346,13 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, | |||
373 | goto check_page; | 346 | goto check_page; |
374 | } | 347 | } |
375 | 348 | ||
376 | skb = netdev_alloc_skb(netdev, bufsz); | 349 | skb = netdev_alloc_skb_ip_align(netdev, bufsz); |
377 | if (unlikely(!skb)) { | 350 | if (unlikely(!skb)) { |
378 | /* Better luck next round */ | 351 | /* Better luck next round */ |
379 | adapter->alloc_rx_buff_failed++; | 352 | adapter->alloc_rx_buff_failed++; |
380 | break; | 353 | break; |
381 | } | 354 | } |
382 | 355 | ||
383 | /* Make buffer alignment 2 beyond a 16 byte boundary | ||
384 | * this will result in a 16 byte aligned IP header after | ||
385 | * the 14 byte MAC header is removed | ||
386 | */ | ||
387 | skb_reserve(skb, NET_IP_ALIGN); | ||
388 | |||
389 | buffer_info->skb = skb; | 356 | buffer_info->skb = skb; |
390 | check_page: | 357 | check_page: |
391 | /* allocate a new page if necessary */ | 358 | /* allocate a new page if necessary */ |
@@ -437,6 +404,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
437 | { | 404 | { |
438 | struct net_device *netdev = adapter->netdev; | 405 | struct net_device *netdev = adapter->netdev; |
439 | struct pci_dev *pdev = adapter->pdev; | 406 | struct pci_dev *pdev = adapter->pdev; |
407 | struct e1000_hw *hw = &adapter->hw; | ||
440 | struct e1000_ring *rx_ring = adapter->rx_ring; | 408 | struct e1000_ring *rx_ring = adapter->rx_ring; |
441 | struct e1000_rx_desc *rx_desc, *next_rxd; | 409 | struct e1000_rx_desc *rx_desc, *next_rxd; |
442 | struct e1000_buffer *buffer_info, *next_buffer; | 410 | struct e1000_buffer *buffer_info, *next_buffer; |
@@ -486,8 +454,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
486 | * packet, also make sure the frame isn't just CRC only */ | 454 | * packet, also make sure the frame isn't just CRC only */ |
487 | if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { | 455 | if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { |
488 | /* All receives must fit into a single buffer */ | 456 | /* All receives must fit into a single buffer */ |
489 | e_dbg("%s: Receive packet consumed multiple buffers\n", | 457 | e_dbg("Receive packet consumed multiple buffers\n"); |
490 | netdev->name); | ||
491 | /* recycle */ | 458 | /* recycle */ |
492 | buffer_info->skb = skb; | 459 | buffer_info->skb = skb; |
493 | goto next_desc; | 460 | goto next_desc; |
@@ -513,9 +480,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
513 | */ | 480 | */ |
514 | if (length < copybreak) { | 481 | if (length < copybreak) { |
515 | struct sk_buff *new_skb = | 482 | struct sk_buff *new_skb = |
516 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); | 483 | netdev_alloc_skb_ip_align(netdev, length); |
517 | if (new_skb) { | 484 | if (new_skb) { |
518 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
519 | skb_copy_to_linear_data_offset(new_skb, | 485 | skb_copy_to_linear_data_offset(new_skb, |
520 | -NET_IP_ALIGN, | 486 | -NET_IP_ALIGN, |
521 | (skb->data - | 487 | (skb->data - |
@@ -560,8 +526,8 @@ next_desc: | |||
560 | 526 | ||
561 | adapter->total_rx_bytes += total_rx_bytes; | 527 | adapter->total_rx_bytes += total_rx_bytes; |
562 | adapter->total_rx_packets += total_rx_packets; | 528 | adapter->total_rx_packets += total_rx_packets; |
563 | adapter->net_stats.rx_bytes += total_rx_bytes; | 529 | netdev->stats.rx_bytes += total_rx_bytes; |
564 | adapter->net_stats.rx_packets += total_rx_packets; | 530 | netdev->stats.rx_packets += total_rx_packets; |
565 | return cleaned; | 531 | return cleaned; |
566 | } | 532 | } |
567 | 533 | ||
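Note on the hunk above: the Rx byte and packet totals now accumulate directly into netdev->stats rather than the driver-private adapter->net_stats copy (the Tx-clean hunk further down does the same); with the stats structure embedded in struct net_device, the driver no longer needs to maintain a parallel copy. A sketch of the matching ndo_get_stats simplification, under the assumption that the old code handed back &adapter->net_stats (only the update sites are visible here):

        /*
         * Illustrative only: with netdev->stats used directly, get_stats
         * can simply return the structure embedded in the net_device.
         */
        static struct net_device_stats *example_get_stats(struct net_device *netdev)
        {
                return &netdev->stats;
        }
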
@@ -578,15 +544,27 @@ static void e1000_put_txbuf(struct e1000_adapter *adapter, | |||
578 | buffer_info->time_stamp = 0; | 544 | buffer_info->time_stamp = 0; |
579 | } | 545 | } |
580 | 546 | ||
581 | static void e1000_print_tx_hang(struct e1000_adapter *adapter) | 547 | static void e1000_print_hw_hang(struct work_struct *work) |
582 | { | 548 | { |
549 | struct e1000_adapter *adapter = container_of(work, | ||
550 | struct e1000_adapter, | ||
551 | print_hang_task); | ||
583 | struct e1000_ring *tx_ring = adapter->tx_ring; | 552 | struct e1000_ring *tx_ring = adapter->tx_ring; |
584 | unsigned int i = tx_ring->next_to_clean; | 553 | unsigned int i = tx_ring->next_to_clean; |
585 | unsigned int eop = tx_ring->buffer_info[i].next_to_watch; | 554 | unsigned int eop = tx_ring->buffer_info[i].next_to_watch; |
586 | struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); | 555 | struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); |
556 | struct e1000_hw *hw = &adapter->hw; | ||
557 | u16 phy_status, phy_1000t_status, phy_ext_status; | ||
558 | u16 pci_status; | ||
559 | |||
560 | e1e_rphy(hw, PHY_STATUS, &phy_status); | ||
561 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); | ||
562 | e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); | ||
587 | 563 | ||
588 | /* detected Tx unit hang */ | 564 | pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); |
589 | e_err("Detected Tx Unit Hang:\n" | 565 | |
566 | /* detected Hardware unit hang */ | ||
567 | e_err("Detected Hardware Unit Hang:\n" | ||
590 | " TDH <%x>\n" | 568 | " TDH <%x>\n" |
591 | " TDT <%x>\n" | 569 | " TDT <%x>\n" |
592 | " next_to_use <%x>\n" | 570 | " next_to_use <%x>\n" |
@@ -595,7 +573,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter) | |||
595 | " time_stamp <%lx>\n" | 573 | " time_stamp <%lx>\n" |
596 | " next_to_watch <%x>\n" | 574 | " next_to_watch <%x>\n" |
597 | " jiffies <%lx>\n" | 575 | " jiffies <%lx>\n" |
598 | " next_to_watch.status <%x>\n", | 576 | " next_to_watch.status <%x>\n" |
577 | "MAC Status <%x>\n" | ||
578 | "PHY Status <%x>\n" | ||
579 | "PHY 1000BASE-T Status <%x>\n" | ||
580 | "PHY Extended Status <%x>\n" | ||
581 | "PCI Status <%x>\n", | ||
599 | readl(adapter->hw.hw_addr + tx_ring->head), | 582 | readl(adapter->hw.hw_addr + tx_ring->head), |
600 | readl(adapter->hw.hw_addr + tx_ring->tail), | 583 | readl(adapter->hw.hw_addr + tx_ring->tail), |
601 | tx_ring->next_to_use, | 584 | tx_ring->next_to_use, |
@@ -603,7 +586,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter) | |||
603 | tx_ring->buffer_info[eop].time_stamp, | 586 | tx_ring->buffer_info[eop].time_stamp, |
604 | eop, | 587 | eop, |
605 | jiffies, | 588 | jiffies, |
606 | eop_desc->upper.fields.status); | 589 | eop_desc->upper.fields.status, |
590 | er32(STATUS), | ||
591 | phy_status, | ||
592 | phy_1000t_status, | ||
593 | phy_ext_status, | ||
594 | pci_status); | ||
607 | } | 595 | } |
608 | 596 | ||
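The hunk above turns the hang dump into a deferred work item so the PHY and PCI config space reads can run outside interrupt context; the handler recovers its adapter with container_of() from the embedded work_struct. A minimal sketch of that pattern (struct demo_adapter and the demo_* names are illustrative only):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_adapter {
        struct work_struct print_hang_task;
        /* ... rest of the adapter state ... */
};

static void demo_print_hw_hang(struct work_struct *work)
{
        /* map the work_struct back to the adapter that embeds it */
        struct demo_adapter *adapter = container_of(work,
                                                    struct demo_adapter,
                                                    print_hang_task);
        /* process context: slow PHY and PCI config reads are allowed here */
        (void)adapter;
}

static void demo_init(struct demo_adapter *adapter)
{
        INIT_WORK(&adapter->print_hang_task, demo_print_hw_hang);
}

/* from the Tx clean path, only queue it:
 *      schedule_work(&adapter->print_hang_task);
 */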
609 | /** | 597 | /** |
@@ -677,21 +665,23 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |||
677 | } | 665 | } |
678 | 666 | ||
679 | if (adapter->detect_tx_hung) { | 667 | if (adapter->detect_tx_hung) { |
680 | /* Detect a transmit hang in hardware, this serializes the | 668 | /* |
681 | * check with the clearing of time_stamp and movement of i */ | 669 | * Detect a transmit hang in hardware, this serializes the |
670 | * check with the clearing of time_stamp and movement of i | ||
671 | */ | ||
682 | adapter->detect_tx_hung = 0; | 672 | adapter->detect_tx_hung = 0; |
683 | if (tx_ring->buffer_info[i].time_stamp && | 673 | if (tx_ring->buffer_info[i].time_stamp && |
684 | time_after(jiffies, tx_ring->buffer_info[i].time_stamp | 674 | time_after(jiffies, tx_ring->buffer_info[i].time_stamp |
685 | + (adapter->tx_timeout_factor * HZ)) | 675 | + (adapter->tx_timeout_factor * HZ)) |
686 | && !(er32(STATUS) & E1000_STATUS_TXOFF)) { | 676 | && !(er32(STATUS) & E1000_STATUS_TXOFF)) { |
687 | e1000_print_tx_hang(adapter); | 677 | schedule_work(&adapter->print_hang_task); |
688 | netif_stop_queue(netdev); | 678 | netif_stop_queue(netdev); |
689 | } | 679 | } |
690 | } | 680 | } |
691 | adapter->total_tx_bytes += total_tx_bytes; | 681 | adapter->total_tx_bytes += total_tx_bytes; |
692 | adapter->total_tx_packets += total_tx_packets; | 682 | adapter->total_tx_packets += total_tx_packets; |
693 | adapter->net_stats.tx_bytes += total_tx_bytes; | 683 | netdev->stats.tx_bytes += total_tx_bytes; |
694 | adapter->net_stats.tx_packets += total_tx_packets; | 684 | netdev->stats.tx_packets += total_tx_packets; |
695 | return (count < tx_ring->count); | 685 | return (count < tx_ring->count); |
696 | } | 686 | } |
697 | 687 | ||
@@ -705,6 +695,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |||
705 | static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | 695 | static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, |
706 | int *work_done, int work_to_do) | 696 | int *work_done, int work_to_do) |
707 | { | 697 | { |
698 | struct e1000_hw *hw = &adapter->hw; | ||
708 | union e1000_rx_desc_packet_split *rx_desc, *next_rxd; | 699 | union e1000_rx_desc_packet_split *rx_desc, *next_rxd; |
709 | struct net_device *netdev = adapter->netdev; | 700 | struct net_device *netdev = adapter->netdev; |
710 | struct pci_dev *pdev = adapter->pdev; | 701 | struct pci_dev *pdev = adapter->pdev; |
@@ -748,8 +739,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
748 | buffer_info->dma = 0; | 739 | buffer_info->dma = 0; |
749 | 740 | ||
750 | if (!(staterr & E1000_RXD_STAT_EOP)) { | 741 | if (!(staterr & E1000_RXD_STAT_EOP)) { |
751 | e_dbg("%s: Packet Split buffers didn't pick up the " | 742 | e_dbg("Packet Split buffers didn't pick up the full " |
752 | "full packet\n", netdev->name); | 743 | "packet\n"); |
753 | dev_kfree_skb_irq(skb); | 744 | dev_kfree_skb_irq(skb); |
754 | goto next_desc; | 745 | goto next_desc; |
755 | } | 746 | } |
@@ -762,8 +753,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
762 | length = le16_to_cpu(rx_desc->wb.middle.length0); | 753 | length = le16_to_cpu(rx_desc->wb.middle.length0); |
763 | 754 | ||
764 | if (!length) { | 755 | if (!length) { |
765 | e_dbg("%s: Last part of the packet spanning multiple " | 756 | e_dbg("Last part of the packet spanning multiple " |
766 | "descriptors\n", netdev->name); | 757 | "descriptors\n"); |
767 | dev_kfree_skb_irq(skb); | 758 | dev_kfree_skb_irq(skb); |
768 | goto next_desc; | 759 | goto next_desc; |
769 | } | 760 | } |
@@ -871,8 +862,8 @@ next_desc: | |||
871 | 862 | ||
872 | adapter->total_rx_bytes += total_rx_bytes; | 863 | adapter->total_rx_bytes += total_rx_bytes; |
873 | adapter->total_rx_packets += total_rx_packets; | 864 | adapter->total_rx_packets += total_rx_packets; |
874 | adapter->net_stats.rx_bytes += total_rx_bytes; | 865 | netdev->stats.rx_bytes += total_rx_bytes; |
875 | adapter->net_stats.rx_packets += total_rx_packets; | 866 | netdev->stats.rx_packets += total_rx_packets; |
876 | return cleaned; | 867 | return cleaned; |
877 | } | 868 | } |
878 | 869 | ||
@@ -1051,8 +1042,8 @@ next_desc: | |||
1051 | 1042 | ||
1052 | adapter->total_rx_bytes += total_rx_bytes; | 1043 | adapter->total_rx_bytes += total_rx_bytes; |
1053 | adapter->total_rx_packets += total_rx_packets; | 1044 | adapter->total_rx_packets += total_rx_packets; |
1054 | adapter->net_stats.rx_bytes += total_rx_bytes; | 1045 | netdev->stats.rx_bytes += total_rx_bytes; |
1055 | adapter->net_stats.rx_packets += total_rx_packets; | 1046 | netdev->stats.rx_packets += total_rx_packets; |
1056 | return cleaned; | 1047 | return cleaned; |
1057 | } | 1048 | } |
1058 | 1049 | ||
@@ -1199,7 +1190,7 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
1199 | struct e1000_hw *hw = &adapter->hw; | 1190 | struct e1000_hw *hw = &adapter->hw; |
1200 | u32 rctl, icr = er32(ICR); | 1191 | u32 rctl, icr = er32(ICR); |
1201 | 1192 | ||
1202 | if (!icr) | 1193 | if (!icr || test_bit(__E1000_DOWN, &adapter->state)) |
1203 | return IRQ_NONE; /* Not our interrupt */ | 1194 | return IRQ_NONE; /* Not our interrupt */ |
1204 | 1195 | ||
1205 | /* | 1196 | /* |
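The interrupt hunk above adds a second early-exit condition: besides the shared-IRQ "not ours" case (ICR reads back zero), the handler now bails out while the interface is being torn down. A sketch of that shape, assuming the driver's er32() macro and state bit as shown in the diff (demo_intr is illustrative):

#include <linux/interrupt.h>
#include "e1000.h"

static irqreturn_t demo_intr(int irq, void *data)
{
        struct net_device *netdev = data;
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 icr = er32(ICR);            /* reading ICR acks the causes */

        /* not our interrupt (shared line), or the interface is going down */
        if (!icr || test_bit(__E1000_DOWN, &adapter->state))
                return IRQ_NONE;

        /* ... normal Rx/Tx/link handling, then napi_schedule() ... */
        return IRQ_HANDLED;
}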
@@ -1481,7 +1472,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter) | |||
1481 | else | 1472 | else |
1482 | memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); | 1473 | memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); |
1483 | err = request_irq(adapter->msix_entries[vector].vector, | 1474 | err = request_irq(adapter->msix_entries[vector].vector, |
1484 | &e1000_intr_msix_rx, 0, adapter->rx_ring->name, | 1475 | e1000_intr_msix_rx, 0, adapter->rx_ring->name, |
1485 | netdev); | 1476 | netdev); |
1486 | if (err) | 1477 | if (err) |
1487 | goto out; | 1478 | goto out; |
@@ -1494,7 +1485,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter) | |||
1494 | else | 1485 | else |
1495 | memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); | 1486 | memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); |
1496 | err = request_irq(adapter->msix_entries[vector].vector, | 1487 | err = request_irq(adapter->msix_entries[vector].vector, |
1497 | &e1000_intr_msix_tx, 0, adapter->tx_ring->name, | 1488 | e1000_intr_msix_tx, 0, adapter->tx_ring->name, |
1498 | netdev); | 1489 | netdev); |
1499 | if (err) | 1490 | if (err) |
1500 | goto out; | 1491 | goto out; |
@@ -1503,7 +1494,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter) | |||
1503 | vector++; | 1494 | vector++; |
1504 | 1495 | ||
1505 | err = request_irq(adapter->msix_entries[vector].vector, | 1496 | err = request_irq(adapter->msix_entries[vector].vector, |
1506 | &e1000_msix_other, 0, netdev->name, netdev); | 1497 | e1000_msix_other, 0, netdev->name, netdev); |
1507 | if (err) | 1498 | if (err) |
1508 | goto out; | 1499 | goto out; |
1509 | 1500 | ||
@@ -1534,7 +1525,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) | |||
1534 | e1000e_set_interrupt_capability(adapter); | 1525 | e1000e_set_interrupt_capability(adapter); |
1535 | } | 1526 | } |
1536 | if (adapter->flags & FLAG_MSI_ENABLED) { | 1527 | if (adapter->flags & FLAG_MSI_ENABLED) { |
1537 | err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0, | 1528 | err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, |
1538 | netdev->name, netdev); | 1529 | netdev->name, netdev); |
1539 | if (!err) | 1530 | if (!err) |
1540 | return err; | 1531 | return err; |
@@ -1544,7 +1535,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) | |||
1544 | adapter->int_mode = E1000E_INT_MODE_LEGACY; | 1535 | adapter->int_mode = E1000E_INT_MODE_LEGACY; |
1545 | } | 1536 | } |
1546 | 1537 | ||
1547 | err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED, | 1538 | err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, |
1548 | netdev->name, netdev); | 1539 | netdev->name, netdev); |
1549 | if (err) | 1540 | if (err) |
1550 | e_err("Unable to allocate interrupt, Error: %d\n", err); | 1541 | e_err("Unable to allocate interrupt, Error: %d\n", err); |
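The request_irq() hunks above only drop a redundant address-of operator: in C a function name used as an argument already decays to a pointer, so e1000_intr and &e1000_intr pass the same value. An illustrative sketch (demo_* names are not driver code):

#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int demo_request(unsigned int irq, void *dev_id)
{
        /* identical to request_irq(irq, &demo_handler, ...); the function
         * designator decays to a pointer either way */
        return request_irq(irq, demo_handler, IRQF_SHARED, "demo", dev_id);
}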
@@ -2464,8 +2455,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2464 | ew32(ITR, 1000000000 / (adapter->itr * 256)); | 2455 | ew32(ITR, 1000000000 / (adapter->itr * 256)); |
2465 | 2456 | ||
2466 | ctrl_ext = er32(CTRL_EXT); | 2457 | ctrl_ext = er32(CTRL_EXT); |
2467 | /* Reset delay timers after every interrupt */ | ||
2468 | ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; | ||
2469 | /* Auto-Mask interrupts upon ICR access */ | 2458 | /* Auto-Mask interrupts upon ICR access */ |
2470 | ctrl_ext |= E1000_CTRL_EXT_IAME; | 2459 | ctrl_ext |= E1000_CTRL_EXT_IAME; |
2471 | ew32(IAM, 0xffffffff); | 2460 | ew32(IAM, 0xffffffff); |
@@ -2507,21 +2496,23 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2507 | * packet size is equal or larger than the specified value (in 8 byte | 2496 | * packet size is equal or larger than the specified value (in 8 byte |
2508 | * units), e.g. using jumbo frames when setting to E1000_ERT_2048 | 2497 | * units), e.g. using jumbo frames when setting to E1000_ERT_2048 |
2509 | */ | 2498 | */ |
2510 | if ((adapter->flags & FLAG_HAS_ERT) && | 2499 | if (adapter->flags & FLAG_HAS_ERT) { |
2511 | (adapter->netdev->mtu > ETH_DATA_LEN)) { | 2500 | if (adapter->netdev->mtu > ETH_DATA_LEN) { |
2512 | u32 rxdctl = er32(RXDCTL(0)); | 2501 | u32 rxdctl = er32(RXDCTL(0)); |
2513 | ew32(RXDCTL(0), rxdctl | 0x3); | 2502 | ew32(RXDCTL(0), rxdctl | 0x3); |
2514 | ew32(ERT, E1000_ERT_2048 | (1 << 13)); | 2503 | ew32(ERT, E1000_ERT_2048 | (1 << 13)); |
2515 | /* | 2504 | /* |
2516 | * With jumbo frames and early-receive enabled, excessive | 2505 | * With jumbo frames and early-receive enabled, |
2517 | * C4->C2 latencies result in dropped transactions. | 2506 | * excessive C-state transition latencies result in |
2518 | */ | 2507 | * dropped transactions. |
2519 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, | 2508 | */ |
2520 | e1000e_driver_name, 55); | 2509 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, |
2521 | } else { | 2510 | adapter->netdev->name, 55); |
2522 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, | 2511 | } else { |
2523 | e1000e_driver_name, | 2512 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, |
2524 | PM_QOS_DEFAULT_VALUE); | 2513 | adapter->netdev->name, |
2514 | PM_QOS_DEFAULT_VALUE); | ||
2515 | } | ||
2525 | } | 2516 | } |
2526 | 2517 | ||
2527 | /* Enable Receives */ | 2518 | /* Enable Receives */ |
@@ -2856,6 +2847,12 @@ int e1000e_up(struct e1000_adapter *adapter) | |||
2856 | { | 2847 | { |
2857 | struct e1000_hw *hw = &adapter->hw; | 2848 | struct e1000_hw *hw = &adapter->hw; |
2858 | 2849 | ||
2850 | /* DMA latency requirement to workaround early-receive/jumbo issue */ | ||
2851 | if (adapter->flags & FLAG_HAS_ERT) | ||
2852 | pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, | ||
2853 | adapter->netdev->name, | ||
2854 | PM_QOS_DEFAULT_VALUE); | ||
2855 | |||
2859 | /* hardware has been reset, we need to reload some things */ | 2856 | /* hardware has been reset, we need to reload some things */ |
2860 | e1000_configure(adapter); | 2857 | e1000_configure(adapter); |
2861 | 2858 | ||
@@ -2916,6 +2913,10 @@ void e1000e_down(struct e1000_adapter *adapter) | |||
2916 | e1000_clean_tx_ring(adapter); | 2913 | e1000_clean_tx_ring(adapter); |
2917 | e1000_clean_rx_ring(adapter); | 2914 | e1000_clean_rx_ring(adapter); |
2918 | 2915 | ||
2916 | if (adapter->flags & FLAG_HAS_ERT) | ||
2917 | pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, | ||
2918 | adapter->netdev->name); | ||
2919 | |||
2919 | /* | 2920 | /* |
2920 | * TODO: for power management, we could drop the link and | 2921 | * TODO: for power management, we could drop the link and |
2921 | * pci_disable_device here. | 2922 | * pci_disable_device here. |
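Taken together, the pm_qos hunks move the CPU DMA latency constraint from a module-wide entry keyed by the driver name (added in e1000_init_module, see the hunk near the end of this file) to a per-interface entry keyed by netdev->name, added in e1000e_up() and dropped in e1000e_down(). A minimal sketch of that per-interface pattern using the same pre-2.6.35 PM QoS calls the diff uses; the demo_* wrappers and the 55 microsecond value mirror the jumbo/early-receive case above:

#include <linux/pm_qos_params.h>        /* old-style PM QoS API, as used here */
#include <linux/netdevice.h>

static void demo_qos_up(struct net_device *netdev)
{
        /* register the constraint with no limit; tightened only when needed */
        pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, netdev->name,
                               PM_QOS_DEFAULT_VALUE);
}

static void demo_qos_set_jumbo(struct net_device *netdev, bool jumbo)
{
        /* cap C-state exit latency while jumbo frames + early receive are on */
        pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, netdev->name,
                                  jumbo ? 55 : PM_QOS_DEFAULT_VALUE);
}

static void demo_qos_down(struct net_device *netdev)
{
        pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, netdev->name);
}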
@@ -2973,7 +2974,7 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data) | |||
2973 | struct e1000_hw *hw = &adapter->hw; | 2974 | struct e1000_hw *hw = &adapter->hw; |
2974 | u32 icr = er32(ICR); | 2975 | u32 icr = er32(ICR); |
2975 | 2976 | ||
2976 | e_dbg("%s: icr is %08X\n", netdev->name, icr); | 2977 | e_dbg("icr is %08X\n", icr); |
2977 | if (icr & E1000_ICR_RXSEQ) { | 2978 | if (icr & E1000_ICR_RXSEQ) { |
2978 | adapter->flags &= ~FLAG_MSI_TEST_FAILED; | 2979 | adapter->flags &= ~FLAG_MSI_TEST_FAILED; |
2979 | wmb(); | 2980 | wmb(); |
@@ -3010,7 +3011,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) | |||
3010 | if (err) | 3011 | if (err) |
3011 | goto msi_test_failed; | 3012 | goto msi_test_failed; |
3012 | 3013 | ||
3013 | err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0, | 3014 | err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, |
3014 | netdev->name, netdev); | 3015 | netdev->name, netdev); |
3015 | if (err) { | 3016 | if (err) { |
3016 | pci_disable_msi(adapter->pdev); | 3017 | pci_disable_msi(adapter->pdev); |
@@ -3043,7 +3044,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) | |||
3043 | goto msi_test_failed; | 3044 | goto msi_test_failed; |
3044 | 3045 | ||
3045 | /* okay so the test worked, restore settings */ | 3046 | /* okay so the test worked, restore settings */ |
3046 | e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name); | 3047 | e_dbg("MSI interrupt test succeeded!\n"); |
3047 | msi_test_failed: | 3048 | msi_test_failed: |
3048 | e1000e_set_interrupt_capability(adapter); | 3049 | e1000e_set_interrupt_capability(adapter); |
3049 | e1000_request_irq(adapter); | 3050 | e1000_request_irq(adapter); |
@@ -3304,6 +3305,7 @@ static void e1000_update_phy_info(unsigned long data) | |||
3304 | **/ | 3305 | **/ |
3305 | void e1000e_update_stats(struct e1000_adapter *adapter) | 3306 | void e1000e_update_stats(struct e1000_adapter *adapter) |
3306 | { | 3307 | { |
3308 | struct net_device *netdev = adapter->netdev; | ||
3307 | struct e1000_hw *hw = &adapter->hw; | 3309 | struct e1000_hw *hw = &adapter->hw; |
3308 | struct pci_dev *pdev = adapter->pdev; | 3310 | struct pci_dev *pdev = adapter->pdev; |
3309 | u16 phy_data; | 3311 | u16 phy_data; |
@@ -3398,8 +3400,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
3398 | adapter->stats.tsctfc += er32(TSCTFC); | 3400 | adapter->stats.tsctfc += er32(TSCTFC); |
3399 | 3401 | ||
3400 | /* Fill out the OS statistics structure */ | 3402 | /* Fill out the OS statistics structure */ |
3401 | adapter->net_stats.multicast = adapter->stats.mprc; | 3403 | netdev->stats.multicast = adapter->stats.mprc; |
3402 | adapter->net_stats.collisions = adapter->stats.colc; | 3404 | netdev->stats.collisions = adapter->stats.colc; |
3403 | 3405 | ||
3404 | /* Rx Errors */ | 3406 | /* Rx Errors */ |
3405 | 3407 | ||
@@ -3407,22 +3409,22 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
3407 | * RLEC on some newer hardware can be incorrect so build | 3409 | * RLEC on some newer hardware can be incorrect so build |
3408 | * our own version based on RUC and ROC | 3410 | * our own version based on RUC and ROC |
3409 | */ | 3411 | */ |
3410 | adapter->net_stats.rx_errors = adapter->stats.rxerrc + | 3412 | netdev->stats.rx_errors = adapter->stats.rxerrc + |
3411 | adapter->stats.crcerrs + adapter->stats.algnerrc + | 3413 | adapter->stats.crcerrs + adapter->stats.algnerrc + |
3412 | adapter->stats.ruc + adapter->stats.roc + | 3414 | adapter->stats.ruc + adapter->stats.roc + |
3413 | adapter->stats.cexterr; | 3415 | adapter->stats.cexterr; |
3414 | adapter->net_stats.rx_length_errors = adapter->stats.ruc + | 3416 | netdev->stats.rx_length_errors = adapter->stats.ruc + |
3415 | adapter->stats.roc; | 3417 | adapter->stats.roc; |
3416 | adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; | 3418 | netdev->stats.rx_crc_errors = adapter->stats.crcerrs; |
3417 | adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; | 3419 | netdev->stats.rx_frame_errors = adapter->stats.algnerrc; |
3418 | adapter->net_stats.rx_missed_errors = adapter->stats.mpc; | 3420 | netdev->stats.rx_missed_errors = adapter->stats.mpc; |
3419 | 3421 | ||
3420 | /* Tx Errors */ | 3422 | /* Tx Errors */ |
3421 | adapter->net_stats.tx_errors = adapter->stats.ecol + | 3423 | netdev->stats.tx_errors = adapter->stats.ecol + |
3422 | adapter->stats.latecol; | 3424 | adapter->stats.latecol; |
3423 | adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; | 3425 | netdev->stats.tx_aborted_errors = adapter->stats.ecol; |
3424 | adapter->net_stats.tx_window_errors = adapter->stats.latecol; | 3426 | netdev->stats.tx_window_errors = adapter->stats.latecol; |
3425 | adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; | 3427 | netdev->stats.tx_carrier_errors = adapter->stats.tncrs; |
3426 | 3428 | ||
3427 | /* Tx Dropped needs to be maintained elsewhere */ | 3429 | /* Tx Dropped needs to be maintained elsewhere */ |
3428 | 3430 | ||
@@ -3776,68 +3778,64 @@ static int e1000_tso(struct e1000_adapter *adapter, | |||
3776 | u8 ipcss, ipcso, tucss, tucso, hdr_len; | 3778 | u8 ipcss, ipcso, tucss, tucso, hdr_len; |
3777 | int err; | 3779 | int err; |
3778 | 3780 | ||
3779 | if (skb_is_gso(skb)) { | 3781 | if (!skb_is_gso(skb)) |
3780 | if (skb_header_cloned(skb)) { | 3782 | return 0; |
3781 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | ||
3782 | if (err) | ||
3783 | return err; | ||
3784 | } | ||
3785 | 3783 | ||
3786 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | 3784 | if (skb_header_cloned(skb)) { |
3787 | mss = skb_shinfo(skb)->gso_size; | 3785 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
3788 | if (skb->protocol == htons(ETH_P_IP)) { | 3786 | if (err) |
3789 | struct iphdr *iph = ip_hdr(skb); | 3787 | return err; |
3790 | iph->tot_len = 0; | 3788 | } |
3791 | iph->check = 0; | ||
3792 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | ||
3793 | iph->daddr, 0, | ||
3794 | IPPROTO_TCP, | ||
3795 | 0); | ||
3796 | cmd_length = E1000_TXD_CMD_IP; | ||
3797 | ipcse = skb_transport_offset(skb) - 1; | ||
3798 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | ||
3799 | ipv6_hdr(skb)->payload_len = 0; | ||
3800 | tcp_hdr(skb)->check = | ||
3801 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | ||
3802 | &ipv6_hdr(skb)->daddr, | ||
3803 | 0, IPPROTO_TCP, 0); | ||
3804 | ipcse = 0; | ||
3805 | } | ||
3806 | ipcss = skb_network_offset(skb); | ||
3807 | ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; | ||
3808 | tucss = skb_transport_offset(skb); | ||
3809 | tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; | ||
3810 | tucse = 0; | ||
3811 | 3789 | ||
3812 | cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | | 3790 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
3813 | E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); | 3791 | mss = skb_shinfo(skb)->gso_size; |
3792 | if (skb->protocol == htons(ETH_P_IP)) { | ||
3793 | struct iphdr *iph = ip_hdr(skb); | ||
3794 | iph->tot_len = 0; | ||
3795 | iph->check = 0; | ||
3796 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, | ||
3797 | 0, IPPROTO_TCP, 0); | ||
3798 | cmd_length = E1000_TXD_CMD_IP; | ||
3799 | ipcse = skb_transport_offset(skb) - 1; | ||
3800 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | ||
3801 | ipv6_hdr(skb)->payload_len = 0; | ||
3802 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | ||
3803 | &ipv6_hdr(skb)->daddr, | ||
3804 | 0, IPPROTO_TCP, 0); | ||
3805 | ipcse = 0; | ||
3806 | } | ||
3807 | ipcss = skb_network_offset(skb); | ||
3808 | ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; | ||
3809 | tucss = skb_transport_offset(skb); | ||
3810 | tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; | ||
3811 | tucse = 0; | ||
3814 | 3812 | ||
3815 | i = tx_ring->next_to_use; | 3813 | cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | |
3816 | context_desc = E1000_CONTEXT_DESC(*tx_ring, i); | 3814 | E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); |
3817 | buffer_info = &tx_ring->buffer_info[i]; | ||
3818 | 3815 | ||
3819 | context_desc->lower_setup.ip_fields.ipcss = ipcss; | 3816 | i = tx_ring->next_to_use; |
3820 | context_desc->lower_setup.ip_fields.ipcso = ipcso; | 3817 | context_desc = E1000_CONTEXT_DESC(*tx_ring, i); |
3821 | context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); | 3818 | buffer_info = &tx_ring->buffer_info[i]; |
3822 | context_desc->upper_setup.tcp_fields.tucss = tucss; | ||
3823 | context_desc->upper_setup.tcp_fields.tucso = tucso; | ||
3824 | context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); | ||
3825 | context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); | ||
3826 | context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; | ||
3827 | context_desc->cmd_and_length = cpu_to_le32(cmd_length); | ||
3828 | 3819 | ||
3829 | buffer_info->time_stamp = jiffies; | 3820 | context_desc->lower_setup.ip_fields.ipcss = ipcss; |
3830 | buffer_info->next_to_watch = i; | 3821 | context_desc->lower_setup.ip_fields.ipcso = ipcso; |
3822 | context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); | ||
3823 | context_desc->upper_setup.tcp_fields.tucss = tucss; | ||
3824 | context_desc->upper_setup.tcp_fields.tucso = tucso; | ||
3825 | context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); | ||
3826 | context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); | ||
3827 | context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; | ||
3828 | context_desc->cmd_and_length = cpu_to_le32(cmd_length); | ||
3831 | 3829 | ||
3832 | i++; | 3830 | buffer_info->time_stamp = jiffies; |
3833 | if (i == tx_ring->count) | 3831 | buffer_info->next_to_watch = i; |
3834 | i = 0; | ||
3835 | tx_ring->next_to_use = i; | ||
3836 | 3832 | ||
3837 | return 1; | 3833 | i++; |
3838 | } | 3834 | if (i == tx_ring->count) |
3835 | i = 0; | ||
3836 | tx_ring->next_to_use = i; | ||
3839 | 3837 | ||
3840 | return 0; | 3838 | return 1; |
3841 | } | 3839 | } |
3842 | 3840 | ||
3843 | static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) | 3841 | static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) |
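The large e1000_tso() hunk above does not change behaviour: it replaces the outer if (skb_is_gso(skb)) { ... } wrapper with an early return 0 for the non-GSO case, so the descriptor-building body loses one indentation level. The resulting guard-clause shape, sketched with the details elided:

#include <linux/skbuff.h>

static int demo_tso(struct sk_buff *skb)
{
        if (!skb_is_gso(skb))
                return 0;       /* nothing to segment, no context descriptor */

        /* ... expand a cloned header, build the TSO context descriptor and
         *     advance next_to_use, exactly as in the hunk above ... */
        return 1;               /* a context descriptor was queued */
}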
@@ -4271,10 +4269,8 @@ static void e1000_reset_task(struct work_struct *work) | |||
4271 | **/ | 4269 | **/ |
4272 | static struct net_device_stats *e1000_get_stats(struct net_device *netdev) | 4270 | static struct net_device_stats *e1000_get_stats(struct net_device *netdev) |
4273 | { | 4271 | { |
4274 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
4275 | |||
4276 | /* only return the current stats */ | 4272 | /* only return the current stats */ |
4277 | return &adapter->net_stats; | 4273 | return &netdev->stats; |
4278 | } | 4274 | } |
4279 | 4275 | ||
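Throughout this file the private adapter->net_stats copy is replaced by the net_device_stats instance embedded in struct net_device, so the counters are updated in netdev->stats directly and get_stats() simply hands that structure back. A minimal sketch of the new shape (demo_* names are illustrative):

#include <linux/netdevice.h>

static struct net_device_stats *demo_get_stats(struct net_device *netdev)
{
        /* no driver-private copy left to keep in sync */
        return &netdev->stats;
}

static void demo_count_rx(struct net_device *netdev, unsigned int bytes)
{
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += bytes;
}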
4280 | /** | 4276 | /** |
@@ -4362,6 +4358,8 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, | |||
4362 | data->phy_id = adapter->hw.phy.addr; | 4358 | data->phy_id = adapter->hw.phy.addr; |
4363 | break; | 4359 | break; |
4364 | case SIOCGMIIREG: | 4360 | case SIOCGMIIREG: |
4361 | e1000_phy_read_status(adapter); | ||
4362 | |||
4365 | switch (data->reg_num & 0x1F) { | 4363 | switch (data->reg_num & 0x1F) { |
4366 | case MII_BMCR: | 4364 | case MII_BMCR: |
4367 | data->val_out = adapter->phy_regs.bmcr; | 4365 | data->val_out = adapter->phy_regs.bmcr; |
@@ -4469,7 +4467,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) | |||
4469 | e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); | 4467 | e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); |
4470 | 4468 | ||
4471 | /* activate PHY wakeup */ | 4469 | /* activate PHY wakeup */ |
4472 | retval = hw->phy.ops.acquire_phy(hw); | 4470 | retval = hw->phy.ops.acquire(hw); |
4473 | if (retval) { | 4471 | if (retval) { |
4474 | e_err("Could not acquire PHY\n"); | 4472 | e_err("Could not acquire PHY\n"); |
4475 | return retval; | 4473 | return retval; |
@@ -4486,7 +4484,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) | |||
4486 | if (retval) | 4484 | if (retval) |
4487 | e_err("Could not set PHY Host Wakeup bit\n"); | 4485 | e_err("Could not set PHY Host Wakeup bit\n"); |
4488 | out: | 4486 | out: |
4489 | hw->phy.ops.release_phy(hw); | 4487 | hw->phy.ops.release(hw); |
4490 | 4488 | ||
4491 | return retval; | 4489 | return retval; |
4492 | } | 4490 | } |
@@ -5160,6 +5158,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
5160 | INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); | 5158 | INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); |
5161 | INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); | 5159 | INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); |
5162 | INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); | 5160 | INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); |
5161 | INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); | ||
5163 | 5162 | ||
5164 | /* Initialize link parameters. User can change them with ethtool */ | 5163 | /* Initialize link parameters. User can change them with ethtool */ |
5165 | adapter->hw.mac.autoneg = 1; | 5164 | adapter->hw.mac.autoneg = 1; |
@@ -5283,6 +5282,11 @@ static void __devexit e1000_remove(struct pci_dev *pdev) | |||
5283 | del_timer_sync(&adapter->watchdog_timer); | 5282 | del_timer_sync(&adapter->watchdog_timer); |
5284 | del_timer_sync(&adapter->phy_info_timer); | 5283 | del_timer_sync(&adapter->phy_info_timer); |
5285 | 5284 | ||
5285 | cancel_work_sync(&adapter->reset_task); | ||
5286 | cancel_work_sync(&adapter->watchdog_task); | ||
5287 | cancel_work_sync(&adapter->downshift_task); | ||
5288 | cancel_work_sync(&adapter->update_phy_task); | ||
5289 | cancel_work_sync(&adapter->print_hang_task); | ||
5286 | flush_scheduled_work(); | 5290 | flush_scheduled_work(); |
5287 | 5291 | ||
5288 | /* | 5292 | /* |
@@ -5414,12 +5418,10 @@ static int __init e1000_init_module(void) | |||
5414 | int ret; | 5418 | int ret; |
5415 | printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", | 5419 | printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", |
5416 | e1000e_driver_name, e1000e_driver_version); | 5420 | e1000e_driver_name, e1000e_driver_version); |
5417 | printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", | 5421 | printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n", |
5418 | e1000e_driver_name); | 5422 | e1000e_driver_name); |
5419 | ret = pci_register_driver(&e1000_driver); | 5423 | ret = pci_register_driver(&e1000_driver); |
5420 | pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name, | 5424 | |
5421 | PM_QOS_DEFAULT_VALUE); | ||
5422 | |||
5423 | return ret; | 5425 | return ret; |
5424 | } | 5426 | } |
5425 | module_init(e1000_init_module); | 5427 | module_init(e1000_init_module); |
@@ -5433,7 +5435,6 @@ module_init(e1000_init_module); | |||
5433 | static void __exit e1000_exit_module(void) | 5435 | static void __exit e1000_exit_module(void) |
5434 | { | 5436 | { |
5435 | pci_unregister_driver(&e1000_driver); | 5437 | pci_unregister_driver(&e1000_driver); |
5436 | pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name); | ||
5437 | } | 5438 | } |
5438 | module_exit(e1000_exit_module); | 5439 | module_exit(e1000_exit_module); |
5439 | 5440 | ||
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c index 1342e0b1815c..2e399778cae5 100644 --- a/drivers/net/e1000e/param.c +++ b/drivers/net/e1000e/param.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index 85f955f70417..5cd01c691c53 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -130,7 +130,7 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw) | |||
130 | u16 phy_id; | 130 | u16 phy_id; |
131 | u16 retry_count = 0; | 131 | u16 retry_count = 0; |
132 | 132 | ||
133 | if (!(phy->ops.read_phy_reg)) | 133 | if (!(phy->ops.read_reg)) |
134 | goto out; | 134 | goto out; |
135 | 135 | ||
136 | while (retry_count < 2) { | 136 | while (retry_count < 2) { |
@@ -156,24 +156,24 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw) | |||
156 | * MDIC mode. No harm in trying again in this case since | 156 | * MDIC mode. No harm in trying again in this case since |
157 | * the PHY ID is unknown at this point anyway | 157 | * the PHY ID is unknown at this point anyway |
158 | */ | 158 | */ |
159 | ret_val = phy->ops.acquire_phy(hw); | 159 | ret_val = phy->ops.acquire(hw); |
160 | if (ret_val) | 160 | if (ret_val) |
161 | goto out; | 161 | goto out; |
162 | ret_val = e1000_set_mdio_slow_mode_hv(hw, true); | 162 | ret_val = e1000_set_mdio_slow_mode_hv(hw, true); |
163 | if (ret_val) | 163 | if (ret_val) |
164 | goto out; | 164 | goto out; |
165 | phy->ops.release_phy(hw); | 165 | phy->ops.release(hw); |
166 | 166 | ||
167 | retry_count++; | 167 | retry_count++; |
168 | } | 168 | } |
169 | out: | 169 | out: |
170 | /* Revert to MDIO fast mode, if applicable */ | 170 | /* Revert to MDIO fast mode, if applicable */ |
171 | if (retry_count) { | 171 | if (retry_count) { |
172 | ret_val = phy->ops.acquire_phy(hw); | 172 | ret_val = phy->ops.acquire(hw); |
173 | if (ret_val) | 173 | if (ret_val) |
174 | return ret_val; | 174 | return ret_val; |
175 | ret_val = e1000_set_mdio_slow_mode_hv(hw, false); | 175 | ret_val = e1000_set_mdio_slow_mode_hv(hw, false); |
176 | phy->ops.release_phy(hw); | 176 | phy->ops.release(hw); |
177 | } | 177 | } |
178 | 178 | ||
179 | return ret_val; | 179 | return ret_val; |
@@ -211,7 +211,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) | |||
211 | u32 i, mdic = 0; | 211 | u32 i, mdic = 0; |
212 | 212 | ||
213 | if (offset > MAX_PHY_REG_ADDRESS) { | 213 | if (offset > MAX_PHY_REG_ADDRESS) { |
214 | hw_dbg(hw, "PHY Address %d is out of range\n", offset); | 214 | e_dbg("PHY Address %d is out of range\n", offset); |
215 | return -E1000_ERR_PARAM; | 215 | return -E1000_ERR_PARAM; |
216 | } | 216 | } |
217 | 217 | ||
@@ -238,11 +238,11 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) | |||
238 | break; | 238 | break; |
239 | } | 239 | } |
240 | if (!(mdic & E1000_MDIC_READY)) { | 240 | if (!(mdic & E1000_MDIC_READY)) { |
241 | hw_dbg(hw, "MDI Read did not complete\n"); | 241 | e_dbg("MDI Read did not complete\n"); |
242 | return -E1000_ERR_PHY; | 242 | return -E1000_ERR_PHY; |
243 | } | 243 | } |
244 | if (mdic & E1000_MDIC_ERROR) { | 244 | if (mdic & E1000_MDIC_ERROR) { |
245 | hw_dbg(hw, "MDI Error\n"); | 245 | e_dbg("MDI Error\n"); |
246 | return -E1000_ERR_PHY; | 246 | return -E1000_ERR_PHY; |
247 | } | 247 | } |
248 | *data = (u16) mdic; | 248 | *data = (u16) mdic; |
@@ -264,7 +264,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) | |||
264 | u32 i, mdic = 0; | 264 | u32 i, mdic = 0; |
265 | 265 | ||
266 | if (offset > MAX_PHY_REG_ADDRESS) { | 266 | if (offset > MAX_PHY_REG_ADDRESS) { |
267 | hw_dbg(hw, "PHY Address %d is out of range\n", offset); | 267 | e_dbg("PHY Address %d is out of range\n", offset); |
268 | return -E1000_ERR_PARAM; | 268 | return -E1000_ERR_PARAM; |
269 | } | 269 | } |
270 | 270 | ||
@@ -292,11 +292,11 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) | |||
292 | break; | 292 | break; |
293 | } | 293 | } |
294 | if (!(mdic & E1000_MDIC_READY)) { | 294 | if (!(mdic & E1000_MDIC_READY)) { |
295 | hw_dbg(hw, "MDI Write did not complete\n"); | 295 | e_dbg("MDI Write did not complete\n"); |
296 | return -E1000_ERR_PHY; | 296 | return -E1000_ERR_PHY; |
297 | } | 297 | } |
298 | if (mdic & E1000_MDIC_ERROR) { | 298 | if (mdic & E1000_MDIC_ERROR) { |
299 | hw_dbg(hw, "MDI Error\n"); | 299 | e_dbg("MDI Error\n"); |
300 | return -E1000_ERR_PHY; | 300 | return -E1000_ERR_PHY; |
301 | } | 301 | } |
302 | 302 | ||
@@ -317,14 +317,14 @@ s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) | |||
317 | { | 317 | { |
318 | s32 ret_val; | 318 | s32 ret_val; |
319 | 319 | ||
320 | ret_val = hw->phy.ops.acquire_phy(hw); | 320 | ret_val = hw->phy.ops.acquire(hw); |
321 | if (ret_val) | 321 | if (ret_val) |
322 | return ret_val; | 322 | return ret_val; |
323 | 323 | ||
324 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 324 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
325 | data); | 325 | data); |
326 | 326 | ||
327 | hw->phy.ops.release_phy(hw); | 327 | hw->phy.ops.release(hw); |
328 | 328 | ||
329 | return ret_val; | 329 | return ret_val; |
330 | } | 330 | } |
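The phy.c hunks are largely a mechanical rename of the PHY function pointers (acquire_phy/release_phy become acquire/release, read_phy_reg/write_phy_reg become read_reg/write_reg) plus the hw_dbg() to e_dbg() logging switch; the acquire/access/release bracket around every MDIC transaction is unchanged. Sketch of that bracket with the new names, mirroring the m88 read above (demo_read_phy_reg itself is illustrative):

#include "e1000.h"

static s32 demo_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
{
        s32 ret_val;

        ret_val = hw->phy.ops.acquire(hw);      /* take the PHY/HW semaphore */
        if (ret_val)
                return ret_val;

        ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
                                           data);

        hw->phy.ops.release(hw);                /* always release it again */
        return ret_val;
}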
@@ -342,14 +342,14 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) | |||
342 | { | 342 | { |
343 | s32 ret_val; | 343 | s32 ret_val; |
344 | 344 | ||
345 | ret_val = hw->phy.ops.acquire_phy(hw); | 345 | ret_val = hw->phy.ops.acquire(hw); |
346 | if (ret_val) | 346 | if (ret_val) |
347 | return ret_val; | 347 | return ret_val; |
348 | 348 | ||
349 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 349 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
350 | data); | 350 | data); |
351 | 351 | ||
352 | hw->phy.ops.release_phy(hw); | 352 | hw->phy.ops.release(hw); |
353 | 353 | ||
354 | return ret_val; | 354 | return ret_val; |
355 | } | 355 | } |
@@ -371,10 +371,10 @@ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, | |||
371 | s32 ret_val = 0; | 371 | s32 ret_val = 0; |
372 | 372 | ||
373 | if (!locked) { | 373 | if (!locked) { |
374 | if (!(hw->phy.ops.acquire_phy)) | 374 | if (!(hw->phy.ops.acquire)) |
375 | goto out; | 375 | goto out; |
376 | 376 | ||
377 | ret_val = hw->phy.ops.acquire_phy(hw); | 377 | ret_val = hw->phy.ops.acquire(hw); |
378 | if (ret_val) | 378 | if (ret_val) |
379 | goto out; | 379 | goto out; |
380 | } | 380 | } |
@@ -392,7 +392,7 @@ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, | |||
392 | 392 | ||
393 | release: | 393 | release: |
394 | if (!locked) | 394 | if (!locked) |
395 | hw->phy.ops.release_phy(hw); | 395 | hw->phy.ops.release(hw); |
396 | out: | 396 | out: |
397 | return ret_val; | 397 | return ret_val; |
398 | } | 398 | } |
@@ -442,10 +442,10 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, | |||
442 | s32 ret_val = 0; | 442 | s32 ret_val = 0; |
443 | 443 | ||
444 | if (!locked) { | 444 | if (!locked) { |
445 | if (!(hw->phy.ops.acquire_phy)) | 445 | if (!(hw->phy.ops.acquire)) |
446 | goto out; | 446 | goto out; |
447 | 447 | ||
448 | ret_val = hw->phy.ops.acquire_phy(hw); | 448 | ret_val = hw->phy.ops.acquire(hw); |
449 | if (ret_val) | 449 | if (ret_val) |
450 | goto out; | 450 | goto out; |
451 | } | 451 | } |
@@ -463,7 +463,7 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, | |||
463 | 463 | ||
464 | release: | 464 | release: |
465 | if (!locked) | 465 | if (!locked) |
466 | hw->phy.ops.release_phy(hw); | 466 | hw->phy.ops.release(hw); |
467 | 467 | ||
468 | out: | 468 | out: |
469 | return ret_val; | 469 | return ret_val; |
@@ -515,10 +515,10 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, | |||
515 | s32 ret_val = 0; | 515 | s32 ret_val = 0; |
516 | 516 | ||
517 | if (!locked) { | 517 | if (!locked) { |
518 | if (!(hw->phy.ops.acquire_phy)) | 518 | if (!(hw->phy.ops.acquire)) |
519 | goto out; | 519 | goto out; |
520 | 520 | ||
521 | ret_val = hw->phy.ops.acquire_phy(hw); | 521 | ret_val = hw->phy.ops.acquire(hw); |
522 | if (ret_val) | 522 | if (ret_val) |
523 | goto out; | 523 | goto out; |
524 | } | 524 | } |
@@ -533,7 +533,7 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, | |||
533 | *data = (u16)kmrnctrlsta; | 533 | *data = (u16)kmrnctrlsta; |
534 | 534 | ||
535 | if (!locked) | 535 | if (!locked) |
536 | hw->phy.ops.release_phy(hw); | 536 | hw->phy.ops.release(hw); |
537 | 537 | ||
538 | out: | 538 | out: |
539 | return ret_val; | 539 | return ret_val; |
@@ -587,10 +587,10 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, | |||
587 | s32 ret_val = 0; | 587 | s32 ret_val = 0; |
588 | 588 | ||
589 | if (!locked) { | 589 | if (!locked) { |
590 | if (!(hw->phy.ops.acquire_phy)) | 590 | if (!(hw->phy.ops.acquire)) |
591 | goto out; | 591 | goto out; |
592 | 592 | ||
593 | ret_val = hw->phy.ops.acquire_phy(hw); | 593 | ret_val = hw->phy.ops.acquire(hw); |
594 | if (ret_val) | 594 | if (ret_val) |
595 | goto out; | 595 | goto out; |
596 | } | 596 | } |
@@ -602,7 +602,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, | |||
602 | udelay(2); | 602 | udelay(2); |
603 | 603 | ||
604 | if (!locked) | 604 | if (!locked) |
605 | hw->phy.ops.release_phy(hw); | 605 | hw->phy.ops.release(hw); |
606 | 606 | ||
607 | out: | 607 | out: |
608 | return ret_val; | 608 | return ret_val; |
@@ -649,7 +649,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) | |||
649 | u16 phy_data; | 649 | u16 phy_data; |
650 | 650 | ||
651 | /* Enable CRS on TX. This must be set for half-duplex operation. */ | 651 | /* Enable CRS on TX. This must be set for half-duplex operation. */ |
652 | ret_val = phy->ops.read_phy_reg(hw, I82577_CFG_REG, &phy_data); | 652 | ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data); |
653 | if (ret_val) | 653 | if (ret_val) |
654 | goto out; | 654 | goto out; |
655 | 655 | ||
@@ -658,7 +658,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) | |||
658 | /* Enable downshift */ | 658 | /* Enable downshift */ |
659 | phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; | 659 | phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; |
660 | 660 | ||
661 | ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data); | 661 | ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data); |
662 | 662 | ||
663 | out: | 663 | out: |
664 | return ret_val; | 664 | return ret_val; |
@@ -776,12 +776,12 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) | |||
776 | /* Commit the changes. */ | 776 | /* Commit the changes. */ |
777 | ret_val = e1000e_commit_phy(hw); | 777 | ret_val = e1000e_commit_phy(hw); |
778 | if (ret_val) { | 778 | if (ret_val) { |
779 | hw_dbg(hw, "Error committing the PHY changes\n"); | 779 | e_dbg("Error committing the PHY changes\n"); |
780 | return ret_val; | 780 | return ret_val; |
781 | } | 781 | } |
782 | 782 | ||
783 | if (phy->type == e1000_phy_82578) { | 783 | if (phy->type == e1000_phy_82578) { |
784 | ret_val = phy->ops.read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, | 784 | ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, |
785 | &phy_data); | 785 | &phy_data); |
786 | if (ret_val) | 786 | if (ret_val) |
787 | return ret_val; | 787 | return ret_val; |
@@ -789,7 +789,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) | |||
789 | /* 82578 PHY - set the downshift count to 1x. */ | 789 | /* 82578 PHY - set the downshift count to 1x. */ |
790 | phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; | 790 | phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; |
791 | phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; | 791 | phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; |
792 | ret_val = phy->ops.write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, | 792 | ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, |
793 | phy_data); | 793 | phy_data); |
794 | if (ret_val) | 794 | if (ret_val) |
795 | return ret_val; | 795 | return ret_val; |
@@ -813,7 +813,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) | |||
813 | 813 | ||
814 | ret_val = e1000_phy_hw_reset(hw); | 814 | ret_val = e1000_phy_hw_reset(hw); |
815 | if (ret_val) { | 815 | if (ret_val) { |
816 | hw_dbg(hw, "Error resetting the PHY.\n"); | 816 | e_dbg("Error resetting the PHY.\n"); |
817 | return ret_val; | 817 | return ret_val; |
818 | } | 818 | } |
819 | 819 | ||
@@ -824,9 +824,9 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) | |||
824 | msleep(100); | 824 | msleep(100); |
825 | 825 | ||
826 | /* disable lplu d0 during driver init */ | 826 | /* disable lplu d0 during driver init */ |
827 | ret_val = e1000_set_d0_lplu_state(hw, 0); | 827 | ret_val = e1000_set_d0_lplu_state(hw, false); |
828 | if (ret_val) { | 828 | if (ret_val) { |
829 | hw_dbg(hw, "Error Disabling LPLU D0\n"); | 829 | e_dbg("Error Disabling LPLU D0\n"); |
830 | return ret_val; | 830 | return ret_val; |
831 | } | 831 | } |
832 | /* Configure mdi-mdix settings */ | 832 | /* Configure mdi-mdix settings */ |
@@ -962,39 +962,39 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
962 | NWAY_AR_10T_HD_CAPS); | 962 | NWAY_AR_10T_HD_CAPS); |
963 | mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); | 963 | mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); |
964 | 964 | ||
965 | hw_dbg(hw, "autoneg_advertised %x\n", phy->autoneg_advertised); | 965 | e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); |
966 | 966 | ||
967 | /* Do we want to advertise 10 Mb Half Duplex? */ | 967 | /* Do we want to advertise 10 Mb Half Duplex? */ |
968 | if (phy->autoneg_advertised & ADVERTISE_10_HALF) { | 968 | if (phy->autoneg_advertised & ADVERTISE_10_HALF) { |
969 | hw_dbg(hw, "Advertise 10mb Half duplex\n"); | 969 | e_dbg("Advertise 10mb Half duplex\n"); |
970 | mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; | 970 | mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; |
971 | } | 971 | } |
972 | 972 | ||
973 | /* Do we want to advertise 10 Mb Full Duplex? */ | 973 | /* Do we want to advertise 10 Mb Full Duplex? */ |
974 | if (phy->autoneg_advertised & ADVERTISE_10_FULL) { | 974 | if (phy->autoneg_advertised & ADVERTISE_10_FULL) { |
975 | hw_dbg(hw, "Advertise 10mb Full duplex\n"); | 975 | e_dbg("Advertise 10mb Full duplex\n"); |
976 | mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; | 976 | mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; |
977 | } | 977 | } |
978 | 978 | ||
979 | /* Do we want to advertise 100 Mb Half Duplex? */ | 979 | /* Do we want to advertise 100 Mb Half Duplex? */ |
980 | if (phy->autoneg_advertised & ADVERTISE_100_HALF) { | 980 | if (phy->autoneg_advertised & ADVERTISE_100_HALF) { |
981 | hw_dbg(hw, "Advertise 100mb Half duplex\n"); | 981 | e_dbg("Advertise 100mb Half duplex\n"); |
982 | mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; | 982 | mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; |
983 | } | 983 | } |
984 | 984 | ||
985 | /* Do we want to advertise 100 Mb Full Duplex? */ | 985 | /* Do we want to advertise 100 Mb Full Duplex? */ |
986 | if (phy->autoneg_advertised & ADVERTISE_100_FULL) { | 986 | if (phy->autoneg_advertised & ADVERTISE_100_FULL) { |
987 | hw_dbg(hw, "Advertise 100mb Full duplex\n"); | 987 | e_dbg("Advertise 100mb Full duplex\n"); |
988 | mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; | 988 | mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; |
989 | } | 989 | } |
990 | 990 | ||
991 | /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ | 991 | /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ |
992 | if (phy->autoneg_advertised & ADVERTISE_1000_HALF) | 992 | if (phy->autoneg_advertised & ADVERTISE_1000_HALF) |
993 | hw_dbg(hw, "Advertise 1000mb Half duplex request denied!\n"); | 993 | e_dbg("Advertise 1000mb Half duplex request denied!\n"); |
994 | 994 | ||
995 | /* Do we want to advertise 1000 Mb Full Duplex? */ | 995 | /* Do we want to advertise 1000 Mb Full Duplex? */ |
996 | if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { | 996 | if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { |
997 | hw_dbg(hw, "Advertise 1000mb Full duplex\n"); | 997 | e_dbg("Advertise 1000mb Full duplex\n"); |
998 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; | 998 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; |
999 | } | 999 | } |
1000 | 1000 | ||
@@ -1053,7 +1053,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1053 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | 1053 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); |
1054 | break; | 1054 | break; |
1055 | default: | 1055 | default: |
1056 | hw_dbg(hw, "Flow control param set incorrectly\n"); | 1056 | e_dbg("Flow control param set incorrectly\n"); |
1057 | ret_val = -E1000_ERR_CONFIG; | 1057 | ret_val = -E1000_ERR_CONFIG; |
1058 | return ret_val; | 1058 | return ret_val; |
1059 | } | 1059 | } |
@@ -1062,7 +1062,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1062 | if (ret_val) | 1062 | if (ret_val) |
1063 | return ret_val; | 1063 | return ret_val; |
1064 | 1064 | ||
1065 | hw_dbg(hw, "Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); | 1065 | e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); |
1066 | 1066 | ||
1067 | if (phy->autoneg_mask & ADVERTISE_1000_FULL) { | 1067 | if (phy->autoneg_mask & ADVERTISE_1000_FULL) { |
1068 | ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); | 1068 | ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); |
@@ -1099,13 +1099,13 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
1099 | if (phy->autoneg_advertised == 0) | 1099 | if (phy->autoneg_advertised == 0) |
1100 | phy->autoneg_advertised = phy->autoneg_mask; | 1100 | phy->autoneg_advertised = phy->autoneg_mask; |
1101 | 1101 | ||
1102 | hw_dbg(hw, "Reconfiguring auto-neg advertisement params\n"); | 1102 | e_dbg("Reconfiguring auto-neg advertisement params\n"); |
1103 | ret_val = e1000_phy_setup_autoneg(hw); | 1103 | ret_val = e1000_phy_setup_autoneg(hw); |
1104 | if (ret_val) { | 1104 | if (ret_val) { |
1105 | hw_dbg(hw, "Error Setting up Auto-Negotiation\n"); | 1105 | e_dbg("Error Setting up Auto-Negotiation\n"); |
1106 | return ret_val; | 1106 | return ret_val; |
1107 | } | 1107 | } |
1108 | hw_dbg(hw, "Restarting Auto-Neg\n"); | 1108 | e_dbg("Restarting Auto-Neg\n"); |
1109 | 1109 | ||
1110 | /* | 1110 | /* |
1111 | * Restart auto-negotiation by setting the Auto Neg Enable bit and | 1111 | * Restart auto-negotiation by setting the Auto Neg Enable bit and |
@@ -1127,7 +1127,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
1127 | if (phy->autoneg_wait_to_complete) { | 1127 | if (phy->autoneg_wait_to_complete) { |
1128 | ret_val = e1000_wait_autoneg(hw); | 1128 | ret_val = e1000_wait_autoneg(hw); |
1129 | if (ret_val) { | 1129 | if (ret_val) { |
1130 | hw_dbg(hw, "Error while waiting for " | 1130 | e_dbg("Error while waiting for " |
1131 | "autoneg to complete\n"); | 1131 | "autoneg to complete\n"); |
1132 | return ret_val; | 1132 | return ret_val; |
1133 | } | 1133 | } |
@@ -1165,10 +1165,10 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw) | |||
1165 | * PHY will be set to 10H, 10F, 100H or 100F | 1165 | * PHY will be set to 10H, 10F, 100H or 100F |
1166 | * depending on user settings. | 1166 | * depending on user settings. |
1167 | */ | 1167 | */ |
1168 | hw_dbg(hw, "Forcing Speed and Duplex\n"); | 1168 | e_dbg("Forcing Speed and Duplex\n"); |
1169 | ret_val = e1000_phy_force_speed_duplex(hw); | 1169 | ret_val = e1000_phy_force_speed_duplex(hw); |
1170 | if (ret_val) { | 1170 | if (ret_val) { |
1171 | hw_dbg(hw, "Error Forcing Speed and Duplex\n"); | 1171 | e_dbg("Error Forcing Speed and Duplex\n"); |
1172 | return ret_val; | 1172 | return ret_val; |
1173 | } | 1173 | } |
1174 | } | 1174 | } |
@@ -1185,11 +1185,11 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw) | |||
1185 | return ret_val; | 1185 | return ret_val; |
1186 | 1186 | ||
1187 | if (link) { | 1187 | if (link) { |
1188 | hw_dbg(hw, "Valid link established!!!\n"); | 1188 | e_dbg("Valid link established!!!\n"); |
1189 | e1000e_config_collision_dist(hw); | 1189 | e1000e_config_collision_dist(hw); |
1190 | ret_val = e1000e_config_fc_after_link_up(hw); | 1190 | ret_val = e1000e_config_fc_after_link_up(hw); |
1191 | } else { | 1191 | } else { |
1192 | hw_dbg(hw, "Unable to establish link!!!\n"); | 1192 | e_dbg("Unable to establish link!!!\n"); |
1193 | } | 1193 | } |
1194 | 1194 | ||
1195 | return ret_val; | 1195 | return ret_val; |
@@ -1235,12 +1235,12 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) | |||
1235 | if (ret_val) | 1235 | if (ret_val) |
1236 | return ret_val; | 1236 | return ret_val; |
1237 | 1237 | ||
1238 | hw_dbg(hw, "IGP PSCR: %X\n", phy_data); | 1238 | e_dbg("IGP PSCR: %X\n", phy_data); |
1239 | 1239 | ||
1240 | udelay(1); | 1240 | udelay(1); |
1241 | 1241 | ||
1242 | if (phy->autoneg_wait_to_complete) { | 1242 | if (phy->autoneg_wait_to_complete) { |
1243 | hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n"); | 1243 | e_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); |
1244 | 1244 | ||
1245 | ret_val = e1000e_phy_has_link_generic(hw, | 1245 | ret_val = e1000e_phy_has_link_generic(hw, |
1246 | PHY_FORCE_LIMIT, | 1246 | PHY_FORCE_LIMIT, |
@@ -1250,7 +1250,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) | |||
1250 | return ret_val; | 1250 | return ret_val; |
1251 | 1251 | ||
1252 | if (!link) | 1252 | if (!link) |
1253 | hw_dbg(hw, "Link taking longer than expected.\n"); | 1253 | e_dbg("Link taking longer than expected.\n"); |
1254 | 1254 | ||
1255 | /* Try once more */ | 1255 | /* Try once more */ |
1256 | ret_val = e1000e_phy_has_link_generic(hw, | 1256 | ret_val = e1000e_phy_has_link_generic(hw, |
@@ -1294,7 +1294,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1294 | if (ret_val) | 1294 | if (ret_val) |
1295 | return ret_val; | 1295 | return ret_val; |
1296 | 1296 | ||
1297 | hw_dbg(hw, "M88E1000 PSCR: %X\n", phy_data); | 1297 | e_dbg("M88E1000 PSCR: %X\n", phy_data); |
1298 | 1298 | ||
1299 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); | 1299 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); |
1300 | if (ret_val) | 1300 | if (ret_val) |
@@ -1312,7 +1312,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1312 | return ret_val; | 1312 | return ret_val; |
1313 | 1313 | ||
1314 | if (phy->autoneg_wait_to_complete) { | 1314 | if (phy->autoneg_wait_to_complete) { |
1315 | hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n"); | 1315 | e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); |
1316 | 1316 | ||
1317 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | 1317 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, |
1318 | 100000, &link); | 1318 | 100000, &link); |
@@ -1403,11 +1403,11 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) | |||
1403 | if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { | 1403 | if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { |
1404 | ctrl &= ~E1000_CTRL_FD; | 1404 | ctrl &= ~E1000_CTRL_FD; |
1405 | *phy_ctrl &= ~MII_CR_FULL_DUPLEX; | 1405 | *phy_ctrl &= ~MII_CR_FULL_DUPLEX; |
1406 | hw_dbg(hw, "Half Duplex\n"); | 1406 | e_dbg("Half Duplex\n"); |
1407 | } else { | 1407 | } else { |
1408 | ctrl |= E1000_CTRL_FD; | 1408 | ctrl |= E1000_CTRL_FD; |
1409 | *phy_ctrl |= MII_CR_FULL_DUPLEX; | 1409 | *phy_ctrl |= MII_CR_FULL_DUPLEX; |
1410 | hw_dbg(hw, "Full Duplex\n"); | 1410 | e_dbg("Full Duplex\n"); |
1411 | } | 1411 | } |
1412 | 1412 | ||
1413 | /* Forcing 10mb or 100mb? */ | 1413 | /* Forcing 10mb or 100mb? */ |
@@ -1415,12 +1415,12 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) | |||
1415 | ctrl |= E1000_CTRL_SPD_100; | 1415 | ctrl |= E1000_CTRL_SPD_100; |
1416 | *phy_ctrl |= MII_CR_SPEED_100; | 1416 | *phy_ctrl |= MII_CR_SPEED_100; |
1417 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); | 1417 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); |
1418 | hw_dbg(hw, "Forcing 100mb\n"); | 1418 | e_dbg("Forcing 100mb\n"); |
1419 | } else { | 1419 | } else { |
1420 | ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); | 1420 | ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); |
1421 | *phy_ctrl |= MII_CR_SPEED_10; | 1421 | *phy_ctrl |= MII_CR_SPEED_10; |
1422 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); | 1422 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); |
1423 | hw_dbg(hw, "Forcing 10mb\n"); | 1423 | e_dbg("Forcing 10mb\n"); |
1424 | } | 1424 | } |
1425 | 1425 | ||
1426 | e1000e_config_collision_dist(hw); | 1426 | e1000e_config_collision_dist(hw); |
@@ -1535,7 +1535,7 @@ s32 e1000e_check_downshift(struct e1000_hw *hw) | |||
1535 | break; | 1535 | break; |
1536 | default: | 1536 | default: |
1537 | /* speed downshift not supported */ | 1537 | /* speed downshift not supported */ |
1538 | phy->speed_downgraded = 0; | 1538 | phy->speed_downgraded = false; |
1539 | return 0; | 1539 | return 0; |
1540 | } | 1540 | } |
1541 | 1541 | ||
@@ -1816,7 +1816,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) | |||
1816 | bool link; | 1816 | bool link; |
1817 | 1817 | ||
1818 | if (hw->phy.media_type != e1000_media_type_copper) { | 1818 | if (hw->phy.media_type != e1000_media_type_copper) { |
1819 | hw_dbg(hw, "Phy info is only valid for copper media\n"); | 1819 | e_dbg("Phy info is only valid for copper media\n"); |
1820 | return -E1000_ERR_CONFIG; | 1820 | return -E1000_ERR_CONFIG; |
1821 | } | 1821 | } |
1822 | 1822 | ||
@@ -1825,7 +1825,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) | |||
1825 | return ret_val; | 1825 | return ret_val; |
1826 | 1826 | ||
1827 | if (!link) { | 1827 | if (!link) { |
1828 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | 1828 | e_dbg("Phy info is only valid if link is up\n"); |
1829 | return -E1000_ERR_CONFIG; | 1829 | return -E1000_ERR_CONFIG; |
1830 | } | 1830 | } |
1831 | 1831 | ||
@@ -1893,11 +1893,11 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) | |||
1893 | return ret_val; | 1893 | return ret_val; |
1894 | 1894 | ||
1895 | if (!link) { | 1895 | if (!link) { |
1896 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | 1896 | e_dbg("Phy info is only valid if link is up\n"); |
1897 | return -E1000_ERR_CONFIG; | 1897 | return -E1000_ERR_CONFIG; |
1898 | } | 1898 | } |
1899 | 1899 | ||
1900 | phy->polarity_correction = 1; | 1900 | phy->polarity_correction = true; |
1901 | 1901 | ||
1902 | ret_val = e1000_check_polarity_igp(hw); | 1902 | ret_val = e1000_check_polarity_igp(hw); |
1903 | if (ret_val) | 1903 | if (ret_val) |
@@ -1980,7 +1980,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) | |||
1980 | if (ret_val) | 1980 | if (ret_val) |
1981 | return 0; | 1981 | return 0; |
1982 | 1982 | ||
1983 | ret_val = phy->ops.acquire_phy(hw); | 1983 | ret_val = phy->ops.acquire(hw); |
1984 | if (ret_val) | 1984 | if (ret_val) |
1985 | return ret_val; | 1985 | return ret_val; |
1986 | 1986 | ||
@@ -1995,7 +1995,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) | |||
1995 | 1995 | ||
1996 | udelay(150); | 1996 | udelay(150); |
1997 | 1997 | ||
1998 | phy->ops.release_phy(hw); | 1998 | phy->ops.release(hw); |
1999 | 1999 | ||
2000 | return e1000_get_phy_cfg_done(hw); | 2000 | return e1000_get_phy_cfg_done(hw); |
2001 | } | 2001 | } |
@@ -2021,7 +2021,7 @@ s32 e1000e_get_cfg_done(struct e1000_hw *hw) | |||
2021 | **/ | 2021 | **/ |
2022 | s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) | 2022 | s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) |
2023 | { | 2023 | { |
2024 | hw_dbg(hw, "Running IGP 3 PHY init script\n"); | 2024 | e_dbg("Running IGP 3 PHY init script\n"); |
2025 | 2025 | ||
2026 | /* PHY init IGP 3 */ | 2026 | /* PHY init IGP 3 */ |
2027 | /* Enable rise/fall, 10-mode work in class-A */ | 2027 | /* Enable rise/fall, 10-mode work in class-A */ |
@@ -2246,7 +2246,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) | |||
2246 | u32 page = offset >> IGP_PAGE_SHIFT; | 2246 | u32 page = offset >> IGP_PAGE_SHIFT; |
2247 | u32 page_shift = 0; | 2247 | u32 page_shift = 0; |
2248 | 2248 | ||
2249 | ret_val = hw->phy.ops.acquire_phy(hw); | 2249 | ret_val = hw->phy.ops.acquire(hw); |
2250 | if (ret_val) | 2250 | if (ret_val) |
2251 | return ret_val; | 2251 | return ret_val; |
2252 | 2252 | ||
@@ -2284,7 +2284,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) | |||
2284 | data); | 2284 | data); |
2285 | 2285 | ||
2286 | out: | 2286 | out: |
2287 | hw->phy.ops.release_phy(hw); | 2287 | hw->phy.ops.release(hw); |
2288 | return ret_val; | 2288 | return ret_val; |
2289 | } | 2289 | } |
2290 | 2290 | ||
@@ -2305,7 +2305,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2305 | u32 page = offset >> IGP_PAGE_SHIFT; | 2305 | u32 page = offset >> IGP_PAGE_SHIFT; |
2306 | u32 page_shift = 0; | 2306 | u32 page_shift = 0; |
2307 | 2307 | ||
2308 | ret_val = hw->phy.ops.acquire_phy(hw); | 2308 | ret_val = hw->phy.ops.acquire(hw); |
2309 | if (ret_val) | 2309 | if (ret_val) |
2310 | return ret_val; | 2310 | return ret_val; |
2311 | 2311 | ||
@@ -2342,7 +2342,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2342 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 2342 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
2343 | data); | 2343 | data); |
2344 | out: | 2344 | out: |
2345 | hw->phy.ops.release_phy(hw); | 2345 | hw->phy.ops.release(hw); |
2346 | return ret_val; | 2346 | return ret_val; |
2347 | } | 2347 | } |
2348 | 2348 | ||
@@ -2361,7 +2361,7 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2361 | s32 ret_val; | 2361 | s32 ret_val; |
2362 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); | 2362 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); |
2363 | 2363 | ||
2364 | ret_val = hw->phy.ops.acquire_phy(hw); | 2364 | ret_val = hw->phy.ops.acquire(hw); |
2365 | if (ret_val) | 2365 | if (ret_val) |
2366 | return ret_val; | 2366 | return ret_val; |
2367 | 2367 | ||
@@ -2387,7 +2387,7 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2387 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 2387 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
2388 | data); | 2388 | data); |
2389 | out: | 2389 | out: |
2390 | hw->phy.ops.release_phy(hw); | 2390 | hw->phy.ops.release(hw); |
2391 | return ret_val; | 2391 | return ret_val; |
2392 | } | 2392 | } |
2393 | 2393 | ||
@@ -2405,7 +2405,7 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) | |||
2405 | s32 ret_val; | 2405 | s32 ret_val; |
2406 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); | 2406 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); |
2407 | 2407 | ||
2408 | ret_val = hw->phy.ops.acquire_phy(hw); | 2408 | ret_val = hw->phy.ops.acquire(hw); |
2409 | if (ret_val) | 2409 | if (ret_val) |
2410 | return ret_val; | 2410 | return ret_val; |
2411 | 2411 | ||
@@ -2431,7 +2431,7 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) | |||
2431 | data); | 2431 | data); |
2432 | 2432 | ||
2433 | out: | 2433 | out: |
2434 | hw->phy.ops.release_phy(hw); | 2434 | hw->phy.ops.release(hw); |
2435 | return ret_val; | 2435 | return ret_val; |
2436 | } | 2436 | } |
2437 | 2437 | ||
@@ -2464,7 +2464,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | |||
2464 | /* Gig must be disabled for MDIO accesses to page 800 */ | 2464 | /* Gig must be disabled for MDIO accesses to page 800 */ |
2465 | if ((hw->mac.type == e1000_pchlan) && | 2465 | if ((hw->mac.type == e1000_pchlan) && |
2466 | (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) | 2466 | (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) |
2467 | hw_dbg(hw, "Attempting to access page 800 while gig enabled\n"); | 2467 | e_dbg("Attempting to access page 800 while gig enabled\n"); |
2468 | 2468 | ||
2469 | /* All operations in this function are phy address 1 */ | 2469 | /* All operations in this function are phy address 1 */ |
2470 | hw->phy.addr = 1; | 2470 | hw->phy.addr = 1; |
@@ -2534,8 +2534,8 @@ out: | |||
2534 | **/ | 2534 | **/ |
2535 | s32 e1000e_commit_phy(struct e1000_hw *hw) | 2535 | s32 e1000e_commit_phy(struct e1000_hw *hw) |
2536 | { | 2536 | { |
2537 | if (hw->phy.ops.commit_phy) | 2537 | if (hw->phy.ops.commit) |
2538 | return hw->phy.ops.commit_phy(hw); | 2538 | return hw->phy.ops.commit(hw); |
2539 | 2539 | ||
2540 | return 0; | 2540 | return 0; |
2541 | } | 2541 | } |
@@ -2614,7 +2614,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, | |||
2614 | bool in_slow_mode = false; | 2614 | bool in_slow_mode = false; |
2615 | 2615 | ||
2616 | if (!locked) { | 2616 | if (!locked) { |
2617 | ret_val = hw->phy.ops.acquire_phy(hw); | 2617 | ret_val = hw->phy.ops.acquire(hw); |
2618 | if (ret_val) | 2618 | if (ret_val) |
2619 | return ret_val; | 2619 | return ret_val; |
2620 | } | 2620 | } |
@@ -2670,7 +2670,7 @@ out: | |||
2670 | ret_val |= e1000_set_mdio_slow_mode_hv(hw, false); | 2670 | ret_val |= e1000_set_mdio_slow_mode_hv(hw, false); |
2671 | 2671 | ||
2672 | if (!locked) | 2672 | if (!locked) |
2673 | hw->phy.ops.release_phy(hw); | 2673 | hw->phy.ops.release(hw); |
2674 | 2674 | ||
2675 | return ret_val; | 2675 | return ret_val; |
2676 | } | 2676 | } |
@@ -2723,7 +2723,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, | |||
2723 | bool in_slow_mode = false; | 2723 | bool in_slow_mode = false; |
2724 | 2724 | ||
2725 | if (!locked) { | 2725 | if (!locked) { |
2726 | ret_val = hw->phy.ops.acquire_phy(hw); | 2726 | ret_val = hw->phy.ops.acquire(hw); |
2727 | if (ret_val) | 2727 | if (ret_val) |
2728 | return ret_val; | 2728 | return ret_val; |
2729 | } | 2729 | } |
@@ -2796,7 +2796,7 @@ out: | |||
2796 | ret_val |= e1000_set_mdio_slow_mode_hv(hw, false); | 2796 | ret_val |= e1000_set_mdio_slow_mode_hv(hw, false); |
2797 | 2797 | ||
2798 | if (!locked) | 2798 | if (!locked) |
2799 | hw->phy.ops.release_phy(hw); | 2799 | hw->phy.ops.release(hw); |
2800 | 2800 | ||
2801 | return ret_val; | 2801 | return ret_val; |
2802 | } | 2802 | } |
@@ -2872,7 +2872,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | |||
2872 | /* masking with 0x3F to remove the page from offset */ | 2872 | /* masking with 0x3F to remove the page from offset */ |
2873 | ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); | 2873 | ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); |
2874 | if (ret_val) { | 2874 | if (ret_val) { |
2875 | hw_dbg(hw, "Could not write PHY the HV address register\n"); | 2875 | e_dbg("Could not write PHY the HV address register\n"); |
2876 | goto out; | 2876 | goto out; |
2877 | } | 2877 | } |
2878 | 2878 | ||
@@ -2883,7 +2883,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | |||
2883 | ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); | 2883 | ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); |
2884 | 2884 | ||
2885 | if (ret_val) { | 2885 | if (ret_val) { |
2886 | hw_dbg(hw, "Could not read data value from HV data register\n"); | 2886 | e_dbg("Could not read data value from HV data register\n"); |
2887 | goto out; | 2887 | goto out; |
2888 | } | 2888 | } |
2889 | 2889 | ||
@@ -2911,12 +2911,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) | |||
2911 | goto out; | 2911 | goto out; |
2912 | 2912 | ||
2913 | /* Do not apply workaround if in PHY loopback bit 14 set */ | 2913 | /* Do not apply workaround if in PHY loopback bit 14 set */ |
2914 | hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &data); | 2914 | hw->phy.ops.read_reg(hw, PHY_CONTROL, &data); |
2915 | if (data & PHY_CONTROL_LB) | 2915 | if (data & PHY_CONTROL_LB) |
2916 | goto out; | 2916 | goto out; |
2917 | 2917 | ||
2918 | /* check if link is up and at 1Gbps */ | 2918 | /* check if link is up and at 1Gbps */ |
2919 | ret_val = hw->phy.ops.read_phy_reg(hw, BM_CS_STATUS, &data); | 2919 | ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data); |
2920 | if (ret_val) | 2920 | if (ret_val) |
2921 | goto out; | 2921 | goto out; |
2922 | 2922 | ||
@@ -2932,13 +2932,13 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) | |||
2932 | mdelay(200); | 2932 | mdelay(200); |
2933 | 2933 | ||
2934 | /* flush the packets in the fifo buffer */ | 2934 | /* flush the packets in the fifo buffer */ |
2935 | ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL, | 2935 | ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, |
2936 | HV_MUX_DATA_CTRL_GEN_TO_MAC | | 2936 | HV_MUX_DATA_CTRL_GEN_TO_MAC | |
2937 | HV_MUX_DATA_CTRL_FORCE_SPEED); | 2937 | HV_MUX_DATA_CTRL_FORCE_SPEED); |
2938 | if (ret_val) | 2938 | if (ret_val) |
2939 | goto out; | 2939 | goto out; |
2940 | 2940 | ||
2941 | ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL, | 2941 | ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, |
2942 | HV_MUX_DATA_CTRL_GEN_TO_MAC); | 2942 | HV_MUX_DATA_CTRL_GEN_TO_MAC); |
2943 | 2943 | ||
2944 | out: | 2944 | out: |
@@ -2959,7 +2959,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw) | |||
2959 | s32 ret_val; | 2959 | s32 ret_val; |
2960 | u16 data; | 2960 | u16 data; |
2961 | 2961 | ||
2962 | ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data); | 2962 | ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); |
2963 | 2963 | ||
2964 | if (!ret_val) | 2964 | if (!ret_val) |
2965 | phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) | 2965 | phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) |
@@ -2984,13 +2984,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) | |||
2984 | u16 phy_data; | 2984 | u16 phy_data; |
2985 | bool link; | 2985 | bool link; |
2986 | 2986 | ||
2987 | ret_val = phy->ops.read_phy_reg(hw, PHY_CONTROL, &phy_data); | 2987 | ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); |
2988 | if (ret_val) | 2988 | if (ret_val) |
2989 | goto out; | 2989 | goto out; |
2990 | 2990 | ||
2991 | e1000e_phy_force_speed_duplex_setup(hw, &phy_data); | 2991 | e1000e_phy_force_speed_duplex_setup(hw, &phy_data); |
2992 | 2992 | ||
2993 | ret_val = phy->ops.write_phy_reg(hw, PHY_CONTROL, phy_data); | 2993 | ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); |
2994 | if (ret_val) | 2994 | if (ret_val) |
2995 | goto out; | 2995 | goto out; |
2996 | 2996 | ||
@@ -2998,23 +2998,23 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) | |||
2998 | * Clear Auto-Crossover to force MDI manually. 82577 requires MDI | 2998 | * Clear Auto-Crossover to force MDI manually. 82577 requires MDI |
2999 | * forced whenever speed and duplex are forced. | 2999 | * forced whenever speed and duplex are forced. |
3000 | */ | 3000 | */ |
3001 | ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_CTRL_2, &phy_data); | 3001 | ret_val = phy->ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data); |
3002 | if (ret_val) | 3002 | if (ret_val) |
3003 | goto out; | 3003 | goto out; |
3004 | 3004 | ||
3005 | phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX; | 3005 | phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX; |
3006 | phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX; | 3006 | phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX; |
3007 | 3007 | ||
3008 | ret_val = phy->ops.write_phy_reg(hw, I82577_PHY_CTRL_2, phy_data); | 3008 | ret_val = phy->ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data); |
3009 | if (ret_val) | 3009 | if (ret_val) |
3010 | goto out; | 3010 | goto out; |
3011 | 3011 | ||
3012 | hw_dbg(hw, "I82577_PHY_CTRL_2: %X\n", phy_data); | 3012 | e_dbg("I82577_PHY_CTRL_2: %X\n", phy_data); |
3013 | 3013 | ||
3014 | udelay(1); | 3014 | udelay(1); |
3015 | 3015 | ||
3016 | if (phy->autoneg_wait_to_complete) { | 3016 | if (phy->autoneg_wait_to_complete) { |
3017 | hw_dbg(hw, "Waiting for forced speed/duplex link on 82577 phy\n"); | 3017 | e_dbg("Waiting for forced speed/duplex link on 82577 phy\n"); |
3018 | 3018 | ||
3019 | ret_val = e1000e_phy_has_link_generic(hw, | 3019 | ret_val = e1000e_phy_has_link_generic(hw, |
3020 | PHY_FORCE_LIMIT, | 3020 | PHY_FORCE_LIMIT, |
@@ -3024,7 +3024,7 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) | |||
3024 | goto out; | 3024 | goto out; |
3025 | 3025 | ||
3026 | if (!link) | 3026 | if (!link) |
3027 | hw_dbg(hw, "Link taking longer than expected.\n"); | 3027 | e_dbg("Link taking longer than expected.\n"); |
3028 | 3028 | ||
3029 | /* Try once more */ | 3029 | /* Try once more */ |
3030 | ret_val = e1000e_phy_has_link_generic(hw, | 3030 | ret_val = e1000e_phy_has_link_generic(hw, |
@@ -3060,7 +3060,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) | |||
3060 | goto out; | 3060 | goto out; |
3061 | 3061 | ||
3062 | if (!link) { | 3062 | if (!link) { |
3063 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | 3063 | e_dbg("Phy info is only valid if link is up\n"); |
3064 | ret_val = -E1000_ERR_CONFIG; | 3064 | ret_val = -E1000_ERR_CONFIG; |
3065 | goto out; | 3065 | goto out; |
3066 | } | 3066 | } |
@@ -3071,7 +3071,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) | |||
3071 | if (ret_val) | 3071 | if (ret_val) |
3072 | goto out; | 3072 | goto out; |
3073 | 3073 | ||
3074 | ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data); | 3074 | ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); |
3075 | if (ret_val) | 3075 | if (ret_val) |
3076 | goto out; | 3076 | goto out; |
3077 | 3077 | ||
@@ -3083,7 +3083,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) | |||
3083 | if (ret_val) | 3083 | if (ret_val) |
3084 | goto out; | 3084 | goto out; |
3085 | 3085 | ||
3086 | ret_val = phy->ops.read_phy_reg(hw, PHY_1000T_STATUS, &data); | 3086 | ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); |
3087 | if (ret_val) | 3087 | if (ret_val) |
3088 | goto out; | 3088 | goto out; |
3089 | 3089 | ||
@@ -3117,7 +3117,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) | |||
3117 | s32 ret_val; | 3117 | s32 ret_val; |
3118 | u16 phy_data, length; | 3118 | u16 phy_data, length; |
3119 | 3119 | ||
3120 | ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); | 3120 | ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); |
3121 | if (ret_val) | 3121 | if (ret_val) |
3122 | goto out; | 3122 | goto out; |
3123 | 3123 | ||
@@ -3125,7 +3125,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) | |||
3125 | I82577_DSTATUS_CABLE_LENGTH_SHIFT; | 3125 | I82577_DSTATUS_CABLE_LENGTH_SHIFT; |
3126 | 3126 | ||
3127 | if (length == E1000_CABLE_LENGTH_UNDEFINED) | 3127 | if (length == E1000_CABLE_LENGTH_UNDEFINED) |
3128 | ret_val = E1000_ERR_PHY; | 3128 | ret_val = -E1000_ERR_PHY; |
3129 | 3129 | ||
3130 | phy->cable_length = length; | 3130 | phy->cable_length = length; |
3131 | 3131 | ||
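The hunks above consistently rename the PHY function pointers (acquire_phy/release_phy/read_phy_reg/write_phy_reg become acquire/release/read_reg/write_reg). Below is a minimal, illustrative sketch of the locked read-modify-write pattern those renamed ops are used in (as seen in the __e1000_read_phy_reg_hv and e1000e_write_phy_reg_bm hunks). The stand-in typedefs/struct layouts and the helper name phy_set_bits are assumptions made only to keep the sketch self-contained; they are not the kernel's real headers or part of this patch.

/*
 * Sketch only: simplified stand-in types, not the real e1000e headers.
 * Shows the acquire -> read_reg -> write_reg -> release pattern used
 * with the renamed phy ops in the hunks above.
 */
typedef int s32;
typedef unsigned short u16;
typedef unsigned int u32;

struct e1000_hw;

struct e1000_phy_operations {
	s32  (*acquire)(struct e1000_hw *hw);
	void (*release)(struct e1000_hw *hw);
	s32  (*read_reg)(struct e1000_hw *hw, u32 offset, u16 *data);
	s32  (*write_reg)(struct e1000_hw *hw, u32 offset, u16 data);
};

struct e1000_phy_info {
	struct e1000_phy_operations ops;
};

struct e1000_hw {
	struct e1000_phy_info phy;
};

/* Hypothetical helper: set bits in one PHY register under the lock. */
static s32 phy_set_bits(struct e1000_hw *hw, u32 offset, u16 bits)
{
	u16 data;
	s32 ret_val = hw->phy.ops.acquire(hw);

	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.read_reg(hw, offset, &data);
	if (!ret_val)
		ret_val = hw->phy.ops.write_reg(hw, offset, data | bits);

	hw->phy.ops.release(hw);
	return ret_val;
}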