diff options
author | Bruce Allan <bruce.w.allan@intel.com> | 2009-11-20 18:25:07 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-11-21 14:34:05 -0500 |
commit | 3bb99fe226ead584a4db674dab546689f705201f (patch) | |
tree | 3b49aaef9f4b798b7930a76f62e754eefe0ddb91 /drivers | |
parent | d8014dbca7f5d2d6f0fdb47e5286bd2d887f7065 (diff) |
e1000e: consolidate two debug macros into one simpler one
This patch depends on a previous one that cleans up redundant #includes.
Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/e1000e/82571.c | 40 | ||||
-rw-r--r-- | drivers/net/e1000e/e1000.h | 4 | ||||
-rw-r--r-- | drivers/net/e1000e/es2lan.c | 23 | ||||
-rw-r--r-- | drivers/net/e1000e/hw.h | 11 | ||||
-rw-r--r-- | drivers/net/e1000e/ich8lan.c | 69 | ||||
-rw-r--r-- | drivers/net/e1000e/lib.c | 122 | ||||
-rw-r--r-- | drivers/net/e1000e/netdev.c | 28 | ||||
-rw-r--r-- | drivers/net/e1000e/phy.c | 92 |
8 files changed, 183 insertions, 206 deletions
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index 407637f6c331..680b7c703062 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c | |||
@@ -309,7 +309,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) | |||
309 | * indicates that the bootagent or EFI code has | 309 | * indicates that the bootagent or EFI code has |
310 | * improperly left this bit enabled | 310 | * improperly left this bit enabled |
311 | */ | 311 | */ |
312 | hw_dbg(hw, "Please update your 82571 Bootagent\n"); | 312 | e_dbg("Please update your 82571 Bootagent\n"); |
313 | } | 313 | } |
314 | ew32(SWSM, swsm & ~E1000_SWSM_SMBI); | 314 | ew32(SWSM, swsm & ~E1000_SWSM_SMBI); |
315 | } | 315 | } |
@@ -483,7 +483,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) | |||
483 | } | 483 | } |
484 | 484 | ||
485 | if (i == sw_timeout) { | 485 | if (i == sw_timeout) { |
486 | hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n"); | 486 | e_dbg("Driver can't access device - SMBI bit is set.\n"); |
487 | hw->dev_spec.e82571.smb_counter++; | 487 | hw->dev_spec.e82571.smb_counter++; |
488 | } | 488 | } |
489 | /* Get the FW semaphore. */ | 489 | /* Get the FW semaphore. */ |
@@ -501,7 +501,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) | |||
501 | if (i == fw_timeout) { | 501 | if (i == fw_timeout) { |
502 | /* Release semaphores */ | 502 | /* Release semaphores */ |
503 | e1000_put_hw_semaphore_82571(hw); | 503 | e1000_put_hw_semaphore_82571(hw); |
504 | hw_dbg(hw, "Driver can't access the NVM\n"); | 504 | e_dbg("Driver can't access the NVM\n"); |
505 | return -E1000_ERR_NVM; | 505 | return -E1000_ERR_NVM; |
506 | } | 506 | } |
507 | 507 | ||
@@ -708,7 +708,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, | |||
708 | */ | 708 | */ |
709 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 709 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
710 | (words == 0)) { | 710 | (words == 0)) { |
711 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 711 | e_dbg("nvm parameter(s) out of bounds\n"); |
712 | return -E1000_ERR_NVM; | 712 | return -E1000_ERR_NVM; |
713 | } | 713 | } |
714 | 714 | ||
@@ -749,7 +749,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) | |||
749 | timeout--; | 749 | timeout--; |
750 | } | 750 | } |
751 | if (!timeout) { | 751 | if (!timeout) { |
752 | hw_dbg(hw, "MNG configuration cycle has not completed.\n"); | 752 | e_dbg("MNG configuration cycle has not completed.\n"); |
753 | return -E1000_ERR_RESET; | 753 | return -E1000_ERR_RESET; |
754 | } | 754 | } |
755 | 755 | ||
@@ -848,9 +848,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) | |||
848 | */ | 848 | */ |
849 | ret_val = e1000e_disable_pcie_master(hw); | 849 | ret_val = e1000e_disable_pcie_master(hw); |
850 | if (ret_val) | 850 | if (ret_val) |
851 | hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); | 851 | e_dbg("PCI-E Master disable polling has failed.\n"); |
852 | 852 | ||
853 | hw_dbg(hw, "Masking off all interrupts\n"); | 853 | e_dbg("Masking off all interrupts\n"); |
854 | ew32(IMC, 0xffffffff); | 854 | ew32(IMC, 0xffffffff); |
855 | 855 | ||
856 | ew32(RCTL, 0); | 856 | ew32(RCTL, 0); |
@@ -889,7 +889,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) | |||
889 | 889 | ||
890 | ctrl = er32(CTRL); | 890 | ctrl = er32(CTRL); |
891 | 891 | ||
892 | hw_dbg(hw, "Issuing a global reset to MAC\n"); | 892 | e_dbg("Issuing a global reset to MAC\n"); |
893 | ew32(CTRL, ctrl | E1000_CTRL_RST); | 893 | ew32(CTRL, ctrl | E1000_CTRL_RST); |
894 | 894 | ||
895 | if (hw->nvm.type == e1000_nvm_flash_hw) { | 895 | if (hw->nvm.type == e1000_nvm_flash_hw) { |
@@ -955,12 +955,12 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) | |||
955 | /* Initialize identification LED */ | 955 | /* Initialize identification LED */ |
956 | ret_val = e1000e_id_led_init(hw); | 956 | ret_val = e1000e_id_led_init(hw); |
957 | if (ret_val) { | 957 | if (ret_val) { |
958 | hw_dbg(hw, "Error initializing identification LED\n"); | 958 | e_dbg("Error initializing identification LED\n"); |
959 | return ret_val; | 959 | return ret_val; |
960 | } | 960 | } |
961 | 961 | ||
962 | /* Disabling VLAN filtering */ | 962 | /* Disabling VLAN filtering */ |
963 | hw_dbg(hw, "Initializing the IEEE VLAN\n"); | 963 | e_dbg("Initializing the IEEE VLAN\n"); |
964 | e1000e_clear_vfta(hw); | 964 | e1000e_clear_vfta(hw); |
965 | 965 | ||
966 | /* Setup the receive address. */ | 966 | /* Setup the receive address. */ |
@@ -974,7 +974,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) | |||
974 | e1000e_init_rx_addrs(hw, rar_count); | 974 | e1000e_init_rx_addrs(hw, rar_count); |
975 | 975 | ||
976 | /* Zero out the Multicast HASH table */ | 976 | /* Zero out the Multicast HASH table */ |
977 | hw_dbg(hw, "Zeroing the MTA\n"); | 977 | e_dbg("Zeroing the MTA\n"); |
978 | for (i = 0; i < mac->mta_reg_count; i++) | 978 | for (i = 0; i < mac->mta_reg_count; i++) |
979 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); | 979 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); |
980 | 980 | ||
@@ -1383,7 +1383,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) | |||
1383 | */ | 1383 | */ |
1384 | mac->serdes_link_state = | 1384 | mac->serdes_link_state = |
1385 | e1000_serdes_link_autoneg_progress; | 1385 | e1000_serdes_link_autoneg_progress; |
1386 | hw_dbg(hw, "AN_UP -> AN_PROG\n"); | 1386 | e_dbg("AN_UP -> AN_PROG\n"); |
1387 | } | 1387 | } |
1388 | break; | 1388 | break; |
1389 | 1389 | ||
@@ -1401,7 +1401,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) | |||
1401 | (ctrl & ~E1000_CTRL_SLU)); | 1401 | (ctrl & ~E1000_CTRL_SLU)); |
1402 | mac->serdes_link_state = | 1402 | mac->serdes_link_state = |
1403 | e1000_serdes_link_autoneg_progress; | 1403 | e1000_serdes_link_autoneg_progress; |
1404 | hw_dbg(hw, "FORCED_UP -> AN_PROG\n"); | 1404 | e_dbg("FORCED_UP -> AN_PROG\n"); |
1405 | } | 1405 | } |
1406 | break; | 1406 | break; |
1407 | 1407 | ||
@@ -1415,7 +1415,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) | |||
1415 | if (status & E1000_STATUS_LU) { | 1415 | if (status & E1000_STATUS_LU) { |
1416 | mac->serdes_link_state = | 1416 | mac->serdes_link_state = |
1417 | e1000_serdes_link_autoneg_complete; | 1417 | e1000_serdes_link_autoneg_complete; |
1418 | hw_dbg(hw, "AN_PROG -> AN_UP\n"); | 1418 | e_dbg("AN_PROG -> AN_UP\n"); |
1419 | } else { | 1419 | } else { |
1420 | /* | 1420 | /* |
1421 | * Disable autoneg, force link up and | 1421 | * Disable autoneg, force link up and |
@@ -1430,12 +1430,12 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) | |||
1430 | ret_val = | 1430 | ret_val = |
1431 | e1000e_config_fc_after_link_up(hw); | 1431 | e1000e_config_fc_after_link_up(hw); |
1432 | if (ret_val) { | 1432 | if (ret_val) { |
1433 | hw_dbg(hw, "Error config flow control\n"); | 1433 | e_dbg("Error config flow control\n"); |
1434 | break; | 1434 | break; |
1435 | } | 1435 | } |
1436 | mac->serdes_link_state = | 1436 | mac->serdes_link_state = |
1437 | e1000_serdes_link_forced_up; | 1437 | e1000_serdes_link_forced_up; |
1438 | hw_dbg(hw, "AN_PROG -> FORCED_UP\n"); | 1438 | e_dbg("AN_PROG -> FORCED_UP\n"); |
1439 | } | 1439 | } |
1440 | mac->serdes_has_link = true; | 1440 | mac->serdes_has_link = true; |
1441 | break; | 1441 | break; |
@@ -1450,14 +1450,14 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) | |||
1450 | (ctrl & ~E1000_CTRL_SLU)); | 1450 | (ctrl & ~E1000_CTRL_SLU)); |
1451 | mac->serdes_link_state = | 1451 | mac->serdes_link_state = |
1452 | e1000_serdes_link_autoneg_progress; | 1452 | e1000_serdes_link_autoneg_progress; |
1453 | hw_dbg(hw, "DOWN -> AN_PROG\n"); | 1453 | e_dbg("DOWN -> AN_PROG\n"); |
1454 | break; | 1454 | break; |
1455 | } | 1455 | } |
1456 | } else { | 1456 | } else { |
1457 | if (!(rxcw & E1000_RXCW_SYNCH)) { | 1457 | if (!(rxcw & E1000_RXCW_SYNCH)) { |
1458 | mac->serdes_has_link = false; | 1458 | mac->serdes_has_link = false; |
1459 | mac->serdes_link_state = e1000_serdes_link_down; | 1459 | mac->serdes_link_state = e1000_serdes_link_down; |
1460 | hw_dbg(hw, "ANYSTATE -> DOWN\n"); | 1460 | e_dbg("ANYSTATE -> DOWN\n"); |
1461 | } else { | 1461 | } else { |
1462 | /* | 1462 | /* |
1463 | * We have sync, and can tolerate one | 1463 | * We have sync, and can tolerate one |
@@ -1469,7 +1469,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) | |||
1469 | if (rxcw & E1000_RXCW_IV) { | 1469 | if (rxcw & E1000_RXCW_IV) { |
1470 | mac->serdes_link_state = e1000_serdes_link_down; | 1470 | mac->serdes_link_state = e1000_serdes_link_down; |
1471 | mac->serdes_has_link = false; | 1471 | mac->serdes_has_link = false; |
1472 | hw_dbg(hw, "ANYSTATE -> DOWN\n"); | 1472 | e_dbg("ANYSTATE -> DOWN\n"); |
1473 | } | 1473 | } |
1474 | } | 1474 | } |
1475 | } | 1475 | } |
@@ -1491,7 +1491,7 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data) | |||
1491 | 1491 | ||
1492 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); | 1492 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); |
1493 | if (ret_val) { | 1493 | if (ret_val) { |
1494 | hw_dbg(hw, "NVM Read Error\n"); | 1494 | e_dbg("NVM Read Error\n"); |
1495 | return ret_val; | 1495 | return ret_val; |
1496 | } | 1496 | } |
1497 | 1497 | ||
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index ac5d8552747b..dac00a023835 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -48,9 +48,9 @@ struct e1000_info; | |||
48 | 48 | ||
49 | #ifdef DEBUG | 49 | #ifdef DEBUG |
50 | #define e_dbg(format, arg...) \ | 50 | #define e_dbg(format, arg...) \ |
51 | e_printk(KERN_DEBUG , adapter, format, ## arg) | 51 | e_printk(KERN_DEBUG , hw->adapter, format, ## arg) |
52 | #else | 52 | #else |
53 | #define e_dbg(format, arg...) do { (void)(adapter); } while (0) | 53 | #define e_dbg(format, arg...) do { (void)(hw); } while (0) |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #define e_err(format, arg...) \ | 56 | #define e_err(format, arg...) \ |
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c index 6fd46f5f8a3c..f5601c5ff7af 100644 --- a/drivers/net/e1000e/es2lan.c +++ b/drivers/net/e1000e/es2lan.c | |||
@@ -394,8 +394,7 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) | |||
394 | } | 394 | } |
395 | 395 | ||
396 | if (i == timeout) { | 396 | if (i == timeout) { |
397 | hw_dbg(hw, | 397 | e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); |
398 | "Driver can't access resource, SW_FW_SYNC timeout.\n"); | ||
399 | return -E1000_ERR_SWFW_SYNC; | 398 | return -E1000_ERR_SWFW_SYNC; |
400 | } | 399 | } |
401 | 400 | ||
@@ -597,7 +596,7 @@ static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) | |||
597 | timeout--; | 596 | timeout--; |
598 | } | 597 | } |
599 | if (!timeout) { | 598 | if (!timeout) { |
600 | hw_dbg(hw, "MNG configuration cycle has not completed.\n"); | 599 | e_dbg("MNG configuration cycle has not completed.\n"); |
601 | return -E1000_ERR_RESET; | 600 | return -E1000_ERR_RESET; |
602 | } | 601 | } |
603 | 602 | ||
@@ -630,7 +629,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) | |||
630 | if (ret_val) | 629 | if (ret_val) |
631 | return ret_val; | 630 | return ret_val; |
632 | 631 | ||
633 | hw_dbg(hw, "GG82563 PSCR: %X\n", phy_data); | 632 | e_dbg("GG82563 PSCR: %X\n", phy_data); |
634 | 633 | ||
635 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); | 634 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); |
636 | if (ret_val) | 635 | if (ret_val) |
@@ -648,7 +647,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) | |||
648 | udelay(1); | 647 | udelay(1); |
649 | 648 | ||
650 | if (hw->phy.autoneg_wait_to_complete) { | 649 | if (hw->phy.autoneg_wait_to_complete) { |
651 | hw_dbg(hw, "Waiting for forced speed/duplex link " | 650 | e_dbg("Waiting for forced speed/duplex link " |
652 | "on GG82563 phy.\n"); | 651 | "on GG82563 phy.\n"); |
653 | 652 | ||
654 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | 653 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, |
@@ -771,9 +770,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | |||
771 | */ | 770 | */ |
772 | ret_val = e1000e_disable_pcie_master(hw); | 771 | ret_val = e1000e_disable_pcie_master(hw); |
773 | if (ret_val) | 772 | if (ret_val) |
774 | hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); | 773 | e_dbg("PCI-E Master disable polling has failed.\n"); |
775 | 774 | ||
776 | hw_dbg(hw, "Masking off all interrupts\n"); | 775 | e_dbg("Masking off all interrupts\n"); |
777 | ew32(IMC, 0xffffffff); | 776 | ew32(IMC, 0xffffffff); |
778 | 777 | ||
779 | ew32(RCTL, 0); | 778 | ew32(RCTL, 0); |
@@ -785,7 +784,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | |||
785 | ctrl = er32(CTRL); | 784 | ctrl = er32(CTRL); |
786 | 785 | ||
787 | ret_val = e1000_acquire_phy_80003es2lan(hw); | 786 | ret_val = e1000_acquire_phy_80003es2lan(hw); |
788 | hw_dbg(hw, "Issuing a global reset to MAC\n"); | 787 | e_dbg("Issuing a global reset to MAC\n"); |
789 | ew32(CTRL, ctrl | E1000_CTRL_RST); | 788 | ew32(CTRL, ctrl | E1000_CTRL_RST); |
790 | e1000_release_phy_80003es2lan(hw); | 789 | e1000_release_phy_80003es2lan(hw); |
791 | 790 | ||
@@ -820,19 +819,19 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) | |||
820 | /* Initialize identification LED */ | 819 | /* Initialize identification LED */ |
821 | ret_val = e1000e_id_led_init(hw); | 820 | ret_val = e1000e_id_led_init(hw); |
822 | if (ret_val) { | 821 | if (ret_val) { |
823 | hw_dbg(hw, "Error initializing identification LED\n"); | 822 | e_dbg("Error initializing identification LED\n"); |
824 | return ret_val; | 823 | return ret_val; |
825 | } | 824 | } |
826 | 825 | ||
827 | /* Disabling VLAN filtering */ | 826 | /* Disabling VLAN filtering */ |
828 | hw_dbg(hw, "Initializing the IEEE VLAN\n"); | 827 | e_dbg("Initializing the IEEE VLAN\n"); |
829 | e1000e_clear_vfta(hw); | 828 | e1000e_clear_vfta(hw); |
830 | 829 | ||
831 | /* Setup the receive address. */ | 830 | /* Setup the receive address. */ |
832 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); | 831 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); |
833 | 832 | ||
834 | /* Zero out the Multicast HASH table */ | 833 | /* Zero out the Multicast HASH table */ |
835 | hw_dbg(hw, "Zeroing the MTA\n"); | 834 | e_dbg("Zeroing the MTA\n"); |
836 | for (i = 0; i < mac->mta_reg_count; i++) | 835 | for (i = 0; i < mac->mta_reg_count; i++) |
837 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); | 836 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); |
838 | 837 | ||
@@ -989,7 +988,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) | |||
989 | /* SW Reset the PHY so all changes take effect */ | 988 | /* SW Reset the PHY so all changes take effect */ |
990 | ret_val = e1000e_commit_phy(hw); | 989 | ret_val = e1000e_commit_phy(hw); |
991 | if (ret_val) { | 990 | if (ret_val) { |
992 | hw_dbg(hw, "Error Resetting the PHY\n"); | 991 | e_dbg("Error Resetting the PHY\n"); |
993 | return ret_val; | 992 | return ret_val; |
994 | } | 993 | } |
995 | 994 | ||
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index aaea41ef794d..44808b053fcc 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h | |||
@@ -925,15 +925,4 @@ struct e1000_hw { | |||
925 | } dev_spec; | 925 | } dev_spec; |
926 | }; | 926 | }; |
927 | 927 | ||
928 | #ifdef DEBUG | ||
929 | #define hw_dbg(hw, format, arg...) \ | ||
930 | printk(KERN_DEBUG "%s: " format, e1000e_get_hw_dev_name(hw), ##arg) | ||
931 | #else | ||
932 | static inline int __attribute__ ((format (printf, 2, 3))) | ||
933 | hw_dbg(struct e1000_hw *hw, const char *format, ...) | ||
934 | { | ||
935 | return 0; | ||
936 | } | ||
937 | #endif | ||
938 | |||
939 | #endif | 928 | #endif |
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index bef5e3333f2a..101a2714467e 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -368,7 +368,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) | |||
368 | 368 | ||
369 | /* Can't read flash registers if the register set isn't mapped. */ | 369 | /* Can't read flash registers if the register set isn't mapped. */ |
370 | if (!hw->flash_address) { | 370 | if (!hw->flash_address) { |
371 | hw_dbg(hw, "ERROR: Flash registers not mapped\n"); | 371 | e_dbg("ERROR: Flash registers not mapped\n"); |
372 | return -E1000_ERR_CONFIG; | 372 | return -E1000_ERR_CONFIG; |
373 | } | 373 | } |
374 | 374 | ||
@@ -550,7 +550,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
550 | */ | 550 | */ |
551 | ret_val = e1000e_config_fc_after_link_up(hw); | 551 | ret_val = e1000e_config_fc_after_link_up(hw); |
552 | if (ret_val) | 552 | if (ret_val) |
553 | hw_dbg(hw, "Error configuring flow control\n"); | 553 | e_dbg("Error configuring flow control\n"); |
554 | 554 | ||
555 | out: | 555 | out: |
556 | return ret_val; | 556 | return ret_val; |
@@ -644,7 +644,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | |||
644 | } | 644 | } |
645 | 645 | ||
646 | if (!timeout) { | 646 | if (!timeout) { |
647 | hw_dbg(hw, "SW/FW/HW has locked the resource for too long.\n"); | 647 | e_dbg("SW/FW/HW has locked the resource for too long.\n"); |
648 | ret_val = -E1000_ERR_CONFIG; | 648 | ret_val = -E1000_ERR_CONFIG; |
649 | goto out; | 649 | goto out; |
650 | } | 650 | } |
@@ -664,7 +664,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | |||
664 | } | 664 | } |
665 | 665 | ||
666 | if (!timeout) { | 666 | if (!timeout) { |
667 | hw_dbg(hw, "Failed to acquire the semaphore.\n"); | 667 | e_dbg("Failed to acquire the semaphore.\n"); |
668 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; | 668 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; |
669 | ew32(EXTCNF_CTRL, extcnf_ctrl); | 669 | ew32(EXTCNF_CTRL, extcnf_ctrl); |
670 | ret_val = -E1000_ERR_CONFIG; | 670 | ret_val = -E1000_ERR_CONFIG; |
@@ -773,12 +773,12 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw) | |||
773 | if (ret_val) | 773 | if (ret_val) |
774 | return ret_val; | 774 | return ret_val; |
775 | 775 | ||
776 | hw_dbg(hw, "IFE PMC: %X\n", data); | 776 | e_dbg("IFE PMC: %X\n", data); |
777 | 777 | ||
778 | udelay(1); | 778 | udelay(1); |
779 | 779 | ||
780 | if (phy->autoneg_wait_to_complete) { | 780 | if (phy->autoneg_wait_to_complete) { |
781 | hw_dbg(hw, "Waiting for forced speed/duplex link on IFE phy.\n"); | 781 | e_dbg("Waiting for forced speed/duplex link on IFE phy.\n"); |
782 | 782 | ||
783 | ret_val = e1000e_phy_has_link_generic(hw, | 783 | ret_val = e1000e_phy_has_link_generic(hw, |
784 | PHY_FORCE_LIMIT, | 784 | PHY_FORCE_LIMIT, |
@@ -788,7 +788,7 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw) | |||
788 | return ret_val; | 788 | return ret_val; |
789 | 789 | ||
790 | if (!link) | 790 | if (!link) |
791 | hw_dbg(hw, "Link taking longer than expected.\n"); | 791 | e_dbg("Link taking longer than expected.\n"); |
792 | 792 | ||
793 | /* Try once more */ | 793 | /* Try once more */ |
794 | ret_val = e1000e_phy_has_link_generic(hw, | 794 | ret_val = e1000e_phy_has_link_generic(hw, |
@@ -1203,7 +1203,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) | |||
1203 | * leave the PHY in a bad state possibly resulting in no link. | 1203 | * leave the PHY in a bad state possibly resulting in no link. |
1204 | */ | 1204 | */ |
1205 | if (loop == 0) | 1205 | if (loop == 0) |
1206 | hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n"); | 1206 | e_dbg("LAN_INIT_DONE not set, increase timeout\n"); |
1207 | 1207 | ||
1208 | /* Clear the Init Done bit for the next init event */ | 1208 | /* Clear the Init Done bit for the next init event */ |
1209 | data = er32(STATUS); | 1209 | data = er32(STATUS); |
@@ -1274,7 +1274,7 @@ static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw) | |||
1274 | return ret_val; | 1274 | return ret_val; |
1275 | 1275 | ||
1276 | if (!link) { | 1276 | if (!link) { |
1277 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | 1277 | e_dbg("Phy info is only valid if link is up\n"); |
1278 | return -E1000_ERR_CONFIG; | 1278 | return -E1000_ERR_CONFIG; |
1279 | } | 1279 | } |
1280 | 1280 | ||
@@ -1604,7 +1604,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) | |||
1604 | 1604 | ||
1605 | return 0; | 1605 | return 0; |
1606 | } | 1606 | } |
1607 | hw_dbg(hw, "Unable to determine valid NVM bank via EEC - " | 1607 | e_dbg("Unable to determine valid NVM bank via EEC - " |
1608 | "reading flash signature\n"); | 1608 | "reading flash signature\n"); |
1609 | /* fall-thru */ | 1609 | /* fall-thru */ |
1610 | default: | 1610 | default: |
@@ -1634,7 +1634,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) | |||
1634 | return 0; | 1634 | return 0; |
1635 | } | 1635 | } |
1636 | 1636 | ||
1637 | hw_dbg(hw, "ERROR: No valid NVM bank present\n"); | 1637 | e_dbg("ERROR: No valid NVM bank present\n"); |
1638 | return -E1000_ERR_NVM; | 1638 | return -E1000_ERR_NVM; |
1639 | } | 1639 | } |
1640 | 1640 | ||
@@ -1662,7 +1662,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
1662 | 1662 | ||
1663 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || | 1663 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || |
1664 | (words == 0)) { | 1664 | (words == 0)) { |
1665 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1665 | e_dbg("nvm parameter(s) out of bounds\n"); |
1666 | ret_val = -E1000_ERR_NVM; | 1666 | ret_val = -E1000_ERR_NVM; |
1667 | goto out; | 1667 | goto out; |
1668 | } | 1668 | } |
@@ -1671,7 +1671,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
1671 | 1671 | ||
1672 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); | 1672 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); |
1673 | if (ret_val) { | 1673 | if (ret_val) { |
1674 | hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n"); | 1674 | e_dbg("Could not detect valid bank, assuming bank 0\n"); |
1675 | bank = 0; | 1675 | bank = 0; |
1676 | } | 1676 | } |
1677 | 1677 | ||
@@ -1697,7 +1697,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
1697 | 1697 | ||
1698 | out: | 1698 | out: |
1699 | if (ret_val) | 1699 | if (ret_val) |
1700 | hw_dbg(hw, "NVM read error: %d\n", ret_val); | 1700 | e_dbg("NVM read error: %d\n", ret_val); |
1701 | 1701 | ||
1702 | return ret_val; | 1702 | return ret_val; |
1703 | } | 1703 | } |
@@ -1719,7 +1719,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
1719 | 1719 | ||
1720 | /* Check if the flash descriptor is valid */ | 1720 | /* Check if the flash descriptor is valid */ |
1721 | if (hsfsts.hsf_status.fldesvalid == 0) { | 1721 | if (hsfsts.hsf_status.fldesvalid == 0) { |
1722 | hw_dbg(hw, "Flash descriptor invalid. " | 1722 | e_dbg("Flash descriptor invalid. " |
1723 | "SW Sequencing must be used."); | 1723 | "SW Sequencing must be used."); |
1724 | return -E1000_ERR_NVM; | 1724 | return -E1000_ERR_NVM; |
1725 | } | 1725 | } |
@@ -1769,7 +1769,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
1769 | hsfsts.hsf_status.flcdone = 1; | 1769 | hsfsts.hsf_status.flcdone = 1; |
1770 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | 1770 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); |
1771 | } else { | 1771 | } else { |
1772 | hw_dbg(hw, "Flash controller busy, cannot get access"); | 1772 | e_dbg("Flash controller busy, cannot get access"); |
1773 | } | 1773 | } |
1774 | } | 1774 | } |
1775 | 1775 | ||
@@ -1919,7 +1919,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |||
1919 | /* Repeat for some time before giving up. */ | 1919 | /* Repeat for some time before giving up. */ |
1920 | continue; | 1920 | continue; |
1921 | } else if (hsfsts.hsf_status.flcdone == 0) { | 1921 | } else if (hsfsts.hsf_status.flcdone == 0) { |
1922 | hw_dbg(hw, "Timeout error - flash cycle " | 1922 | e_dbg("Timeout error - flash cycle " |
1923 | "did not complete."); | 1923 | "did not complete."); |
1924 | break; | 1924 | break; |
1925 | } | 1925 | } |
@@ -1947,7 +1947,7 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
1947 | 1947 | ||
1948 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || | 1948 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || |
1949 | (words == 0)) { | 1949 | (words == 0)) { |
1950 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1950 | e_dbg("nvm parameter(s) out of bounds\n"); |
1951 | return -E1000_ERR_NVM; | 1951 | return -E1000_ERR_NVM; |
1952 | } | 1952 | } |
1953 | 1953 | ||
@@ -1998,7 +1998,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
1998 | */ | 1998 | */ |
1999 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); | 1999 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); |
2000 | if (ret_val) { | 2000 | if (ret_val) { |
2001 | hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n"); | 2001 | e_dbg("Could not detect valid bank, assuming bank 0\n"); |
2002 | bank = 0; | 2002 | bank = 0; |
2003 | } | 2003 | } |
2004 | 2004 | ||
@@ -2072,7 +2072,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2072 | */ | 2072 | */ |
2073 | if (ret_val) { | 2073 | if (ret_val) { |
2074 | /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ | 2074 | /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ |
2075 | hw_dbg(hw, "Flash commit failed.\n"); | 2075 | e_dbg("Flash commit failed.\n"); |
2076 | nvm->ops.release_nvm(hw); | 2076 | nvm->ops.release_nvm(hw); |
2077 | goto out; | 2077 | goto out; |
2078 | } | 2078 | } |
@@ -2128,7 +2128,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
2128 | 2128 | ||
2129 | out: | 2129 | out: |
2130 | if (ret_val) | 2130 | if (ret_val) |
2131 | hw_dbg(hw, "NVM update error: %d\n", ret_val); | 2131 | e_dbg("NVM update error: %d\n", ret_val); |
2132 | 2132 | ||
2133 | return ret_val; | 2133 | return ret_val; |
2134 | } | 2134 | } |
@@ -2278,7 +2278,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |||
2278 | /* Repeat for some time before giving up. */ | 2278 | /* Repeat for some time before giving up. */ |
2279 | continue; | 2279 | continue; |
2280 | if (hsfsts.hsf_status.flcdone == 0) { | 2280 | if (hsfsts.hsf_status.flcdone == 0) { |
2281 | hw_dbg(hw, "Timeout error - flash cycle " | 2281 | e_dbg("Timeout error - flash cycle " |
2282 | "did not complete."); | 2282 | "did not complete."); |
2283 | break; | 2283 | break; |
2284 | } | 2284 | } |
@@ -2323,7 +2323,7 @@ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, | |||
2323 | return ret_val; | 2323 | return ret_val; |
2324 | 2324 | ||
2325 | for (program_retries = 0; program_retries < 100; program_retries++) { | 2325 | for (program_retries = 0; program_retries < 100; program_retries++) { |
2326 | hw_dbg(hw, "Retrying Byte %2.2X at offset %u\n", byte, offset); | 2326 | e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); |
2327 | udelay(100); | 2327 | udelay(100); |
2328 | ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); | 2328 | ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); |
2329 | if (!ret_val) | 2329 | if (!ret_val) |
@@ -2458,7 +2458,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) | |||
2458 | 2458 | ||
2459 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); | 2459 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); |
2460 | if (ret_val) { | 2460 | if (ret_val) { |
2461 | hw_dbg(hw, "NVM Read Error\n"); | 2461 | e_dbg("NVM Read Error\n"); |
2462 | return ret_val; | 2462 | return ret_val; |
2463 | } | 2463 | } |
2464 | 2464 | ||
@@ -2588,10 +2588,10 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
2588 | */ | 2588 | */ |
2589 | ret_val = e1000e_disable_pcie_master(hw); | 2589 | ret_val = e1000e_disable_pcie_master(hw); |
2590 | if (ret_val) { | 2590 | if (ret_val) { |
2591 | hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); | 2591 | e_dbg("PCI-E Master disable polling has failed.\n"); |
2592 | } | 2592 | } |
2593 | 2593 | ||
2594 | hw_dbg(hw, "Masking off all interrupts\n"); | 2594 | e_dbg("Masking off all interrupts\n"); |
2595 | ew32(IMC, 0xffffffff); | 2595 | ew32(IMC, 0xffffffff); |
2596 | 2596 | ||
2597 | /* | 2597 | /* |
@@ -2643,7 +2643,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
2643 | } | 2643 | } |
2644 | ret_val = e1000_acquire_swflag_ich8lan(hw); | 2644 | ret_val = e1000_acquire_swflag_ich8lan(hw); |
2645 | /* Whether or not the swflag was acquired, we need to reset the part */ | 2645 | /* Whether or not the swflag was acquired, we need to reset the part */ |
2646 | hw_dbg(hw, "Issuing a global reset to ich8lan\n"); | 2646 | e_dbg("Issuing a global reset to ich8lan\n"); |
2647 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); | 2647 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); |
2648 | msleep(20); | 2648 | msleep(20); |
2649 | 2649 | ||
@@ -2663,7 +2663,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
2663 | * return with an error. This can happen in situations | 2663 | * return with an error. This can happen in situations |
2664 | * where there is no eeprom and prevents getting link. | 2664 | * where there is no eeprom and prevents getting link. |
2665 | */ | 2665 | */ |
2666 | hw_dbg(hw, "Auto Read Done did not complete\n"); | 2666 | e_dbg("Auto Read Done did not complete\n"); |
2667 | } | 2667 | } |
2668 | } | 2668 | } |
2669 | /* Dummy read to clear the phy wakeup bit after lcd reset */ | 2669 | /* Dummy read to clear the phy wakeup bit after lcd reset */ |
@@ -2725,7 +2725,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) | |||
2725 | /* Initialize identification LED */ | 2725 | /* Initialize identification LED */ |
2726 | ret_val = mac->ops.id_led_init(hw); | 2726 | ret_val = mac->ops.id_led_init(hw); |
2727 | if (ret_val) { | 2727 | if (ret_val) { |
2728 | hw_dbg(hw, "Error initializing identification LED\n"); | 2728 | e_dbg("Error initializing identification LED\n"); |
2729 | return ret_val; | 2729 | return ret_val; |
2730 | } | 2730 | } |
2731 | 2731 | ||
@@ -2733,7 +2733,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) | |||
2733 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); | 2733 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); |
2734 | 2734 | ||
2735 | /* Zero out the Multicast HASH table */ | 2735 | /* Zero out the Multicast HASH table */ |
2736 | hw_dbg(hw, "Zeroing the MTA\n"); | 2736 | e_dbg("Zeroing the MTA\n"); |
2737 | for (i = 0; i < mac->mta_reg_count; i++) | 2737 | for (i = 0; i < mac->mta_reg_count; i++) |
2738 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); | 2738 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); |
2739 | 2739 | ||
@@ -2879,7 +2879,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) | |||
2879 | */ | 2879 | */ |
2880 | hw->fc.current_mode = hw->fc.requested_mode; | 2880 | hw->fc.current_mode = hw->fc.requested_mode; |
2881 | 2881 | ||
2882 | hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", | 2882 | e_dbg("After fix-ups FlowControl is now = %x\n", |
2883 | hw->fc.current_mode); | 2883 | hw->fc.current_mode); |
2884 | 2884 | ||
2885 | /* Continue to configure the copper link. */ | 2885 | /* Continue to configure the copper link. */ |
@@ -3094,7 +3094,7 @@ void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, | |||
3094 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | 3094 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; |
3095 | 3095 | ||
3096 | if (hw->mac.type != e1000_ich8lan) { | 3096 | if (hw->mac.type != e1000_ich8lan) { |
3097 | hw_dbg(hw, "Workaround applies to ICH8 only.\n"); | 3097 | e_dbg("Workaround applies to ICH8 only.\n"); |
3098 | return; | 3098 | return; |
3099 | } | 3099 | } |
3100 | 3100 | ||
@@ -3372,8 +3372,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) | |||
3372 | if (status & E1000_STATUS_PHYRA) | 3372 | if (status & E1000_STATUS_PHYRA) |
3373 | ew32(STATUS, status & ~E1000_STATUS_PHYRA); | 3373 | ew32(STATUS, status & ~E1000_STATUS_PHYRA); |
3374 | else | 3374 | else |
3375 | hw_dbg(hw, | 3375 | e_dbg("PHY Reset Asserted not set - needs delay\n"); |
3376 | "PHY Reset Asserted not set - needs delay\n"); | ||
3377 | } | 3376 | } |
3378 | 3377 | ||
3379 | e1000e_get_cfg_done(hw); | 3378 | e1000e_get_cfg_done(hw); |
@@ -3388,7 +3387,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) | |||
3388 | } else { | 3387 | } else { |
3389 | if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { | 3388 | if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { |
3390 | /* Maybe we should do a basic PHY config */ | 3389 | /* Maybe we should do a basic PHY config */ |
3391 | hw_dbg(hw, "EEPROM not present\n"); | 3390 | e_dbg("EEPROM not present\n"); |
3392 | return -E1000_ERR_CONFIG; | 3391 | return -E1000_ERR_CONFIG; |
3393 | } | 3392 | } |
3394 | } | 3393 | } |
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 5a670a2230e7..fa31c51e5642 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c | |||
@@ -110,12 +110,12 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) | |||
110 | u32 i; | 110 | u32 i; |
111 | 111 | ||
112 | /* Setup the receive address */ | 112 | /* Setup the receive address */ |
113 | hw_dbg(hw, "Programming MAC Address into RAR[0]\n"); | 113 | e_dbg("Programming MAC Address into RAR[0]\n"); |
114 | 114 | ||
115 | e1000e_rar_set(hw, hw->mac.addr, 0); | 115 | e1000e_rar_set(hw, hw->mac.addr, 0); |
116 | 116 | ||
117 | /* Zero out the other (rar_entry_count - 1) receive addresses */ | 117 | /* Zero out the other (rar_entry_count - 1) receive addresses */ |
118 | hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1); | 118 | e_dbg("Clearing RAR[1-%u]\n", rar_count-1); |
119 | for (i = 1; i < rar_count; i++) { | 119 | for (i = 1; i < rar_count; i++) { |
120 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); | 120 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); |
121 | e1e_flush(); | 121 | e1e_flush(); |
@@ -271,7 +271,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, | |||
271 | for (; mc_addr_count > 0; mc_addr_count--) { | 271 | for (; mc_addr_count > 0; mc_addr_count--) { |
272 | u32 hash_value, hash_reg, hash_bit, mta; | 272 | u32 hash_value, hash_reg, hash_bit, mta; |
273 | hash_value = e1000_hash_mc_addr(hw, mc_addr_list); | 273 | hash_value = e1000_hash_mc_addr(hw, mc_addr_list); |
274 | hw_dbg(hw, "Hash value = 0x%03X\n", hash_value); | 274 | e_dbg("Hash value = 0x%03X\n", hash_value); |
275 | hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); | 275 | hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); |
276 | hash_bit = hash_value & 0x1F; | 276 | hash_bit = hash_value & 0x1F; |
277 | mta = (1 << hash_bit); | 277 | mta = (1 << hash_bit); |
@@ -403,7 +403,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) | |||
403 | */ | 403 | */ |
404 | ret_val = e1000e_config_fc_after_link_up(hw); | 404 | ret_val = e1000e_config_fc_after_link_up(hw); |
405 | if (ret_val) { | 405 | if (ret_val) { |
406 | hw_dbg(hw, "Error configuring flow control\n"); | 406 | e_dbg("Error configuring flow control\n"); |
407 | } | 407 | } |
408 | 408 | ||
409 | return ret_val; | 409 | return ret_val; |
@@ -443,7 +443,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
443 | mac->autoneg_failed = 1; | 443 | mac->autoneg_failed = 1; |
444 | return 0; | 444 | return 0; |
445 | } | 445 | } |
446 | hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); | 446 | e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); |
447 | 447 | ||
448 | /* Disable auto-negotiation in the TXCW register */ | 448 | /* Disable auto-negotiation in the TXCW register */ |
449 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | 449 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); |
@@ -456,7 +456,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
456 | /* Configure Flow Control after forcing link up. */ | 456 | /* Configure Flow Control after forcing link up. */ |
457 | ret_val = e1000e_config_fc_after_link_up(hw); | 457 | ret_val = e1000e_config_fc_after_link_up(hw); |
458 | if (ret_val) { | 458 | if (ret_val) { |
459 | hw_dbg(hw, "Error configuring flow control\n"); | 459 | e_dbg("Error configuring flow control\n"); |
460 | return ret_val; | 460 | return ret_val; |
461 | } | 461 | } |
462 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | 462 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
@@ -466,7 +466,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
466 | * and disable forced link in the Device Control register | 466 | * and disable forced link in the Device Control register |
467 | * in an attempt to auto-negotiate with our link partner. | 467 | * in an attempt to auto-negotiate with our link partner. |
468 | */ | 468 | */ |
469 | hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); | 469 | e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); |
470 | ew32(TXCW, mac->txcw); | 470 | ew32(TXCW, mac->txcw); |
471 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | 471 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
472 | 472 | ||
@@ -508,7 +508,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
508 | mac->autoneg_failed = 1; | 508 | mac->autoneg_failed = 1; |
509 | return 0; | 509 | return 0; |
510 | } | 510 | } |
511 | hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); | 511 | e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); |
512 | 512 | ||
513 | /* Disable auto-negotiation in the TXCW register */ | 513 | /* Disable auto-negotiation in the TXCW register */ |
514 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | 514 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); |
@@ -521,7 +521,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
521 | /* Configure Flow Control after forcing link up. */ | 521 | /* Configure Flow Control after forcing link up. */ |
522 | ret_val = e1000e_config_fc_after_link_up(hw); | 522 | ret_val = e1000e_config_fc_after_link_up(hw); |
523 | if (ret_val) { | 523 | if (ret_val) { |
524 | hw_dbg(hw, "Error configuring flow control\n"); | 524 | e_dbg("Error configuring flow control\n"); |
525 | return ret_val; | 525 | return ret_val; |
526 | } | 526 | } |
527 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | 527 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
@@ -531,7 +531,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
531 | * and disable forced link in the Device Control register | 531 | * and disable forced link in the Device Control register |
532 | * in an attempt to auto-negotiate with our link partner. | 532 | * in an attempt to auto-negotiate with our link partner. |
533 | */ | 533 | */ |
534 | hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); | 534 | e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); |
535 | ew32(TXCW, mac->txcw); | 535 | ew32(TXCW, mac->txcw); |
536 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | 536 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
537 | 537 | ||
@@ -548,11 +548,11 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
548 | if (rxcw & E1000_RXCW_SYNCH) { | 548 | if (rxcw & E1000_RXCW_SYNCH) { |
549 | if (!(rxcw & E1000_RXCW_IV)) { | 549 | if (!(rxcw & E1000_RXCW_IV)) { |
550 | mac->serdes_has_link = true; | 550 | mac->serdes_has_link = true; |
551 | hw_dbg(hw, "SERDES: Link up - forced.\n"); | 551 | e_dbg("SERDES: Link up - forced.\n"); |
552 | } | 552 | } |
553 | } else { | 553 | } else { |
554 | mac->serdes_has_link = false; | 554 | mac->serdes_has_link = false; |
555 | hw_dbg(hw, "SERDES: Link down - force failed.\n"); | 555 | e_dbg("SERDES: Link down - force failed.\n"); |
556 | } | 556 | } |
557 | } | 557 | } |
558 | 558 | ||
@@ -565,20 +565,20 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
565 | if (rxcw & E1000_RXCW_SYNCH) { | 565 | if (rxcw & E1000_RXCW_SYNCH) { |
566 | if (!(rxcw & E1000_RXCW_IV)) { | 566 | if (!(rxcw & E1000_RXCW_IV)) { |
567 | mac->serdes_has_link = true; | 567 | mac->serdes_has_link = true; |
568 | hw_dbg(hw, "SERDES: Link up - autoneg " | 568 | e_dbg("SERDES: Link up - autoneg " |
569 | "completed sucessfully.\n"); | 569 | "completed sucessfully.\n"); |
570 | } else { | 570 | } else { |
571 | mac->serdes_has_link = false; | 571 | mac->serdes_has_link = false; |
572 | hw_dbg(hw, "SERDES: Link down - invalid" | 572 | e_dbg("SERDES: Link down - invalid" |
573 | "codewords detected in autoneg.\n"); | 573 | "codewords detected in autoneg.\n"); |
574 | } | 574 | } |
575 | } else { | 575 | } else { |
576 | mac->serdes_has_link = false; | 576 | mac->serdes_has_link = false; |
577 | hw_dbg(hw, "SERDES: Link down - no sync.\n"); | 577 | e_dbg("SERDES: Link down - no sync.\n"); |
578 | } | 578 | } |
579 | } else { | 579 | } else { |
580 | mac->serdes_has_link = false; | 580 | mac->serdes_has_link = false; |
581 | hw_dbg(hw, "SERDES: Link down - autoneg failed\n"); | 581 | e_dbg("SERDES: Link down - autoneg failed\n"); |
582 | } | 582 | } |
583 | } | 583 | } |
584 | 584 | ||
@@ -609,7 +609,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) | |||
609 | ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); | 609 | ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); |
610 | 610 | ||
611 | if (ret_val) { | 611 | if (ret_val) { |
612 | hw_dbg(hw, "NVM Read Error\n"); | 612 | e_dbg("NVM Read Error\n"); |
613 | return ret_val; | 613 | return ret_val; |
614 | } | 614 | } |
615 | 615 | ||
@@ -662,7 +662,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw) | |||
662 | */ | 662 | */ |
663 | hw->fc.current_mode = hw->fc.requested_mode; | 663 | hw->fc.current_mode = hw->fc.requested_mode; |
664 | 664 | ||
665 | hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", | 665 | e_dbg("After fix-ups FlowControl is now = %x\n", |
666 | hw->fc.current_mode); | 666 | hw->fc.current_mode); |
667 | 667 | ||
668 | /* Call the necessary media_type subroutine to configure the link. */ | 668 | /* Call the necessary media_type subroutine to configure the link. */ |
@@ -676,7 +676,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw) | |||
676 | * control is disabled, because it does not hurt anything to | 676 | * control is disabled, because it does not hurt anything to |
677 | * initialize these registers. | 677 | * initialize these registers. |
678 | */ | 678 | */ |
679 | hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n"); | 679 | e_dbg("Initializing the Flow Control address, type and timer regs\n"); |
680 | ew32(FCT, FLOW_CONTROL_TYPE); | 680 | ew32(FCT, FLOW_CONTROL_TYPE); |
681 | ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); | 681 | ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); |
682 | ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); | 682 | ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); |
@@ -746,7 +746,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) | |||
746 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); | 746 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); |
747 | break; | 747 | break; |
748 | default: | 748 | default: |
749 | hw_dbg(hw, "Flow control param set incorrectly\n"); | 749 | e_dbg("Flow control param set incorrectly\n"); |
750 | return -E1000_ERR_CONFIG; | 750 | return -E1000_ERR_CONFIG; |
751 | break; | 751 | break; |
752 | } | 752 | } |
@@ -784,7 +784,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) | |||
784 | break; | 784 | break; |
785 | } | 785 | } |
786 | if (i == FIBER_LINK_UP_LIMIT) { | 786 | if (i == FIBER_LINK_UP_LIMIT) { |
787 | hw_dbg(hw, "Never got a valid link from auto-neg!!!\n"); | 787 | e_dbg("Never got a valid link from auto-neg!!!\n"); |
788 | mac->autoneg_failed = 1; | 788 | mac->autoneg_failed = 1; |
789 | /* | 789 | /* |
790 | * AutoNeg failed to achieve a link, so we'll call | 790 | * AutoNeg failed to achieve a link, so we'll call |
@@ -794,13 +794,13 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) | |||
794 | */ | 794 | */ |
795 | ret_val = mac->ops.check_for_link(hw); | 795 | ret_val = mac->ops.check_for_link(hw); |
796 | if (ret_val) { | 796 | if (ret_val) { |
797 | hw_dbg(hw, "Error while checking for link\n"); | 797 | e_dbg("Error while checking for link\n"); |
798 | return ret_val; | 798 | return ret_val; |
799 | } | 799 | } |
800 | mac->autoneg_failed = 0; | 800 | mac->autoneg_failed = 0; |
801 | } else { | 801 | } else { |
802 | mac->autoneg_failed = 0; | 802 | mac->autoneg_failed = 0; |
803 | hw_dbg(hw, "Valid Link Found\n"); | 803 | e_dbg("Valid Link Found\n"); |
804 | } | 804 | } |
805 | 805 | ||
806 | return 0; | 806 | return 0; |
@@ -836,7 +836,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
836 | * then the link-up status bit will be set and the flow control enable | 836 | * then the link-up status bit will be set and the flow control enable |
837 | * bits (RFCE and TFCE) will be set according to their negotiated value. | 837 | * bits (RFCE and TFCE) will be set according to their negotiated value. |
838 | */ | 838 | */ |
839 | hw_dbg(hw, "Auto-negotiation enabled\n"); | 839 | e_dbg("Auto-negotiation enabled\n"); |
840 | 840 | ||
841 | ew32(CTRL, ctrl); | 841 | ew32(CTRL, ctrl); |
842 | e1e_flush(); | 842 | e1e_flush(); |
@@ -851,7 +851,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
851 | (er32(CTRL) & E1000_CTRL_SWDPIN1)) { | 851 | (er32(CTRL) & E1000_CTRL_SWDPIN1)) { |
852 | ret_val = e1000_poll_fiber_serdes_link_generic(hw); | 852 | ret_val = e1000_poll_fiber_serdes_link_generic(hw); |
853 | } else { | 853 | } else { |
854 | hw_dbg(hw, "No signal detected\n"); | 854 | e_dbg("No signal detected\n"); |
855 | } | 855 | } |
856 | 856 | ||
857 | return 0; | 857 | return 0; |
@@ -947,7 +947,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) | |||
947 | * 3: Both Rx and Tx flow control (symmetric) is enabled. | 947 | * 3: Both Rx and Tx flow control (symmetric) is enabled. |
948 | * other: No other values should be possible at this point. | 948 | * other: No other values should be possible at this point. |
949 | */ | 949 | */ |
950 | hw_dbg(hw, "hw->fc.current_mode = %u\n", hw->fc.current_mode); | 950 | e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); |
951 | 951 | ||
952 | switch (hw->fc.current_mode) { | 952 | switch (hw->fc.current_mode) { |
953 | case e1000_fc_none: | 953 | case e1000_fc_none: |
@@ -965,7 +965,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) | |||
965 | ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); | 965 | ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); |
966 | break; | 966 | break; |
967 | default: | 967 | default: |
968 | hw_dbg(hw, "Flow control param set incorrectly\n"); | 968 | e_dbg("Flow control param set incorrectly\n"); |
969 | return -E1000_ERR_CONFIG; | 969 | return -E1000_ERR_CONFIG; |
970 | } | 970 | } |
971 | 971 | ||
@@ -1006,7 +1006,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1006 | } | 1006 | } |
1007 | 1007 | ||
1008 | if (ret_val) { | 1008 | if (ret_val) { |
1009 | hw_dbg(hw, "Error forcing flow control settings\n"); | 1009 | e_dbg("Error forcing flow control settings\n"); |
1010 | return ret_val; | 1010 | return ret_val; |
1011 | } | 1011 | } |
1012 | 1012 | ||
@@ -1030,7 +1030,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1030 | return ret_val; | 1030 | return ret_val; |
1031 | 1031 | ||
1032 | if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { | 1032 | if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { |
1033 | hw_dbg(hw, "Copper PHY and Auto Neg " | 1033 | e_dbg("Copper PHY and Auto Neg " |
1034 | "has not completed.\n"); | 1034 | "has not completed.\n"); |
1035 | return ret_val; | 1035 | return ret_val; |
1036 | } | 1036 | } |
@@ -1095,10 +1095,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1095 | */ | 1095 | */ |
1096 | if (hw->fc.requested_mode == e1000_fc_full) { | 1096 | if (hw->fc.requested_mode == e1000_fc_full) { |
1097 | hw->fc.current_mode = e1000_fc_full; | 1097 | hw->fc.current_mode = e1000_fc_full; |
1098 | hw_dbg(hw, "Flow Control = FULL.\r\n"); | 1098 | e_dbg("Flow Control = FULL.\r\n"); |
1099 | } else { | 1099 | } else { |
1100 | hw->fc.current_mode = e1000_fc_rx_pause; | 1100 | hw->fc.current_mode = e1000_fc_rx_pause; |
1101 | hw_dbg(hw, "Flow Control = " | 1101 | e_dbg("Flow Control = " |
1102 | "RX PAUSE frames only.\r\n"); | 1102 | "RX PAUSE frames only.\r\n"); |
1103 | } | 1103 | } |
1104 | } | 1104 | } |
@@ -1116,7 +1116,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1116 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && | 1116 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && |
1117 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { | 1117 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { |
1118 | hw->fc.current_mode = e1000_fc_tx_pause; | 1118 | hw->fc.current_mode = e1000_fc_tx_pause; |
1119 | hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n"); | 1119 | e_dbg("Flow Control = Tx PAUSE frames only.\r\n"); |
1120 | } | 1120 | } |
1121 | /* | 1121 | /* |
1122 | * For transmitting PAUSE frames ONLY. | 1122 | * For transmitting PAUSE frames ONLY. |
@@ -1132,14 +1132,14 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1132 | !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && | 1132 | !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && |
1133 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { | 1133 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { |
1134 | hw->fc.current_mode = e1000_fc_rx_pause; | 1134 | hw->fc.current_mode = e1000_fc_rx_pause; |
1135 | hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n"); | 1135 | e_dbg("Flow Control = Rx PAUSE frames only.\r\n"); |
1136 | } else { | 1136 | } else { |
1137 | /* | 1137 | /* |
1138 | * Per the IEEE spec, at this point flow control | 1138 | * Per the IEEE spec, at this point flow control |
1139 | * should be disabled. | 1139 | * should be disabled. |
1140 | */ | 1140 | */ |
1141 | hw->fc.current_mode = e1000_fc_none; | 1141 | hw->fc.current_mode = e1000_fc_none; |
1142 | hw_dbg(hw, "Flow Control = NONE.\r\n"); | 1142 | e_dbg("Flow Control = NONE.\r\n"); |
1143 | } | 1143 | } |
1144 | 1144 | ||
1145 | /* | 1145 | /* |
@@ -1149,7 +1149,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1149 | */ | 1149 | */ |
1150 | ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); | 1150 | ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); |
1151 | if (ret_val) { | 1151 | if (ret_val) { |
1152 | hw_dbg(hw, "Error getting link speed and duplex\n"); | 1152 | e_dbg("Error getting link speed and duplex\n"); |
1153 | return ret_val; | 1153 | return ret_val; |
1154 | } | 1154 | } |
1155 | 1155 | ||
@@ -1162,7 +1162,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1162 | */ | 1162 | */ |
1163 | ret_val = e1000e_force_mac_fc(hw); | 1163 | ret_val = e1000e_force_mac_fc(hw); |
1164 | if (ret_val) { | 1164 | if (ret_val) { |
1165 | hw_dbg(hw, "Error forcing flow control settings\n"); | 1165 | e_dbg("Error forcing flow control settings\n"); |
1166 | return ret_val; | 1166 | return ret_val; |
1167 | } | 1167 | } |
1168 | } | 1168 | } |
@@ -1186,21 +1186,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup | |||
1186 | status = er32(STATUS); | 1186 | status = er32(STATUS); |
1187 | if (status & E1000_STATUS_SPEED_1000) { | 1187 | if (status & E1000_STATUS_SPEED_1000) { |
1188 | *speed = SPEED_1000; | 1188 | *speed = SPEED_1000; |
1189 | hw_dbg(hw, "1000 Mbs, "); | 1189 | e_dbg("1000 Mbs, "); |
1190 | } else if (status & E1000_STATUS_SPEED_100) { | 1190 | } else if (status & E1000_STATUS_SPEED_100) { |
1191 | *speed = SPEED_100; | 1191 | *speed = SPEED_100; |
1192 | hw_dbg(hw, "100 Mbs, "); | 1192 | e_dbg("100 Mbs, "); |
1193 | } else { | 1193 | } else { |
1194 | *speed = SPEED_10; | 1194 | *speed = SPEED_10; |
1195 | hw_dbg(hw, "10 Mbs, "); | 1195 | e_dbg("10 Mbs, "); |
1196 | } | 1196 | } |
1197 | 1197 | ||
1198 | if (status & E1000_STATUS_FD) { | 1198 | if (status & E1000_STATUS_FD) { |
1199 | *duplex = FULL_DUPLEX; | 1199 | *duplex = FULL_DUPLEX; |
1200 | hw_dbg(hw, "Full Duplex\n"); | 1200 | e_dbg("Full Duplex\n"); |
1201 | } else { | 1201 | } else { |
1202 | *duplex = HALF_DUPLEX; | 1202 | *duplex = HALF_DUPLEX; |
1203 | hw_dbg(hw, "Half Duplex\n"); | 1203 | e_dbg("Half Duplex\n"); |
1204 | } | 1204 | } |
1205 | 1205 | ||
1206 | return 0; | 1206 | return 0; |
@@ -1246,7 +1246,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) | |||
1246 | } | 1246 | } |
1247 | 1247 | ||
1248 | if (i == timeout) { | 1248 | if (i == timeout) { |
1249 | hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n"); | 1249 | e_dbg("Driver can't access device - SMBI bit is set.\n"); |
1250 | return -E1000_ERR_NVM; | 1250 | return -E1000_ERR_NVM; |
1251 | } | 1251 | } |
1252 | 1252 | ||
@@ -1265,7 +1265,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) | |||
1265 | if (i == timeout) { | 1265 | if (i == timeout) { |
1266 | /* Release semaphores */ | 1266 | /* Release semaphores */ |
1267 | e1000e_put_hw_semaphore(hw); | 1267 | e1000e_put_hw_semaphore(hw); |
1268 | hw_dbg(hw, "Driver can't access the NVM\n"); | 1268 | e_dbg("Driver can't access the NVM\n"); |
1269 | return -E1000_ERR_NVM; | 1269 | return -E1000_ERR_NVM; |
1270 | } | 1270 | } |
1271 | 1271 | ||
@@ -1305,7 +1305,7 @@ s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) | |||
1305 | } | 1305 | } |
1306 | 1306 | ||
1307 | if (i == AUTO_READ_DONE_TIMEOUT) { | 1307 | if (i == AUTO_READ_DONE_TIMEOUT) { |
1308 | hw_dbg(hw, "Auto read by HW from NVM has not completed.\n"); | 1308 | e_dbg("Auto read by HW from NVM has not completed.\n"); |
1309 | return -E1000_ERR_RESET; | 1309 | return -E1000_ERR_RESET; |
1310 | } | 1310 | } |
1311 | 1311 | ||
@@ -1326,7 +1326,7 @@ s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) | |||
1326 | 1326 | ||
1327 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); | 1327 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); |
1328 | if (ret_val) { | 1328 | if (ret_val) { |
1329 | hw_dbg(hw, "NVM Read Error\n"); | 1329 | e_dbg("NVM Read Error\n"); |
1330 | return ret_val; | 1330 | return ret_val; |
1331 | } | 1331 | } |
1332 | 1332 | ||
@@ -1580,7 +1580,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw) | |||
1580 | } | 1580 | } |
1581 | 1581 | ||
1582 | if (!timeout) { | 1582 | if (!timeout) { |
1583 | hw_dbg(hw, "Master requests are pending.\n"); | 1583 | e_dbg("Master requests are pending.\n"); |
1584 | return -E1000_ERR_MASTER_REQUESTS_PENDING; | 1584 | return -E1000_ERR_MASTER_REQUESTS_PENDING; |
1585 | } | 1585 | } |
1586 | 1586 | ||
@@ -1804,7 +1804,7 @@ s32 e1000e_acquire_nvm(struct e1000_hw *hw) | |||
1804 | if (!timeout) { | 1804 | if (!timeout) { |
1805 | eecd &= ~E1000_EECD_REQ; | 1805 | eecd &= ~E1000_EECD_REQ; |
1806 | ew32(EECD, eecd); | 1806 | ew32(EECD, eecd); |
1807 | hw_dbg(hw, "Could not acquire NVM grant\n"); | 1807 | e_dbg("Could not acquire NVM grant\n"); |
1808 | return -E1000_ERR_NVM; | 1808 | return -E1000_ERR_NVM; |
1809 | } | 1809 | } |
1810 | 1810 | ||
@@ -1909,7 +1909,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) | |||
1909 | } | 1909 | } |
1910 | 1910 | ||
1911 | if (!timeout) { | 1911 | if (!timeout) { |
1912 | hw_dbg(hw, "SPI NVM Status error\n"); | 1912 | e_dbg("SPI NVM Status error\n"); |
1913 | return -E1000_ERR_NVM; | 1913 | return -E1000_ERR_NVM; |
1914 | } | 1914 | } |
1915 | } | 1915 | } |
@@ -1938,7 +1938,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
1938 | */ | 1938 | */ |
1939 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 1939 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
1940 | (words == 0)) { | 1940 | (words == 0)) { |
1941 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1941 | e_dbg("nvm parameter(s) out of bounds\n"); |
1942 | return -E1000_ERR_NVM; | 1942 | return -E1000_ERR_NVM; |
1943 | } | 1943 | } |
1944 | 1944 | ||
@@ -1981,7 +1981,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
1981 | */ | 1981 | */ |
1982 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 1982 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
1983 | (words == 0)) { | 1983 | (words == 0)) { |
1984 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1984 | e_dbg("nvm parameter(s) out of bounds\n"); |
1985 | return -E1000_ERR_NVM; | 1985 | return -E1000_ERR_NVM; |
1986 | } | 1986 | } |
1987 | 1987 | ||
@@ -2061,7 +2061,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw) | |||
2061 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, | 2061 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, |
2062 | &mac_addr_offset); | 2062 | &mac_addr_offset); |
2063 | if (ret_val) { | 2063 | if (ret_val) { |
2064 | hw_dbg(hw, "NVM Read Error\n"); | 2064 | e_dbg("NVM Read Error\n"); |
2065 | return ret_val; | 2065 | return ret_val; |
2066 | } | 2066 | } |
2067 | if (mac_addr_offset == 0xFFFF) | 2067 | if (mac_addr_offset == 0xFFFF) |
@@ -2076,7 +2076,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw) | |||
2076 | ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, | 2076 | ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, |
2077 | &nvm_data); | 2077 | &nvm_data); |
2078 | if (ret_val) { | 2078 | if (ret_val) { |
2079 | hw_dbg(hw, "NVM Read Error\n"); | 2079 | e_dbg("NVM Read Error\n"); |
2080 | return ret_val; | 2080 | return ret_val; |
2081 | } | 2081 | } |
2082 | if (nvm_data & 0x0001) | 2082 | if (nvm_data & 0x0001) |
@@ -2091,7 +2091,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw) | |||
2091 | offset = mac_addr_offset + (i >> 1); | 2091 | offset = mac_addr_offset + (i >> 1); |
2092 | ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); | 2092 | ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); |
2093 | if (ret_val) { | 2093 | if (ret_val) { |
2094 | hw_dbg(hw, "NVM Read Error\n"); | 2094 | e_dbg("NVM Read Error\n"); |
2095 | return ret_val; | 2095 | return ret_val; |
2096 | } | 2096 | } |
2097 | hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); | 2097 | hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); |
@@ -2124,14 +2124,14 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) | |||
2124 | for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { | 2124 | for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { |
2125 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); | 2125 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); |
2126 | if (ret_val) { | 2126 | if (ret_val) { |
2127 | hw_dbg(hw, "NVM Read Error\n"); | 2127 | e_dbg("NVM Read Error\n"); |
2128 | return ret_val; | 2128 | return ret_val; |
2129 | } | 2129 | } |
2130 | checksum += nvm_data; | 2130 | checksum += nvm_data; |
2131 | } | 2131 | } |
2132 | 2132 | ||
2133 | if (checksum != (u16) NVM_SUM) { | 2133 | if (checksum != (u16) NVM_SUM) { |
2134 | hw_dbg(hw, "NVM Checksum Invalid\n"); | 2134 | e_dbg("NVM Checksum Invalid\n"); |
2135 | return -E1000_ERR_NVM; | 2135 | return -E1000_ERR_NVM; |
2136 | } | 2136 | } |
2137 | 2137 | ||
@@ -2155,7 +2155,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) | |||
2155 | for (i = 0; i < NVM_CHECKSUM_REG; i++) { | 2155 | for (i = 0; i < NVM_CHECKSUM_REG; i++) { |
2156 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); | 2156 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); |
2157 | if (ret_val) { | 2157 | if (ret_val) { |
2158 | hw_dbg(hw, "NVM Read Error while updating checksum.\n"); | 2158 | e_dbg("NVM Read Error while updating checksum.\n"); |
2159 | return ret_val; | 2159 | return ret_val; |
2160 | } | 2160 | } |
2161 | checksum += nvm_data; | 2161 | checksum += nvm_data; |
@@ -2163,7 +2163,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) | |||
2163 | checksum = (u16) NVM_SUM - checksum; | 2163 | checksum = (u16) NVM_SUM - checksum; |
2164 | ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); | 2164 | ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); |
2165 | if (ret_val) | 2165 | if (ret_val) |
2166 | hw_dbg(hw, "NVM Write Error while updating checksum.\n"); | 2166 | e_dbg("NVM Write Error while updating checksum.\n"); |
2167 | 2167 | ||
2168 | return ret_val; | 2168 | return ret_val; |
2169 | } | 2169 | } |
@@ -2226,7 +2226,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) | |||
2226 | /* Check that the host interface is enabled. */ | 2226 | /* Check that the host interface is enabled. */ |
2227 | hicr = er32(HICR); | 2227 | hicr = er32(HICR); |
2228 | if ((hicr & E1000_HICR_EN) == 0) { | 2228 | if ((hicr & E1000_HICR_EN) == 0) { |
2229 | hw_dbg(hw, "E1000_HOST_EN bit disabled.\n"); | 2229 | e_dbg("E1000_HOST_EN bit disabled.\n"); |
2230 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | 2230 | return -E1000_ERR_HOST_INTERFACE_COMMAND; |
2231 | } | 2231 | } |
2232 | /* check the previous command is completed */ | 2232 | /* check the previous command is completed */ |
@@ -2238,7 +2238,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) | |||
2238 | } | 2238 | } |
2239 | 2239 | ||
2240 | if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { | 2240 | if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { |
2241 | hw_dbg(hw, "Previous command timeout failed .\n"); | 2241 | e_dbg("Previous command timeout failed .\n"); |
2242 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | 2242 | return -E1000_ERR_HOST_INTERFACE_COMMAND; |
2243 | } | 2243 | } |
2244 | 2244 | ||
@@ -2509,14 +2509,14 @@ s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num) | |||
2509 | 2509 | ||
2510 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); | 2510 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); |
2511 | if (ret_val) { | 2511 | if (ret_val) { |
2512 | hw_dbg(hw, "NVM Read Error\n"); | 2512 | e_dbg("NVM Read Error\n"); |
2513 | return ret_val; | 2513 | return ret_val; |
2514 | } | 2514 | } |
2515 | *pba_num = (u32)(nvm_data << 16); | 2515 | *pba_num = (u32)(nvm_data << 16); |
2516 | 2516 | ||
2517 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); | 2517 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); |
2518 | if (ret_val) { | 2518 | if (ret_val) { |
2519 | hw_dbg(hw, "NVM Read Error\n"); | 2519 | e_dbg("NVM Read Error\n"); |
2520 | return ret_val; | 2520 | return ret_val; |
2521 | } | 2521 | } |
2522 | *pba_num |= nvm_data; | 2522 | *pba_num |= nvm_data; |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index ff9f9f1725e3..0b5352307f16 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -65,17 +65,6 @@ static const struct e1000_info *e1000_info_tbl[] = { | |||
65 | [board_pchlan] = &e1000_pch_info, | 65 | [board_pchlan] = &e1000_pch_info, |
66 | }; | 66 | }; |
67 | 67 | ||
68 | #ifdef DEBUG | ||
69 | /** | ||
70 | * e1000_get_hw_dev_name - return device name string | ||
71 | * used by hardware layer to print debugging information | ||
72 | **/ | ||
73 | char *e1000e_get_hw_dev_name(struct e1000_hw *hw) | ||
74 | { | ||
75 | return hw->adapter->netdev->name; | ||
76 | } | ||
77 | #endif | ||
78 | |||
79 | /** | 68 | /** |
80 | * e1000_desc_unused - calculate if we have unused descriptors | 69 | * e1000_desc_unused - calculate if we have unused descriptors |
81 | **/ | 70 | **/ |
@@ -415,6 +404,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
415 | { | 404 | { |
416 | struct net_device *netdev = adapter->netdev; | 405 | struct net_device *netdev = adapter->netdev; |
417 | struct pci_dev *pdev = adapter->pdev; | 406 | struct pci_dev *pdev = adapter->pdev; |
407 | struct e1000_hw *hw = &adapter->hw; | ||
418 | struct e1000_ring *rx_ring = adapter->rx_ring; | 408 | struct e1000_ring *rx_ring = adapter->rx_ring; |
419 | struct e1000_rx_desc *rx_desc, *next_rxd; | 409 | struct e1000_rx_desc *rx_desc, *next_rxd; |
420 | struct e1000_buffer *buffer_info, *next_buffer; | 410 | struct e1000_buffer *buffer_info, *next_buffer; |
@@ -464,8 +454,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
464 | * packet, also make sure the frame isn't just CRC only */ | 454 | * packet, also make sure the frame isn't just CRC only */ |
465 | if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { | 455 | if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { |
466 | /* All receives must fit into a single buffer */ | 456 | /* All receives must fit into a single buffer */ |
467 | e_dbg("%s: Receive packet consumed multiple buffers\n", | 457 | e_dbg("Receive packet consumed multiple buffers\n"); |
468 | netdev->name); | ||
469 | /* recycle */ | 458 | /* recycle */ |
470 | buffer_info->skb = skb; | 459 | buffer_info->skb = skb; |
471 | goto next_desc; | 460 | goto next_desc; |
@@ -682,6 +671,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |||
682 | static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | 671 | static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, |
683 | int *work_done, int work_to_do) | 672 | int *work_done, int work_to_do) |
684 | { | 673 | { |
674 | struct e1000_hw *hw = &adapter->hw; | ||
685 | union e1000_rx_desc_packet_split *rx_desc, *next_rxd; | 675 | union e1000_rx_desc_packet_split *rx_desc, *next_rxd; |
686 | struct net_device *netdev = adapter->netdev; | 676 | struct net_device *netdev = adapter->netdev; |
687 | struct pci_dev *pdev = adapter->pdev; | 677 | struct pci_dev *pdev = adapter->pdev; |
@@ -725,8 +715,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
725 | buffer_info->dma = 0; | 715 | buffer_info->dma = 0; |
726 | 716 | ||
727 | if (!(staterr & E1000_RXD_STAT_EOP)) { | 717 | if (!(staterr & E1000_RXD_STAT_EOP)) { |
728 | e_dbg("%s: Packet Split buffers didn't pick up the " | 718 | e_dbg("Packet Split buffers didn't pick up the full " |
729 | "full packet\n", netdev->name); | 719 | "packet\n"); |
730 | dev_kfree_skb_irq(skb); | 720 | dev_kfree_skb_irq(skb); |
731 | goto next_desc; | 721 | goto next_desc; |
732 | } | 722 | } |
@@ -739,8 +729,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
739 | length = le16_to_cpu(rx_desc->wb.middle.length0); | 729 | length = le16_to_cpu(rx_desc->wb.middle.length0); |
740 | 730 | ||
741 | if (!length) { | 731 | if (!length) { |
742 | e_dbg("%s: Last part of the packet spanning multiple " | 732 | e_dbg("Last part of the packet spanning multiple " |
743 | "descriptors\n", netdev->name); | 733 | "descriptors\n"); |
744 | dev_kfree_skb_irq(skb); | 734 | dev_kfree_skb_irq(skb); |
745 | goto next_desc; | 735 | goto next_desc; |
746 | } | 736 | } |
@@ -2931,7 +2921,7 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data) | |||
2931 | struct e1000_hw *hw = &adapter->hw; | 2921 | struct e1000_hw *hw = &adapter->hw; |
2932 | u32 icr = er32(ICR); | 2922 | u32 icr = er32(ICR); |
2933 | 2923 | ||
2934 | e_dbg("%s: icr is %08X\n", netdev->name, icr); | 2924 | e_dbg("icr is %08X\n", icr); |
2935 | if (icr & E1000_ICR_RXSEQ) { | 2925 | if (icr & E1000_ICR_RXSEQ) { |
2936 | adapter->flags &= ~FLAG_MSI_TEST_FAILED; | 2926 | adapter->flags &= ~FLAG_MSI_TEST_FAILED; |
2937 | wmb(); | 2927 | wmb(); |
@@ -3001,7 +2991,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) | |||
3001 | goto msi_test_failed; | 2991 | goto msi_test_failed; |
3002 | 2992 | ||
3003 | /* okay so the test worked, restore settings */ | 2993 | /* okay so the test worked, restore settings */ |
3004 | e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name); | 2994 | e_dbg("MSI interrupt test succeeded!\n"); |
3005 | msi_test_failed: | 2995 | msi_test_failed: |
3006 | e1000e_set_interrupt_capability(adapter); | 2996 | e1000e_set_interrupt_capability(adapter); |
3007 | e1000_request_irq(adapter); | 2997 | e1000_request_irq(adapter); |
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index 95a8196cf44c..cff1df204031 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c | |||
@@ -212,7 +212,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) | |||
212 | u32 i, mdic = 0; | 212 | u32 i, mdic = 0; |
213 | 213 | ||
214 | if (offset > MAX_PHY_REG_ADDRESS) { | 214 | if (offset > MAX_PHY_REG_ADDRESS) { |
215 | hw_dbg(hw, "PHY Address %d is out of range\n", offset); | 215 | e_dbg("PHY Address %d is out of range\n", offset); |
216 | return -E1000_ERR_PARAM; | 216 | return -E1000_ERR_PARAM; |
217 | } | 217 | } |
218 | 218 | ||
@@ -239,11 +239,11 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) | |||
239 | break; | 239 | break; |
240 | } | 240 | } |
241 | if (!(mdic & E1000_MDIC_READY)) { | 241 | if (!(mdic & E1000_MDIC_READY)) { |
242 | hw_dbg(hw, "MDI Read did not complete\n"); | 242 | e_dbg("MDI Read did not complete\n"); |
243 | return -E1000_ERR_PHY; | 243 | return -E1000_ERR_PHY; |
244 | } | 244 | } |
245 | if (mdic & E1000_MDIC_ERROR) { | 245 | if (mdic & E1000_MDIC_ERROR) { |
246 | hw_dbg(hw, "MDI Error\n"); | 246 | e_dbg("MDI Error\n"); |
247 | return -E1000_ERR_PHY; | 247 | return -E1000_ERR_PHY; |
248 | } | 248 | } |
249 | *data = (u16) mdic; | 249 | *data = (u16) mdic; |
@@ -265,7 +265,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) | |||
265 | u32 i, mdic = 0; | 265 | u32 i, mdic = 0; |
266 | 266 | ||
267 | if (offset > MAX_PHY_REG_ADDRESS) { | 267 | if (offset > MAX_PHY_REG_ADDRESS) { |
268 | hw_dbg(hw, "PHY Address %d is out of range\n", offset); | 268 | e_dbg("PHY Address %d is out of range\n", offset); |
269 | return -E1000_ERR_PARAM; | 269 | return -E1000_ERR_PARAM; |
270 | } | 270 | } |
271 | 271 | ||
@@ -293,11 +293,11 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) | |||
293 | break; | 293 | break; |
294 | } | 294 | } |
295 | if (!(mdic & E1000_MDIC_READY)) { | 295 | if (!(mdic & E1000_MDIC_READY)) { |
296 | hw_dbg(hw, "MDI Write did not complete\n"); | 296 | e_dbg("MDI Write did not complete\n"); |
297 | return -E1000_ERR_PHY; | 297 | return -E1000_ERR_PHY; |
298 | } | 298 | } |
299 | if (mdic & E1000_MDIC_ERROR) { | 299 | if (mdic & E1000_MDIC_ERROR) { |
300 | hw_dbg(hw, "MDI Error\n"); | 300 | e_dbg("MDI Error\n"); |
301 | return -E1000_ERR_PHY; | 301 | return -E1000_ERR_PHY; |
302 | } | 302 | } |
303 | 303 | ||
@@ -786,7 +786,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) | |||
786 | /* Commit the changes. */ | 786 | /* Commit the changes. */ |
787 | ret_val = e1000e_commit_phy(hw); | 787 | ret_val = e1000e_commit_phy(hw); |
788 | if (ret_val) { | 788 | if (ret_val) { |
789 | hw_dbg(hw, "Error committing the PHY changes\n"); | 789 | e_dbg("Error committing the PHY changes\n"); |
790 | return ret_val; | 790 | return ret_val; |
791 | } | 791 | } |
792 | 792 | ||
@@ -823,7 +823,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) | |||
823 | 823 | ||
824 | ret_val = e1000_phy_hw_reset(hw); | 824 | ret_val = e1000_phy_hw_reset(hw); |
825 | if (ret_val) { | 825 | if (ret_val) { |
826 | hw_dbg(hw, "Error resetting the PHY.\n"); | 826 | e_dbg("Error resetting the PHY.\n"); |
827 | return ret_val; | 827 | return ret_val; |
828 | } | 828 | } |
829 | 829 | ||
@@ -836,7 +836,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) | |||
836 | /* disable lplu d0 during driver init */ | 836 | /* disable lplu d0 during driver init */ |
837 | ret_val = e1000_set_d0_lplu_state(hw, 0); | 837 | ret_val = e1000_set_d0_lplu_state(hw, 0); |
838 | if (ret_val) { | 838 | if (ret_val) { |
839 | hw_dbg(hw, "Error Disabling LPLU D0\n"); | 839 | e_dbg("Error Disabling LPLU D0\n"); |
840 | return ret_val; | 840 | return ret_val; |
841 | } | 841 | } |
842 | /* Configure mdi-mdix settings */ | 842 | /* Configure mdi-mdix settings */ |
@@ -972,39 +972,39 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
972 | NWAY_AR_10T_HD_CAPS); | 972 | NWAY_AR_10T_HD_CAPS); |
973 | mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); | 973 | mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); |
974 | 974 | ||
975 | hw_dbg(hw, "autoneg_advertised %x\n", phy->autoneg_advertised); | 975 | e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); |
976 | 976 | ||
977 | /* Do we want to advertise 10 Mb Half Duplex? */ | 977 | /* Do we want to advertise 10 Mb Half Duplex? */ |
978 | if (phy->autoneg_advertised & ADVERTISE_10_HALF) { | 978 | if (phy->autoneg_advertised & ADVERTISE_10_HALF) { |
979 | hw_dbg(hw, "Advertise 10mb Half duplex\n"); | 979 | e_dbg("Advertise 10mb Half duplex\n"); |
980 | mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; | 980 | mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; |
981 | } | 981 | } |
982 | 982 | ||
983 | /* Do we want to advertise 10 Mb Full Duplex? */ | 983 | /* Do we want to advertise 10 Mb Full Duplex? */ |
984 | if (phy->autoneg_advertised & ADVERTISE_10_FULL) { | 984 | if (phy->autoneg_advertised & ADVERTISE_10_FULL) { |
985 | hw_dbg(hw, "Advertise 10mb Full duplex\n"); | 985 | e_dbg("Advertise 10mb Full duplex\n"); |
986 | mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; | 986 | mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; |
987 | } | 987 | } |
988 | 988 | ||
989 | /* Do we want to advertise 100 Mb Half Duplex? */ | 989 | /* Do we want to advertise 100 Mb Half Duplex? */ |
990 | if (phy->autoneg_advertised & ADVERTISE_100_HALF) { | 990 | if (phy->autoneg_advertised & ADVERTISE_100_HALF) { |
991 | hw_dbg(hw, "Advertise 100mb Half duplex\n"); | 991 | e_dbg("Advertise 100mb Half duplex\n"); |
992 | mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; | 992 | mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; |
993 | } | 993 | } |
994 | 994 | ||
995 | /* Do we want to advertise 100 Mb Full Duplex? */ | 995 | /* Do we want to advertise 100 Mb Full Duplex? */ |
996 | if (phy->autoneg_advertised & ADVERTISE_100_FULL) { | 996 | if (phy->autoneg_advertised & ADVERTISE_100_FULL) { |
997 | hw_dbg(hw, "Advertise 100mb Full duplex\n"); | 997 | e_dbg("Advertise 100mb Full duplex\n"); |
998 | mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; | 998 | mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; |
999 | } | 999 | } |
1000 | 1000 | ||
1001 | /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ | 1001 | /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ |
1002 | if (phy->autoneg_advertised & ADVERTISE_1000_HALF) | 1002 | if (phy->autoneg_advertised & ADVERTISE_1000_HALF) |
1003 | hw_dbg(hw, "Advertise 1000mb Half duplex request denied!\n"); | 1003 | e_dbg("Advertise 1000mb Half duplex request denied!\n"); |
1004 | 1004 | ||
1005 | /* Do we want to advertise 1000 Mb Full Duplex? */ | 1005 | /* Do we want to advertise 1000 Mb Full Duplex? */ |
1006 | if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { | 1006 | if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { |
1007 | hw_dbg(hw, "Advertise 1000mb Full duplex\n"); | 1007 | e_dbg("Advertise 1000mb Full duplex\n"); |
1008 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; | 1008 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; |
1009 | } | 1009 | } |
1010 | 1010 | ||
@@ -1063,7 +1063,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1063 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | 1063 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); |
1064 | break; | 1064 | break; |
1065 | default: | 1065 | default: |
1066 | hw_dbg(hw, "Flow control param set incorrectly\n"); | 1066 | e_dbg("Flow control param set incorrectly\n"); |
1067 | ret_val = -E1000_ERR_CONFIG; | 1067 | ret_val = -E1000_ERR_CONFIG; |
1068 | return ret_val; | 1068 | return ret_val; |
1069 | } | 1069 | } |
@@ -1072,7 +1072,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1072 | if (ret_val) | 1072 | if (ret_val) |
1073 | return ret_val; | 1073 | return ret_val; |
1074 | 1074 | ||
1075 | hw_dbg(hw, "Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); | 1075 | e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); |
1076 | 1076 | ||
1077 | if (phy->autoneg_mask & ADVERTISE_1000_FULL) { | 1077 | if (phy->autoneg_mask & ADVERTISE_1000_FULL) { |
1078 | ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); | 1078 | ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); |
@@ -1109,13 +1109,13 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
1109 | if (phy->autoneg_advertised == 0) | 1109 | if (phy->autoneg_advertised == 0) |
1110 | phy->autoneg_advertised = phy->autoneg_mask; | 1110 | phy->autoneg_advertised = phy->autoneg_mask; |
1111 | 1111 | ||
1112 | hw_dbg(hw, "Reconfiguring auto-neg advertisement params\n"); | 1112 | e_dbg("Reconfiguring auto-neg advertisement params\n"); |
1113 | ret_val = e1000_phy_setup_autoneg(hw); | 1113 | ret_val = e1000_phy_setup_autoneg(hw); |
1114 | if (ret_val) { | 1114 | if (ret_val) { |
1115 | hw_dbg(hw, "Error Setting up Auto-Negotiation\n"); | 1115 | e_dbg("Error Setting up Auto-Negotiation\n"); |
1116 | return ret_val; | 1116 | return ret_val; |
1117 | } | 1117 | } |
1118 | hw_dbg(hw, "Restarting Auto-Neg\n"); | 1118 | e_dbg("Restarting Auto-Neg\n"); |
1119 | 1119 | ||
1120 | /* | 1120 | /* |
1121 | * Restart auto-negotiation by setting the Auto Neg Enable bit and | 1121 | * Restart auto-negotiation by setting the Auto Neg Enable bit and |
@@ -1137,7 +1137,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
1137 | if (phy->autoneg_wait_to_complete) { | 1137 | if (phy->autoneg_wait_to_complete) { |
1138 | ret_val = e1000_wait_autoneg(hw); | 1138 | ret_val = e1000_wait_autoneg(hw); |
1139 | if (ret_val) { | 1139 | if (ret_val) { |
1140 | hw_dbg(hw, "Error while waiting for " | 1140 | e_dbg("Error while waiting for " |
1141 | "autoneg to complete\n"); | 1141 | "autoneg to complete\n"); |
1142 | return ret_val; | 1142 | return ret_val; |
1143 | } | 1143 | } |
@@ -1175,10 +1175,10 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw) | |||
1175 | * PHY will be set to 10H, 10F, 100H or 100F | 1175 | * PHY will be set to 10H, 10F, 100H or 100F |
1176 | * depending on user settings. | 1176 | * depending on user settings. |
1177 | */ | 1177 | */ |
1178 | hw_dbg(hw, "Forcing Speed and Duplex\n"); | 1178 | e_dbg("Forcing Speed and Duplex\n"); |
1179 | ret_val = e1000_phy_force_speed_duplex(hw); | 1179 | ret_val = e1000_phy_force_speed_duplex(hw); |
1180 | if (ret_val) { | 1180 | if (ret_val) { |
1181 | hw_dbg(hw, "Error Forcing Speed and Duplex\n"); | 1181 | e_dbg("Error Forcing Speed and Duplex\n"); |
1182 | return ret_val; | 1182 | return ret_val; |
1183 | } | 1183 | } |
1184 | } | 1184 | } |
@@ -1195,11 +1195,11 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw) | |||
1195 | return ret_val; | 1195 | return ret_val; |
1196 | 1196 | ||
1197 | if (link) { | 1197 | if (link) { |
1198 | hw_dbg(hw, "Valid link established!!!\n"); | 1198 | e_dbg("Valid link established!!!\n"); |
1199 | e1000e_config_collision_dist(hw); | 1199 | e1000e_config_collision_dist(hw); |
1200 | ret_val = e1000e_config_fc_after_link_up(hw); | 1200 | ret_val = e1000e_config_fc_after_link_up(hw); |
1201 | } else { | 1201 | } else { |
1202 | hw_dbg(hw, "Unable to establish link!!!\n"); | 1202 | e_dbg("Unable to establish link!!!\n"); |
1203 | } | 1203 | } |
1204 | 1204 | ||
1205 | return ret_val; | 1205 | return ret_val; |
@@ -1245,12 +1245,12 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) | |||
1245 | if (ret_val) | 1245 | if (ret_val) |
1246 | return ret_val; | 1246 | return ret_val; |
1247 | 1247 | ||
1248 | hw_dbg(hw, "IGP PSCR: %X\n", phy_data); | 1248 | e_dbg("IGP PSCR: %X\n", phy_data); |
1249 | 1249 | ||
1250 | udelay(1); | 1250 | udelay(1); |
1251 | 1251 | ||
1252 | if (phy->autoneg_wait_to_complete) { | 1252 | if (phy->autoneg_wait_to_complete) { |
1253 | hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n"); | 1253 | e_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); |
1254 | 1254 | ||
1255 | ret_val = e1000e_phy_has_link_generic(hw, | 1255 | ret_val = e1000e_phy_has_link_generic(hw, |
1256 | PHY_FORCE_LIMIT, | 1256 | PHY_FORCE_LIMIT, |
@@ -1260,7 +1260,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) | |||
1260 | return ret_val; | 1260 | return ret_val; |
1261 | 1261 | ||
1262 | if (!link) | 1262 | if (!link) |
1263 | hw_dbg(hw, "Link taking longer than expected.\n"); | 1263 | e_dbg("Link taking longer than expected.\n"); |
1264 | 1264 | ||
1265 | /* Try once more */ | 1265 | /* Try once more */ |
1266 | ret_val = e1000e_phy_has_link_generic(hw, | 1266 | ret_val = e1000e_phy_has_link_generic(hw, |
@@ -1304,7 +1304,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1304 | if (ret_val) | 1304 | if (ret_val) |
1305 | return ret_val; | 1305 | return ret_val; |
1306 | 1306 | ||
1307 | hw_dbg(hw, "M88E1000 PSCR: %X\n", phy_data); | 1307 | e_dbg("M88E1000 PSCR: %X\n", phy_data); |
1308 | 1308 | ||
1309 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); | 1309 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); |
1310 | if (ret_val) | 1310 | if (ret_val) |
@@ -1322,7 +1322,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1322 | return ret_val; | 1322 | return ret_val; |
1323 | 1323 | ||
1324 | if (phy->autoneg_wait_to_complete) { | 1324 | if (phy->autoneg_wait_to_complete) { |
1325 | hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n"); | 1325 | e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); |
1326 | 1326 | ||
1327 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | 1327 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, |
1328 | 100000, &link); | 1328 | 100000, &link); |
@@ -1413,11 +1413,11 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) | |||
1413 | if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { | 1413 | if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { |
1414 | ctrl &= ~E1000_CTRL_FD; | 1414 | ctrl &= ~E1000_CTRL_FD; |
1415 | *phy_ctrl &= ~MII_CR_FULL_DUPLEX; | 1415 | *phy_ctrl &= ~MII_CR_FULL_DUPLEX; |
1416 | hw_dbg(hw, "Half Duplex\n"); | 1416 | e_dbg("Half Duplex\n"); |
1417 | } else { | 1417 | } else { |
1418 | ctrl |= E1000_CTRL_FD; | 1418 | ctrl |= E1000_CTRL_FD; |
1419 | *phy_ctrl |= MII_CR_FULL_DUPLEX; | 1419 | *phy_ctrl |= MII_CR_FULL_DUPLEX; |
1420 | hw_dbg(hw, "Full Duplex\n"); | 1420 | e_dbg("Full Duplex\n"); |
1421 | } | 1421 | } |
1422 | 1422 | ||
1423 | /* Forcing 10mb or 100mb? */ | 1423 | /* Forcing 10mb or 100mb? */ |
@@ -1425,12 +1425,12 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) | |||
1425 | ctrl |= E1000_CTRL_SPD_100; | 1425 | ctrl |= E1000_CTRL_SPD_100; |
1426 | *phy_ctrl |= MII_CR_SPEED_100; | 1426 | *phy_ctrl |= MII_CR_SPEED_100; |
1427 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); | 1427 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); |
1428 | hw_dbg(hw, "Forcing 100mb\n"); | 1428 | e_dbg("Forcing 100mb\n"); |
1429 | } else { | 1429 | } else { |
1430 | ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); | 1430 | ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); |
1431 | *phy_ctrl |= MII_CR_SPEED_10; | 1431 | *phy_ctrl |= MII_CR_SPEED_10; |
1432 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); | 1432 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); |
1433 | hw_dbg(hw, "Forcing 10mb\n"); | 1433 | e_dbg("Forcing 10mb\n"); |
1434 | } | 1434 | } |
1435 | 1435 | ||
1436 | e1000e_config_collision_dist(hw); | 1436 | e1000e_config_collision_dist(hw); |
@@ -1826,7 +1826,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) | |||
1826 | bool link; | 1826 | bool link; |
1827 | 1827 | ||
1828 | if (hw->phy.media_type != e1000_media_type_copper) { | 1828 | if (hw->phy.media_type != e1000_media_type_copper) { |
1829 | hw_dbg(hw, "Phy info is only valid for copper media\n"); | 1829 | e_dbg("Phy info is only valid for copper media\n"); |
1830 | return -E1000_ERR_CONFIG; | 1830 | return -E1000_ERR_CONFIG; |
1831 | } | 1831 | } |
1832 | 1832 | ||
@@ -1835,7 +1835,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) | |||
1835 | return ret_val; | 1835 | return ret_val; |
1836 | 1836 | ||
1837 | if (!link) { | 1837 | if (!link) { |
1838 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | 1838 | e_dbg("Phy info is only valid if link is up\n"); |
1839 | return -E1000_ERR_CONFIG; | 1839 | return -E1000_ERR_CONFIG; |
1840 | } | 1840 | } |
1841 | 1841 | ||
@@ -1903,7 +1903,7 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) | |||
1903 | return ret_val; | 1903 | return ret_val; |
1904 | 1904 | ||
1905 | if (!link) { | 1905 | if (!link) { |
1906 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | 1906 | e_dbg("Phy info is only valid if link is up\n"); |
1907 | return -E1000_ERR_CONFIG; | 1907 | return -E1000_ERR_CONFIG; |
1908 | } | 1908 | } |
1909 | 1909 | ||
@@ -2031,7 +2031,7 @@ s32 e1000e_get_cfg_done(struct e1000_hw *hw) | |||
2031 | **/ | 2031 | **/ |
2032 | s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) | 2032 | s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) |
2033 | { | 2033 | { |
2034 | hw_dbg(hw, "Running IGP 3 PHY init script\n"); | 2034 | e_dbg("Running IGP 3 PHY init script\n"); |
2035 | 2035 | ||
2036 | /* PHY init IGP 3 */ | 2036 | /* PHY init IGP 3 */ |
2037 | /* Enable rise/fall, 10-mode work in class-A */ | 2037 | /* Enable rise/fall, 10-mode work in class-A */ |
@@ -2474,7 +2474,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | |||
2474 | /* Gig must be disabled for MDIO accesses to page 800 */ | 2474 | /* Gig must be disabled for MDIO accesses to page 800 */ |
2475 | if ((hw->mac.type == e1000_pchlan) && | 2475 | if ((hw->mac.type == e1000_pchlan) && |
2476 | (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) | 2476 | (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) |
2477 | hw_dbg(hw, "Attempting to access page 800 while gig enabled\n"); | 2477 | e_dbg("Attempting to access page 800 while gig enabled\n"); |
2478 | 2478 | ||
2479 | /* All operations in this function are phy address 1 */ | 2479 | /* All operations in this function are phy address 1 */ |
2480 | hw->phy.addr = 1; | 2480 | hw->phy.addr = 1; |
@@ -2884,7 +2884,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | |||
2884 | /* masking with 0x3F to remove the page from offset */ | 2884 | /* masking with 0x3F to remove the page from offset */ |
2885 | ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); | 2885 | ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); |
2886 | if (ret_val) { | 2886 | if (ret_val) { |
2887 | hw_dbg(hw, "Could not write PHY the HV address register\n"); | 2887 | e_dbg("Could not write PHY the HV address register\n"); |
2888 | goto out; | 2888 | goto out; |
2889 | } | 2889 | } |
2890 | 2890 | ||
@@ -2895,7 +2895,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | |||
2895 | ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); | 2895 | ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); |
2896 | 2896 | ||
2897 | if (ret_val) { | 2897 | if (ret_val) { |
2898 | hw_dbg(hw, "Could not read data value from HV data register\n"); | 2898 | e_dbg("Could not read data value from HV data register\n"); |
2899 | goto out; | 2899 | goto out; |
2900 | } | 2900 | } |
2901 | 2901 | ||
@@ -3021,12 +3021,12 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) | |||
3021 | if (ret_val) | 3021 | if (ret_val) |
3022 | goto out; | 3022 | goto out; |
3023 | 3023 | ||
3024 | hw_dbg(hw, "I82577_PHY_CTRL_2: %X\n", phy_data); | 3024 | e_dbg("I82577_PHY_CTRL_2: %X\n", phy_data); |
3025 | 3025 | ||
3026 | udelay(1); | 3026 | udelay(1); |
3027 | 3027 | ||
3028 | if (phy->autoneg_wait_to_complete) { | 3028 | if (phy->autoneg_wait_to_complete) { |
3029 | hw_dbg(hw, "Waiting for forced speed/duplex link on 82577 phy\n"); | 3029 | e_dbg("Waiting for forced speed/duplex link on 82577 phy\n"); |
3030 | 3030 | ||
3031 | ret_val = e1000e_phy_has_link_generic(hw, | 3031 | ret_val = e1000e_phy_has_link_generic(hw, |
3032 | PHY_FORCE_LIMIT, | 3032 | PHY_FORCE_LIMIT, |
@@ -3036,7 +3036,7 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) | |||
3036 | goto out; | 3036 | goto out; |
3037 | 3037 | ||
3038 | if (!link) | 3038 | if (!link) |
3039 | hw_dbg(hw, "Link taking longer than expected.\n"); | 3039 | e_dbg("Link taking longer than expected.\n"); |
3040 | 3040 | ||
3041 | /* Try once more */ | 3041 | /* Try once more */ |
3042 | ret_val = e1000e_phy_has_link_generic(hw, | 3042 | ret_val = e1000e_phy_has_link_generic(hw, |
@@ -3072,7 +3072,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) | |||
3072 | goto out; | 3072 | goto out; |
3073 | 3073 | ||
3074 | if (!link) { | 3074 | if (!link) { |
3075 | hw_dbg(hw, "Phy info is only valid if link is up\n"); | 3075 | e_dbg("Phy info is only valid if link is up\n"); |
3076 | ret_val = -E1000_ERR_CONFIG; | 3076 | ret_val = -E1000_ERR_CONFIG; |
3077 | goto out; | 3077 | goto out; |
3078 | } | 3078 | } |