author    | Bruce Allan <bruce.w.allan@intel.com>    | 2009-11-20 18:25:07 -0500
committer | David S. Miller <davem@davemloft.net>    | 2009-11-21 14:34:05 -0500
commit    | 3bb99fe226ead584a4db674dab546689f705201f (patch)
tree      | 3b49aaef9f4b798b7930a76f62e754eefe0ddb91 /drivers/net/e1000e/lib.c
parent    | d8014dbca7f5d2d6f0fdb47e5286bd2d887f7065 (diff)
e1000e: consolidate two dbug macros into one simpler one
This patch depends on a previous one that cleans up redundant #includes.
Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
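The header-side change that introduces the single macro is outside this lib.c diff; the hunks below only show call sites dropping the explicit hw argument when they switch from hw_dbg(hw, ...) to e_dbg(...). As a rough sketch of the kind of consolidation the subject line describes (the exact definitions and the printk/pr_debug wrappers below are assumptions for illustration, not the actual e1000e header hunk):

```c
/*
 * Illustrative sketch only: assumed definitions, not the real e1000e
 * header change, which is not part of this lib.c diff.
 *
 * Before: the debug macro took the hw pointer as its first argument,
 * so every call site had to pass it explicitly.
 */
#define hw_dbg(hw, format, arg...) \
        printk(KERN_DEBUG "%s: " format, e1000e_get_hw_dev_name(hw), ##arg)

/*
 * After: one simpler macro with no hw parameter, which is why every
 * call site in the hunks below drops its first argument.
 */
#define e_dbg(format, arg...) \
        pr_debug("e1000e: " format, ##arg)
```

Whatever the exact definitions, the mechanical effect visible in lib.c is the same: the first argument disappears and the format strings are untouched.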
Diffstat (limited to 'drivers/net/e1000e/lib.c')
-rw-r--r-- | drivers/net/e1000e/lib.c | 122 |
1 file changed, 61 insertions, 61 deletions
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 5a670a2230e7..fa31c51e5642 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -110,12 +110,12 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
110 | u32 i; | 110 | u32 i; |
111 | 111 | ||
112 | /* Setup the receive address */ | 112 | /* Setup the receive address */ |
113 | hw_dbg(hw, "Programming MAC Address into RAR[0]\n"); | 113 | e_dbg("Programming MAC Address into RAR[0]\n"); |
114 | 114 | ||
115 | e1000e_rar_set(hw, hw->mac.addr, 0); | 115 | e1000e_rar_set(hw, hw->mac.addr, 0); |
116 | 116 | ||
117 | /* Zero out the other (rar_entry_count - 1) receive addresses */ | 117 | /* Zero out the other (rar_entry_count - 1) receive addresses */ |
118 | hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1); | 118 | e_dbg("Clearing RAR[1-%u]\n", rar_count-1); |
119 | for (i = 1; i < rar_count; i++) { | 119 | for (i = 1; i < rar_count; i++) { |
120 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); | 120 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); |
121 | e1e_flush(); | 121 | e1e_flush(); |
@@ -271,7 +271,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
271 | for (; mc_addr_count > 0; mc_addr_count--) { | 271 | for (; mc_addr_count > 0; mc_addr_count--) { |
272 | u32 hash_value, hash_reg, hash_bit, mta; | 272 | u32 hash_value, hash_reg, hash_bit, mta; |
273 | hash_value = e1000_hash_mc_addr(hw, mc_addr_list); | 273 | hash_value = e1000_hash_mc_addr(hw, mc_addr_list); |
274 | hw_dbg(hw, "Hash value = 0x%03X\n", hash_value); | 274 | e_dbg("Hash value = 0x%03X\n", hash_value); |
275 | hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); | 275 | hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); |
276 | hash_bit = hash_value & 0x1F; | 276 | hash_bit = hash_value & 0x1F; |
277 | mta = (1 << hash_bit); | 277 | mta = (1 << hash_bit); |
@@ -403,7 +403,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
403 | */ | 403 | */ |
404 | ret_val = e1000e_config_fc_after_link_up(hw); | 404 | ret_val = e1000e_config_fc_after_link_up(hw); |
405 | if (ret_val) { | 405 | if (ret_val) { |
406 | hw_dbg(hw, "Error configuring flow control\n"); | 406 | e_dbg("Error configuring flow control\n"); |
407 | } | 407 | } |
408 | 408 | ||
409 | return ret_val; | 409 | return ret_val; |
@@ -443,7 +443,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
443 | mac->autoneg_failed = 1; | 443 | mac->autoneg_failed = 1; |
444 | return 0; | 444 | return 0; |
445 | } | 445 | } |
446 | hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); | 446 | e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); |
447 | 447 | ||
448 | /* Disable auto-negotiation in the TXCW register */ | 448 | /* Disable auto-negotiation in the TXCW register */ |
449 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | 449 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); |
@@ -456,7 +456,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
456 | /* Configure Flow Control after forcing link up. */ | 456 | /* Configure Flow Control after forcing link up. */ |
457 | ret_val = e1000e_config_fc_after_link_up(hw); | 457 | ret_val = e1000e_config_fc_after_link_up(hw); |
458 | if (ret_val) { | 458 | if (ret_val) { |
459 | hw_dbg(hw, "Error configuring flow control\n"); | 459 | e_dbg("Error configuring flow control\n"); |
460 | return ret_val; | 460 | return ret_val; |
461 | } | 461 | } |
462 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | 462 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
@@ -466,7 +466,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
466 | * and disable forced link in the Device Control register | 466 | * and disable forced link in the Device Control register |
467 | * in an attempt to auto-negotiate with our link partner. | 467 | * in an attempt to auto-negotiate with our link partner. |
468 | */ | 468 | */ |
469 | hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); | 469 | e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); |
470 | ew32(TXCW, mac->txcw); | 470 | ew32(TXCW, mac->txcw); |
471 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | 471 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
472 | 472 | ||
@@ -508,7 +508,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
508 | mac->autoneg_failed = 1; | 508 | mac->autoneg_failed = 1; |
509 | return 0; | 509 | return 0; |
510 | } | 510 | } |
511 | hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); | 511 | e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); |
512 | 512 | ||
513 | /* Disable auto-negotiation in the TXCW register */ | 513 | /* Disable auto-negotiation in the TXCW register */ |
514 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | 514 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); |
@@ -521,7 +521,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
521 | /* Configure Flow Control after forcing link up. */ | 521 | /* Configure Flow Control after forcing link up. */ |
522 | ret_val = e1000e_config_fc_after_link_up(hw); | 522 | ret_val = e1000e_config_fc_after_link_up(hw); |
523 | if (ret_val) { | 523 | if (ret_val) { |
524 | hw_dbg(hw, "Error configuring flow control\n"); | 524 | e_dbg("Error configuring flow control\n"); |
525 | return ret_val; | 525 | return ret_val; |
526 | } | 526 | } |
527 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | 527 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
@@ -531,7 +531,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
531 | * and disable forced link in the Device Control register | 531 | * and disable forced link in the Device Control register |
532 | * in an attempt to auto-negotiate with our link partner. | 532 | * in an attempt to auto-negotiate with our link partner. |
533 | */ | 533 | */ |
534 | hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); | 534 | e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); |
535 | ew32(TXCW, mac->txcw); | 535 | ew32(TXCW, mac->txcw); |
536 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | 536 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
537 | 537 | ||
@@ -548,11 +548,11 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
548 | if (rxcw & E1000_RXCW_SYNCH) { | 548 | if (rxcw & E1000_RXCW_SYNCH) { |
549 | if (!(rxcw & E1000_RXCW_IV)) { | 549 | if (!(rxcw & E1000_RXCW_IV)) { |
550 | mac->serdes_has_link = true; | 550 | mac->serdes_has_link = true; |
551 | hw_dbg(hw, "SERDES: Link up - forced.\n"); | 551 | e_dbg("SERDES: Link up - forced.\n"); |
552 | } | 552 | } |
553 | } else { | 553 | } else { |
554 | mac->serdes_has_link = false; | 554 | mac->serdes_has_link = false; |
555 | hw_dbg(hw, "SERDES: Link down - force failed.\n"); | 555 | e_dbg("SERDES: Link down - force failed.\n"); |
556 | } | 556 | } |
557 | } | 557 | } |
558 | 558 | ||
@@ -565,20 +565,20 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
565 | if (rxcw & E1000_RXCW_SYNCH) { | 565 | if (rxcw & E1000_RXCW_SYNCH) { |
566 | if (!(rxcw & E1000_RXCW_IV)) { | 566 | if (!(rxcw & E1000_RXCW_IV)) { |
567 | mac->serdes_has_link = true; | 567 | mac->serdes_has_link = true; |
568 | hw_dbg(hw, "SERDES: Link up - autoneg " | 568 | e_dbg("SERDES: Link up - autoneg " |
569 | "completed sucessfully.\n"); | 569 | "completed sucessfully.\n"); |
570 | } else { | 570 | } else { |
571 | mac->serdes_has_link = false; | 571 | mac->serdes_has_link = false; |
572 | hw_dbg(hw, "SERDES: Link down - invalid" | 572 | e_dbg("SERDES: Link down - invalid" |
573 | "codewords detected in autoneg.\n"); | 573 | "codewords detected in autoneg.\n"); |
574 | } | 574 | } |
575 | } else { | 575 | } else { |
576 | mac->serdes_has_link = false; | 576 | mac->serdes_has_link = false; |
577 | hw_dbg(hw, "SERDES: Link down - no sync.\n"); | 577 | e_dbg("SERDES: Link down - no sync.\n"); |
578 | } | 578 | } |
579 | } else { | 579 | } else { |
580 | mac->serdes_has_link = false; | 580 | mac->serdes_has_link = false; |
581 | hw_dbg(hw, "SERDES: Link down - autoneg failed\n"); | 581 | e_dbg("SERDES: Link down - autoneg failed\n"); |
582 | } | 582 | } |
583 | } | 583 | } |
584 | 584 | ||
@@ -609,7 +609,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
609 | ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); | 609 | ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); |
610 | 610 | ||
611 | if (ret_val) { | 611 | if (ret_val) { |
612 | hw_dbg(hw, "NVM Read Error\n"); | 612 | e_dbg("NVM Read Error\n"); |
613 | return ret_val; | 613 | return ret_val; |
614 | } | 614 | } |
615 | 615 | ||
@@ -662,7 +662,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
662 | */ | 662 | */ |
663 | hw->fc.current_mode = hw->fc.requested_mode; | 663 | hw->fc.current_mode = hw->fc.requested_mode; |
664 | 664 | ||
665 | hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", | 665 | e_dbg("After fix-ups FlowControl is now = %x\n", |
666 | hw->fc.current_mode); | 666 | hw->fc.current_mode); |
667 | 667 | ||
668 | /* Call the necessary media_type subroutine to configure the link. */ | 668 | /* Call the necessary media_type subroutine to configure the link. */ |
@@ -676,7 +676,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
676 | * control is disabled, because it does not hurt anything to | 676 | * control is disabled, because it does not hurt anything to |
677 | * initialize these registers. | 677 | * initialize these registers. |
678 | */ | 678 | */ |
679 | hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n"); | 679 | e_dbg("Initializing the Flow Control address, type and timer regs\n"); |
680 | ew32(FCT, FLOW_CONTROL_TYPE); | 680 | ew32(FCT, FLOW_CONTROL_TYPE); |
681 | ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); | 681 | ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); |
682 | ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); | 682 | ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); |
@@ -746,7 +746,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
746 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); | 746 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); |
747 | break; | 747 | break; |
748 | default: | 748 | default: |
749 | hw_dbg(hw, "Flow control param set incorrectly\n"); | 749 | e_dbg("Flow control param set incorrectly\n"); |
750 | return -E1000_ERR_CONFIG; | 750 | return -E1000_ERR_CONFIG; |
751 | break; | 751 | break; |
752 | } | 752 | } |
@@ -784,7 +784,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
784 | break; | 784 | break; |
785 | } | 785 | } |
786 | if (i == FIBER_LINK_UP_LIMIT) { | 786 | if (i == FIBER_LINK_UP_LIMIT) { |
787 | hw_dbg(hw, "Never got a valid link from auto-neg!!!\n"); | 787 | e_dbg("Never got a valid link from auto-neg!!!\n"); |
788 | mac->autoneg_failed = 1; | 788 | mac->autoneg_failed = 1; |
789 | /* | 789 | /* |
790 | * AutoNeg failed to achieve a link, so we'll call | 790 | * AutoNeg failed to achieve a link, so we'll call |
@@ -794,13 +794,13 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
794 | */ | 794 | */ |
795 | ret_val = mac->ops.check_for_link(hw); | 795 | ret_val = mac->ops.check_for_link(hw); |
796 | if (ret_val) { | 796 | if (ret_val) { |
797 | hw_dbg(hw, "Error while checking for link\n"); | 797 | e_dbg("Error while checking for link\n"); |
798 | return ret_val; | 798 | return ret_val; |
799 | } | 799 | } |
800 | mac->autoneg_failed = 0; | 800 | mac->autoneg_failed = 0; |
801 | } else { | 801 | } else { |
802 | mac->autoneg_failed = 0; | 802 | mac->autoneg_failed = 0; |
803 | hw_dbg(hw, "Valid Link Found\n"); | 803 | e_dbg("Valid Link Found\n"); |
804 | } | 804 | } |
805 | 805 | ||
806 | return 0; | 806 | return 0; |
@@ -836,7 +836,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
836 | * then the link-up status bit will be set and the flow control enable | 836 | * then the link-up status bit will be set and the flow control enable |
837 | * bits (RFCE and TFCE) will be set according to their negotiated value. | 837 | * bits (RFCE and TFCE) will be set according to their negotiated value. |
838 | */ | 838 | */ |
839 | hw_dbg(hw, "Auto-negotiation enabled\n"); | 839 | e_dbg("Auto-negotiation enabled\n"); |
840 | 840 | ||
841 | ew32(CTRL, ctrl); | 841 | ew32(CTRL, ctrl); |
842 | e1e_flush(); | 842 | e1e_flush(); |
@@ -851,7 +851,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
851 | (er32(CTRL) & E1000_CTRL_SWDPIN1)) { | 851 | (er32(CTRL) & E1000_CTRL_SWDPIN1)) { |
852 | ret_val = e1000_poll_fiber_serdes_link_generic(hw); | 852 | ret_val = e1000_poll_fiber_serdes_link_generic(hw); |
853 | } else { | 853 | } else { |
854 | hw_dbg(hw, "No signal detected\n"); | 854 | e_dbg("No signal detected\n"); |
855 | } | 855 | } |
856 | 856 | ||
857 | return 0; | 857 | return 0; |
@@ -947,7 +947,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
947 | * 3: Both Rx and Tx flow control (symmetric) is enabled. | 947 | * 3: Both Rx and Tx flow control (symmetric) is enabled. |
948 | * other: No other values should be possible at this point. | 948 | * other: No other values should be possible at this point. |
949 | */ | 949 | */ |
950 | hw_dbg(hw, "hw->fc.current_mode = %u\n", hw->fc.current_mode); | 950 | e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); |
951 | 951 | ||
952 | switch (hw->fc.current_mode) { | 952 | switch (hw->fc.current_mode) { |
953 | case e1000_fc_none: | 953 | case e1000_fc_none: |
@@ -965,7 +965,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
965 | ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); | 965 | ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); |
966 | break; | 966 | break; |
967 | default: | 967 | default: |
968 | hw_dbg(hw, "Flow control param set incorrectly\n"); | 968 | e_dbg("Flow control param set incorrectly\n"); |
969 | return -E1000_ERR_CONFIG; | 969 | return -E1000_ERR_CONFIG; |
970 | } | 970 | } |
971 | 971 | ||
@@ -1006,7 +1006,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1006 | } | 1006 | } |
1007 | 1007 | ||
1008 | if (ret_val) { | 1008 | if (ret_val) { |
1009 | hw_dbg(hw, "Error forcing flow control settings\n"); | 1009 | e_dbg("Error forcing flow control settings\n"); |
1010 | return ret_val; | 1010 | return ret_val; |
1011 | } | 1011 | } |
1012 | 1012 | ||
@@ -1030,7 +1030,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1030 | return ret_val; | 1030 | return ret_val; |
1031 | 1031 | ||
1032 | if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { | 1032 | if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { |
1033 | hw_dbg(hw, "Copper PHY and Auto Neg " | 1033 | e_dbg("Copper PHY and Auto Neg " |
1034 | "has not completed.\n"); | 1034 | "has not completed.\n"); |
1035 | return ret_val; | 1035 | return ret_val; |
1036 | } | 1036 | } |
@@ -1095,10 +1095,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1095 | */ | 1095 | */ |
1096 | if (hw->fc.requested_mode == e1000_fc_full) { | 1096 | if (hw->fc.requested_mode == e1000_fc_full) { |
1097 | hw->fc.current_mode = e1000_fc_full; | 1097 | hw->fc.current_mode = e1000_fc_full; |
1098 | hw_dbg(hw, "Flow Control = FULL.\r\n"); | 1098 | e_dbg("Flow Control = FULL.\r\n"); |
1099 | } else { | 1099 | } else { |
1100 | hw->fc.current_mode = e1000_fc_rx_pause; | 1100 | hw->fc.current_mode = e1000_fc_rx_pause; |
1101 | hw_dbg(hw, "Flow Control = " | 1101 | e_dbg("Flow Control = " |
1102 | "RX PAUSE frames only.\r\n"); | 1102 | "RX PAUSE frames only.\r\n"); |
1103 | } | 1103 | } |
1104 | } | 1104 | } |
@@ -1116,7 +1116,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1116 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && | 1116 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && |
1117 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { | 1117 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { |
1118 | hw->fc.current_mode = e1000_fc_tx_pause; | 1118 | hw->fc.current_mode = e1000_fc_tx_pause; |
1119 | hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n"); | 1119 | e_dbg("Flow Control = Tx PAUSE frames only.\r\n"); |
1120 | } | 1120 | } |
1121 | /* | 1121 | /* |
1122 | * For transmitting PAUSE frames ONLY. | 1122 | * For transmitting PAUSE frames ONLY. |
@@ -1132,14 +1132,14 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1132 | !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && | 1132 | !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && |
1133 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { | 1133 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { |
1134 | hw->fc.current_mode = e1000_fc_rx_pause; | 1134 | hw->fc.current_mode = e1000_fc_rx_pause; |
1135 | hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n"); | 1135 | e_dbg("Flow Control = Rx PAUSE frames only.\r\n"); |
1136 | } else { | 1136 | } else { |
1137 | /* | 1137 | /* |
1138 | * Per the IEEE spec, at this point flow control | 1138 | * Per the IEEE spec, at this point flow control |
1139 | * should be disabled. | 1139 | * should be disabled. |
1140 | */ | 1140 | */ |
1141 | hw->fc.current_mode = e1000_fc_none; | 1141 | hw->fc.current_mode = e1000_fc_none; |
1142 | hw_dbg(hw, "Flow Control = NONE.\r\n"); | 1142 | e_dbg("Flow Control = NONE.\r\n"); |
1143 | } | 1143 | } |
1144 | 1144 | ||
1145 | /* | 1145 | /* |
@@ -1149,7 +1149,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1149 | */ | 1149 | */ |
1150 | ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); | 1150 | ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); |
1151 | if (ret_val) { | 1151 | if (ret_val) { |
1152 | hw_dbg(hw, "Error getting link speed and duplex\n"); | 1152 | e_dbg("Error getting link speed and duplex\n"); |
1153 | return ret_val; | 1153 | return ret_val; |
1154 | } | 1154 | } |
1155 | 1155 | ||
@@ -1162,7 +1162,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1162 | */ | 1162 | */ |
1163 | ret_val = e1000e_force_mac_fc(hw); | 1163 | ret_val = e1000e_force_mac_fc(hw); |
1164 | if (ret_val) { | 1164 | if (ret_val) { |
1165 | hw_dbg(hw, "Error forcing flow control settings\n"); | 1165 | e_dbg("Error forcing flow control settings\n"); |
1166 | return ret_val; | 1166 | return ret_val; |
1167 | } | 1167 | } |
1168 | } | 1168 | } |
@@ -1186,21 +1186,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup
1186 | status = er32(STATUS); | 1186 | status = er32(STATUS); |
1187 | if (status & E1000_STATUS_SPEED_1000) { | 1187 | if (status & E1000_STATUS_SPEED_1000) { |
1188 | *speed = SPEED_1000; | 1188 | *speed = SPEED_1000; |
1189 | hw_dbg(hw, "1000 Mbs, "); | 1189 | e_dbg("1000 Mbs, "); |
1190 | } else if (status & E1000_STATUS_SPEED_100) { | 1190 | } else if (status & E1000_STATUS_SPEED_100) { |
1191 | *speed = SPEED_100; | 1191 | *speed = SPEED_100; |
1192 | hw_dbg(hw, "100 Mbs, "); | 1192 | e_dbg("100 Mbs, "); |
1193 | } else { | 1193 | } else { |
1194 | *speed = SPEED_10; | 1194 | *speed = SPEED_10; |
1195 | hw_dbg(hw, "10 Mbs, "); | 1195 | e_dbg("10 Mbs, "); |
1196 | } | 1196 | } |
1197 | 1197 | ||
1198 | if (status & E1000_STATUS_FD) { | 1198 | if (status & E1000_STATUS_FD) { |
1199 | *duplex = FULL_DUPLEX; | 1199 | *duplex = FULL_DUPLEX; |
1200 | hw_dbg(hw, "Full Duplex\n"); | 1200 | e_dbg("Full Duplex\n"); |
1201 | } else { | 1201 | } else { |
1202 | *duplex = HALF_DUPLEX; | 1202 | *duplex = HALF_DUPLEX; |
1203 | hw_dbg(hw, "Half Duplex\n"); | 1203 | e_dbg("Half Duplex\n"); |
1204 | } | 1204 | } |
1205 | 1205 | ||
1206 | return 0; | 1206 | return 0; |
@@ -1246,7 +1246,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
1246 | } | 1246 | } |
1247 | 1247 | ||
1248 | if (i == timeout) { | 1248 | if (i == timeout) { |
1249 | hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n"); | 1249 | e_dbg("Driver can't access device - SMBI bit is set.\n"); |
1250 | return -E1000_ERR_NVM; | 1250 | return -E1000_ERR_NVM; |
1251 | } | 1251 | } |
1252 | 1252 | ||
@@ -1265,7 +1265,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
1265 | if (i == timeout) { | 1265 | if (i == timeout) { |
1266 | /* Release semaphores */ | 1266 | /* Release semaphores */ |
1267 | e1000e_put_hw_semaphore(hw); | 1267 | e1000e_put_hw_semaphore(hw); |
1268 | hw_dbg(hw, "Driver can't access the NVM\n"); | 1268 | e_dbg("Driver can't access the NVM\n"); |
1269 | return -E1000_ERR_NVM; | 1269 | return -E1000_ERR_NVM; |
1270 | } | 1270 | } |
1271 | 1271 | ||
@@ -1305,7 +1305,7 @@ s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
1305 | } | 1305 | } |
1306 | 1306 | ||
1307 | if (i == AUTO_READ_DONE_TIMEOUT) { | 1307 | if (i == AUTO_READ_DONE_TIMEOUT) { |
1308 | hw_dbg(hw, "Auto read by HW from NVM has not completed.\n"); | 1308 | e_dbg("Auto read by HW from NVM has not completed.\n"); |
1309 | return -E1000_ERR_RESET; | 1309 | return -E1000_ERR_RESET; |
1310 | } | 1310 | } |
1311 | 1311 | ||
@@ -1326,7 +1326,7 @@ s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
1326 | 1326 | ||
1327 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); | 1327 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); |
1328 | if (ret_val) { | 1328 | if (ret_val) { |
1329 | hw_dbg(hw, "NVM Read Error\n"); | 1329 | e_dbg("NVM Read Error\n"); |
1330 | return ret_val; | 1330 | return ret_val; |
1331 | } | 1331 | } |
1332 | 1332 | ||
@@ -1580,7 +1580,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
1580 | } | 1580 | } |
1581 | 1581 | ||
1582 | if (!timeout) { | 1582 | if (!timeout) { |
1583 | hw_dbg(hw, "Master requests are pending.\n"); | 1583 | e_dbg("Master requests are pending.\n"); |
1584 | return -E1000_ERR_MASTER_REQUESTS_PENDING; | 1584 | return -E1000_ERR_MASTER_REQUESTS_PENDING; |
1585 | } | 1585 | } |
1586 | 1586 | ||
@@ -1804,7 +1804,7 @@ s32 e1000e_acquire_nvm(struct e1000_hw *hw)
1804 | if (!timeout) { | 1804 | if (!timeout) { |
1805 | eecd &= ~E1000_EECD_REQ; | 1805 | eecd &= ~E1000_EECD_REQ; |
1806 | ew32(EECD, eecd); | 1806 | ew32(EECD, eecd); |
1807 | hw_dbg(hw, "Could not acquire NVM grant\n"); | 1807 | e_dbg("Could not acquire NVM grant\n"); |
1808 | return -E1000_ERR_NVM; | 1808 | return -E1000_ERR_NVM; |
1809 | } | 1809 | } |
1810 | 1810 | ||
@@ -1909,7 +1909,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1909 | } | 1909 | } |
1910 | 1910 | ||
1911 | if (!timeout) { | 1911 | if (!timeout) { |
1912 | hw_dbg(hw, "SPI NVM Status error\n"); | 1912 | e_dbg("SPI NVM Status error\n"); |
1913 | return -E1000_ERR_NVM; | 1913 | return -E1000_ERR_NVM; |
1914 | } | 1914 | } |
1915 | } | 1915 | } |
@@ -1938,7 +1938,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1938 | */ | 1938 | */ |
1939 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 1939 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
1940 | (words == 0)) { | 1940 | (words == 0)) { |
1941 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1941 | e_dbg("nvm parameter(s) out of bounds\n"); |
1942 | return -E1000_ERR_NVM; | 1942 | return -E1000_ERR_NVM; |
1943 | } | 1943 | } |
1944 | 1944 | ||
@@ -1981,7 +1981,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1981 | */ | 1981 | */ |
1982 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 1982 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
1983 | (words == 0)) { | 1983 | (words == 0)) { |
1984 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1984 | e_dbg("nvm parameter(s) out of bounds\n"); |
1985 | return -E1000_ERR_NVM; | 1985 | return -E1000_ERR_NVM; |
1986 | } | 1986 | } |
1987 | 1987 | ||
@@ -2061,7 +2061,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2061 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, | 2061 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, |
2062 | &mac_addr_offset); | 2062 | &mac_addr_offset); |
2063 | if (ret_val) { | 2063 | if (ret_val) { |
2064 | hw_dbg(hw, "NVM Read Error\n"); | 2064 | e_dbg("NVM Read Error\n"); |
2065 | return ret_val; | 2065 | return ret_val; |
2066 | } | 2066 | } |
2067 | if (mac_addr_offset == 0xFFFF) | 2067 | if (mac_addr_offset == 0xFFFF) |
@@ -2076,7 +2076,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2076 | ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, | 2076 | ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, |
2077 | &nvm_data); | 2077 | &nvm_data); |
2078 | if (ret_val) { | 2078 | if (ret_val) { |
2079 | hw_dbg(hw, "NVM Read Error\n"); | 2079 | e_dbg("NVM Read Error\n"); |
2080 | return ret_val; | 2080 | return ret_val; |
2081 | } | 2081 | } |
2082 | if (nvm_data & 0x0001) | 2082 | if (nvm_data & 0x0001) |
@@ -2091,7 +2091,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2091 | offset = mac_addr_offset + (i >> 1); | 2091 | offset = mac_addr_offset + (i >> 1); |
2092 | ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); | 2092 | ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); |
2093 | if (ret_val) { | 2093 | if (ret_val) { |
2094 | hw_dbg(hw, "NVM Read Error\n"); | 2094 | e_dbg("NVM Read Error\n"); |
2095 | return ret_val; | 2095 | return ret_val; |
2096 | } | 2096 | } |
2097 | hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); | 2097 | hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); |
@@ -2124,14 +2124,14 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
2124 | for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { | 2124 | for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { |
2125 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); | 2125 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); |
2126 | if (ret_val) { | 2126 | if (ret_val) { |
2127 | hw_dbg(hw, "NVM Read Error\n"); | 2127 | e_dbg("NVM Read Error\n"); |
2128 | return ret_val; | 2128 | return ret_val; |
2129 | } | 2129 | } |
2130 | checksum += nvm_data; | 2130 | checksum += nvm_data; |
2131 | } | 2131 | } |
2132 | 2132 | ||
2133 | if (checksum != (u16) NVM_SUM) { | 2133 | if (checksum != (u16) NVM_SUM) { |
2134 | hw_dbg(hw, "NVM Checksum Invalid\n"); | 2134 | e_dbg("NVM Checksum Invalid\n"); |
2135 | return -E1000_ERR_NVM; | 2135 | return -E1000_ERR_NVM; |
2136 | } | 2136 | } |
2137 | 2137 | ||
@@ -2155,7 +2155,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
2155 | for (i = 0; i < NVM_CHECKSUM_REG; i++) { | 2155 | for (i = 0; i < NVM_CHECKSUM_REG; i++) { |
2156 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); | 2156 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); |
2157 | if (ret_val) { | 2157 | if (ret_val) { |
2158 | hw_dbg(hw, "NVM Read Error while updating checksum.\n"); | 2158 | e_dbg("NVM Read Error while updating checksum.\n"); |
2159 | return ret_val; | 2159 | return ret_val; |
2160 | } | 2160 | } |
2161 | checksum += nvm_data; | 2161 | checksum += nvm_data; |
@@ -2163,7 +2163,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
2163 | checksum = (u16) NVM_SUM - checksum; | 2163 | checksum = (u16) NVM_SUM - checksum; |
2164 | ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); | 2164 | ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); |
2165 | if (ret_val) | 2165 | if (ret_val) |
2166 | hw_dbg(hw, "NVM Write Error while updating checksum.\n"); | 2166 | e_dbg("NVM Write Error while updating checksum.\n"); |
2167 | 2167 | ||
2168 | return ret_val; | 2168 | return ret_val; |
2169 | } | 2169 | } |
@@ -2226,7 +2226,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
2226 | /* Check that the host interface is enabled. */ | 2226 | /* Check that the host interface is enabled. */ |
2227 | hicr = er32(HICR); | 2227 | hicr = er32(HICR); |
2228 | if ((hicr & E1000_HICR_EN) == 0) { | 2228 | if ((hicr & E1000_HICR_EN) == 0) { |
2229 | hw_dbg(hw, "E1000_HOST_EN bit disabled.\n"); | 2229 | e_dbg("E1000_HOST_EN bit disabled.\n"); |
2230 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | 2230 | return -E1000_ERR_HOST_INTERFACE_COMMAND; |
2231 | } | 2231 | } |
2232 | /* check the previous command is completed */ | 2232 | /* check the previous command is completed */ |
@@ -2238,7 +2238,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
2238 | } | 2238 | } |
2239 | 2239 | ||
2240 | if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { | 2240 | if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { |
2241 | hw_dbg(hw, "Previous command timeout failed .\n"); | 2241 | e_dbg("Previous command timeout failed .\n"); |
2242 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | 2242 | return -E1000_ERR_HOST_INTERFACE_COMMAND; |
2243 | } | 2243 | } |
2244 | 2244 | ||
@@ -2509,14 +2509,14 @@ s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
2509 | 2509 | ||
2510 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); | 2510 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); |
2511 | if (ret_val) { | 2511 | if (ret_val) { |
2512 | hw_dbg(hw, "NVM Read Error\n"); | 2512 | e_dbg("NVM Read Error\n"); |
2513 | return ret_val; | 2513 | return ret_val; |
2514 | } | 2514 | } |
2515 | *pba_num = (u32)(nvm_data << 16); | 2515 | *pba_num = (u32)(nvm_data << 16); |
2516 | 2516 | ||
2517 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); | 2517 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); |
2518 | if (ret_val) { | 2518 | if (ret_val) { |
2519 | hw_dbg(hw, "NVM Read Error\n"); | 2519 | e_dbg("NVM Read Error\n"); |
2520 | return ret_val; | 2520 | return ret_val; |
2521 | } | 2521 | } |
2522 | *pba_num |= nvm_data; | 2522 | *pba_num |= nvm_data; |