author     Bruce Allan <bruce.w.allan@intel.com>    2008-03-28 12:15:03 -0400
committer  Jeff Garzik <jeff@garzik.org>            2008-03-28 22:14:56 -0400
commit     ad68076e07fa01bd0c98278a959d0fd2bb26f1ac (patch)
tree       f0b664ecdb38478f9b995aff10dcb39a09221fb6 /drivers/net/e1000e
parent     652f093fdf14c7ca1e13c052da429ae385e4dc21 (diff)
e1000e: reformat comment blocks, cosmetic changes only
Adjusting the comment blocks here to be code-style compliant. No code changes.
Changed some copyright dates to 2008. Indentation fixes.

Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
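The recurring change throughout the patch is the kernel's preferred multi-line comment layout, with the opening /* and closing */ on their own lines. As a minimal illustration, taken from the e1000_reset_hw_82571() hunk further down, the conversion looks like this:

	/* old style: text begins on the opening delimiter line */
	/* Must acquire the MDIO ownership before MAC reset.
	 * Ownership defaults to firmware after a reset. */

	/* new style: opening and closing delimiters stand alone */
	/*
	 * Must acquire the MDIO ownership before MAC reset.
	 * Ownership defaults to firmware after a reset.
	 */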
Diffstat (limited to 'drivers/net/e1000e')
-rw-r--r--   drivers/net/e1000e/82571.c   | 105
-rw-r--r--   drivers/net/e1000e/Makefile  |   2
-rw-r--r--   drivers/net/e1000e/defines.h | 109
-rw-r--r--   drivers/net/e1000e/e1000.h   |  16
-rw-r--r--   drivers/net/e1000e/es2lan.c  |  89
-rw-r--r--   drivers/net/e1000e/ethtool.c |  97
-rw-r--r--   drivers/net/e1000e/hw.h      | 145
-rw-r--r--   drivers/net/e1000e/ich8lan.c | 262
-rw-r--r--   drivers/net/e1000e/lib.c     | 213
-rw-r--r--   drivers/net/e1000e/netdev.c  | 428
-rw-r--r--   drivers/net/e1000e/param.c   |  33
-rw-r--r--   drivers/net/e1000e/phy.c     | 152
12 files changed, 1009 insertions, 642 deletions
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 7fe20310eb5f..c58dc2e8de1e 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -29,6 +29,9 @@
 /*
  * 82571EB Gigabit Ethernet Controller
  * 82571EB Gigabit Ethernet Controller (Fiber)
+ * 82571EB Dual Port Gigabit Mezzanine Adapter
+ * 82571EB Quad Port Gigabit Mezzanine Adapter
+ * 82571PT Gigabit PT Quad Port Server ExpressModule
  * 82572EI Gigabit Ethernet Controller (Copper)
  * 82572EI Gigabit Ethernet Controller (Fiber)
  * 82572EI Gigabit Ethernet Controller
@@ -150,7 +153,8 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
 		if (((eecd >> 15) & 0x3) == 0x3) {
 			nvm->type = e1000_nvm_flash_hw;
 			nvm->word_size = 2048;
-			/* Autonomous Flash update bit must be cleared due
+			/*
+			 * Autonomous Flash update bit must be cleared due
 			 * to Flash update issue.
 			 */
 			eecd &= ~E1000_EECD_AUPDEN;
@@ -159,10 +163,11 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
 		}
 		/* Fall Through */
 	default:
 		nvm->type = e1000_nvm_eeprom_spi;
 		size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
 			     E1000_EECD_SIZE_EX_SHIFT);
-		/* Added to a constant, "size" becomes the left-shift value
+		/*
+		 * Added to a constant, "size" becomes the left-shift value
 		 * for setting word_size.
 		 */
 		size += NVM_WORD_SIZE_BASE_SHIFT;
@@ -208,8 +213,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 	/* Set rar entry count */
 	mac->rar_entry_count = E1000_RAR_ENTRIES;
 	/* Set if manageability features are enabled. */
-	mac->arc_subsystem_valid =
-		(er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
+	mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
 
 	/* check for link */
 	switch (hw->media_type) {
@@ -219,14 +223,18 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 		func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
 		break;
 	case e1000_media_type_fiber:
-		func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571;
+		func->setup_physical_interface =
+			e1000_setup_fiber_serdes_link_82571;
 		func->check_for_link = e1000e_check_for_fiber_link;
-		func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes;
+		func->get_link_up_info =
+			e1000e_get_speed_and_duplex_fiber_serdes;
 		break;
 	case e1000_media_type_internal_serdes:
-		func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571;
+		func->setup_physical_interface =
+			e1000_setup_fiber_serdes_link_82571;
 		func->check_for_link = e1000e_check_for_serdes_link;
-		func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes;
+		func->get_link_up_info =
+			e1000e_get_speed_and_duplex_fiber_serdes;
 		break;
 	default:
 		return -E1000_ERR_CONFIG;
@@ -322,10 +330,12 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
-		/* The 82571 firmware may still be configuring the PHY.
+		/*
+		 * The 82571 firmware may still be configuring the PHY.
 		 * In this case, we cannot access the PHY until the
 		 * configuration is done. So we explicitly set the
-		 * PHY ID. */
+		 * PHY ID.
+		 */
 		phy->id = IGP01E1000_I_PHY_ID;
 		break;
 	case e1000_82573:
@@ -479,8 +489,10 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	/* If our nvm is an EEPROM, then we're done
-	 * otherwise, commit the checksum to the flash NVM. */
+	/*
+	 * If our nvm is an EEPROM, then we're done
+	 * otherwise, commit the checksum to the flash NVM.
+	 */
 	if (hw->nvm.type != e1000_nvm_flash_hw)
 		return ret_val;
 
@@ -496,7 +508,8 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
 
 	/* Reset the firmware if using STM opcode. */
 	if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
-		/* The enabling of and the actual reset must be done
+		/*
+		 * The enabling of and the actual reset must be done
 		 * in two write cycles.
 		 */
 		ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
@@ -557,8 +570,10 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
 	u32 eewr = 0;
 	s32 ret_val = 0;
 
-	/* A check for invalid values: offset too large, too many words,
-	 * and not enough words. */
+	/*
+	 * A check for invalid values: offset too large, too many words,
+	 * and not enough words.
+	 */
 	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
 	    (words == 0)) {
 		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
@@ -645,30 +660,32 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
 	} else {
 		data &= ~IGP02E1000_PM_D0_LPLU;
 		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
-		/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
 		 * during Dx states where the power conservation is most
 		 * important. During driver activity we should enable
-		 * SmartSpeed, so performance is maintained. */
+		 * SmartSpeed, so performance is maintained.
+		 */
 		if (phy->smart_speed == e1000_smart_speed_on) {
 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
 					   &data);
 			if (ret_val)
 				return ret_val;
 
 			data |= IGP01E1000_PSCFR_SMART_SPEED;
 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
 					   data);
 			if (ret_val)
 				return ret_val;
 		} else if (phy->smart_speed == e1000_smart_speed_off) {
 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
 					   &data);
 			if (ret_val)
 				return ret_val;
 
 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
 					   data);
 			if (ret_val)
 				return ret_val;
 		}
@@ -693,7 +710,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 i = 0;
 
-	/* Prevent the PCI-E bus from sticking if there is no TLP connection
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
 	 * on the last TLP read/write transaction when MAC is reset.
 	 */
 	ret_val = e1000e_disable_pcie_master(hw);
@@ -709,8 +727,10 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 
 	msleep(10);
 
-	/* Must acquire the MDIO ownership before MAC reset.
-	 * Ownership defaults to firmware after a reset. */
+	/*
+	 * Must acquire the MDIO ownership before MAC reset.
+	 * Ownership defaults to firmware after a reset.
+	 */
 	if (hw->mac.type == e1000_82573) {
 		extcnf_ctrl = er32(EXTCNF_CTRL);
 		extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
@@ -747,7 +767,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 		/* We don't want to continue accessing MAC registers. */
 		return ret_val;
 
-	/* Phy configuration from NVM just starts after EECD_AUTO_RD is set.
+	/*
+	 * Phy configuration from NVM just starts after EECD_AUTO_RD is set.
 	 * Need to wait for Phy configuration completion before accessing
 	 * NVM and Phy.
 	 */
@@ -793,7 +814,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 	e1000e_clear_vfta(hw);
 
 	/* Setup the receive address. */
-	/* If, however, a locally administered address was assigned to the
+	/*
+	 * If, however, a locally administered address was assigned to the
 	 * 82571, we must reserve a RAR for it to work around an issue where
 	 * resetting one port will reload the MAC on the other port.
 	 */
@@ -830,7 +852,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 		ew32(GCR, reg_data);
 	}
 
-	/* Clear all of the statistics registers (clear on read). It is
+	/*
+	 * Clear all of the statistics registers (clear on read). It is
 	 * important that we do this after we have tried to establish link
 	 * because the symbol error count will increment wildly if there
 	 * is no link.
@@ -922,7 +945,8 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
 
 	if (hw->mac.type == e1000_82573) {
 		if (hw->mng_cookie.vlan_id != 0) {
-			/* The VFTA is a 4096b bit-field, each identifying
+			/*
+			 * The VFTA is a 4096b bit-field, each identifying
 			 * a single VLAN ID. The following operations
 			 * determine which 32b entry (i.e. offset) into the
 			 * array we want to set the VLAN ID (i.e. bit) of
@@ -936,7 +960,8 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
 		}
 	}
 	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
-		/* If the offset we want to clear is the same offset of the
+		/*
+		 * If the offset we want to clear is the same offset of the
 		 * manageability VLAN ID, then clear all bits except that of
 		 * the manageability unit.
 		 */
@@ -984,7 +1009,8 @@ static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw,
 **/
 static s32 e1000_setup_link_82571(struct e1000_hw *hw)
 {
-	/* 82573 does not have a word in the NVM to determine
+	/*
+	 * 82573 does not have a word in the NVM to determine
 	 * the default flow control setting, so we explicitly
 	 * set it to full.
 	 */
@@ -1050,14 +1076,14 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
-		/* If SerDes loopback mode is entered, there is no form
+		/*
+		 * If SerDes loopback mode is entered, there is no form
 		 * of reset to take the adapter out of that mode. So we
 		 * have to explicitly take the adapter out of loopback
 		 * mode. This prevents drivers from twiddling their thumbs
 		 * if another tool failed to take it out of loopback mode.
 		 */
-		ew32(SCTL,
-		     E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+		ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
 		break;
 	default:
 		break;
@@ -1124,7 +1150,8 @@ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
 
 	/* If workaround is activated... */
 	if (state)
-		/* Hold a copy of the LAA in RAR[14] This is done so that
+		/*
+		 * Hold a copy of the LAA in RAR[14] This is done so that
 		 * between the time RAR[0] gets clobbered and the time it
 		 * gets fixed, the actual LAA is in one of the RARs and no
 		 * incoming packets directed to this port are dropped.
@@ -1152,7 +1179,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
 	if (nvm->type != e1000_nvm_flash_hw)
 		return 0;
 
-	/* Check bit 4 of word 10h. If it is 0, firmware is done updating
+	/*
+	 * Check bit 4 of word 10h. If it is 0, firmware is done updating
 	 * 10h-12h. Checksum may need to be fixed.
 	 */
 	ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
@@ -1160,7 +1188,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
 		return ret_val;
 
 	if (!(data & 0x10)) {
-		/* Read 0x23 and check bit 15. This bit is a 1
+		/*
+		 * Read 0x23 and check bit 15. This bit is a 1
 		 * when the checksum has already been fixed. If
 		 * the checksum is still wrong and this bit is a
 		 * 1, we need to return bad checksum. Otherwise,
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile
index 650f866e7ac2..360c91369f35 100644
--- a/drivers/net/e1000e/Makefile
+++ b/drivers/net/e1000e/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2007 Intel Corporation.
+# Copyright(c) 1999 - 2008 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index a4f511f549f7..572cfd44397a 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -120,10 +120,10 @@
 #define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
 #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
 #define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
-#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
-                                                  * filtering */
-#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
-                                           * memory */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
+/* Enable MNG packets to host memory */
+#define E1000_MANC_EN_MNG2HOST 0x00200000
 
 /* Receive Control */
 #define E1000_RCTL_EN 0x00000002 /* enable */
@@ -135,25 +135,26 @@
 #define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
 #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
 #define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
-#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */
 #define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
 #define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
 /* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
-#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
-#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
-#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
-#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
+#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */
+#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */
+#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
 /* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
-#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
-#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
-#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
+#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
 #define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
 #define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
 #define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
 #define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
 #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
 
-/* Use byte values for the following shift parameters
+/*
+ * Use byte values for the following shift parameters
  * Usage:
  * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
  *                  E1000_PSRCTL_BSIZE0_MASK) |
@@ -206,7 +207,8 @@
 #define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
 #define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
 
-/* Bit definitions for the Management Data IO (MDIO) and Management Data
+/*
+ * Bit definitions for the Management Data IO (MDIO) and Management Data
  * Clock (MDC) pins in the Device Control Register.
  */
 
@@ -279,7 +281,7 @@
 #define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
 
 /* Transmit Control */
-#define E1000_TCTL_EN 0x00000002 /* enable tx */
+#define E1000_TCTL_EN 0x00000002 /* enable Tx */
 #define E1000_TCTL_PSP 0x00000008 /* pad short packets */
 #define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
 #define E1000_TCTL_COLD 0x003ff000 /* collision distance */
@@ -337,8 +339,8 @@
 #define E1000_KABGTXD_BGSQLBIAS 0x00050000
 
 /* PBA constants */
-#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */
-#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
+#define E1000_PBA_8K 0x0008 /* 8KB */
+#define E1000_PBA_16K 0x0010 /* 16KB */
 
 #define E1000_PBS_16K E1000_PBA_16K
 
@@ -356,12 +358,13 @@
 /* Interrupt Cause Read */
 #define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
 #define E1000_ICR_LSC 0x00000004 /* Link Status Change */
-#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
-#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
-#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
+#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
 #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
 
-/* This defines the bits that are set in the Interrupt Mask
+/*
+ * This defines the bits that are set in the Interrupt Mask
  * Set/Read Register. Each bit is documented below:
  *   o RXT0 = Receiver Timer Interrupt (ring 0)
  *   o TXDW = Transmit Descriptor Written Back
@@ -379,21 +382,22 @@
 /* Interrupt Mask Set */
 #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
 #define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
-#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
-#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
-#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
 
 /* Interrupt Cause Set */
 #define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
 #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
 
 /* Transmit Descriptor Control */
 #define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
 #define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
 #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
 #define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
-#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
-                                              still to be processed. */
+/* Enable the counting of desc. still to be processed. */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000
 
 /* Flow Control Constants */
 #define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
@@ -404,7 +408,8 @@
 #define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
 
 /* Receive Address */
-/* Number of high/low register pairs in the RAR. The RAR (Receive Address
+/*
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
  * Registers) holds the directed and multicast addresses that we monitor.
  * Technically, we have 16 spots. However, we reserve one of these spots
  * (RAR[15]) for our directed address used by controllers with
@@ -533,8 +538,8 @@
 #define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
 #define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
 #define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
-#define E1000_EECD_ADDR_BITS 0x00000400 /* NVM Addressing bits based on type
-                                         * (0-small, 1-large) */
+/* NVM Addressing bits based on type (0-small, 1-large) */
+#define E1000_EECD_ADDR_BITS 0x00000400
 #define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
 #define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
 #define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
@@ -626,7 +631,8 @@
 #define MAX_PHY_MULTI_PAGE_REG 0xF
 
 /* Bit definitions for valid PHY IDs. */
-/* I = Integrated
+/*
+ * I = Integrated
  * E = External
  */
 #define M88E1000_E_PHY_ID 0x01410C50
@@ -653,37 +659,37 @@
 #define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
                                              /* Manual MDI configuration */
 #define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
-#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover,
-                                           * 100BASE-TX/10BASE-T:
-                                           * MDI Mode
-                                           */
-#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
-                                          * all speeds.
-                                          */
-                                       /* 1=Enable Extended 10BASE-T distance
-                                        * (Lower 10BASE-T RX Threshold)
-                                        * 0=Normal 10BASE-T RX Threshold */
-                                       /* 1=5-Bit interface in 100BASE-TX
-                                        * 0=MII interface in 100BASE-TX */
-#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T 0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE 0x0060
+/*
+ * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
+ * 0=Normal 10BASE-T Rx Threshold
+ */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
 
 /* M88E1000 PHY Specific Status Register */
 #define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
 #define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
 #define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
-#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M;
-                                           * 3=110-140M;4=>140M */
+/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */
+#define M88E1000_PSSR_CABLE_LENGTH 0x0380
 #define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
 #define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
 
 #define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
 
-/* Number of times we will attempt to autonegotiate before downshifting if we
- * are the master */
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
 #define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
 #define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
-/* Number of times we will attempt to autonegotiate before downshifting if we
- * are the slave */
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
 #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
 #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
 #define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
@@ -692,7 +698,8 @@
 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
 
-/* Bits...
+/*
+ * Bits...
  * 15-5: page
  * 4-0: register offset
  */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 4bf0c6c045c0..ffba63c95f85 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -61,7 +61,7 @@ struct e1000_info;
 	ndev_printk(KERN_NOTICE , netdev, format, ## arg)
 
 
-/* TX/RX descriptor defines */
+/* Tx/Rx descriptor defines */
 #define E1000_DEFAULT_TXD 256
 #define E1000_MAX_TXD 4096
 #define E1000_MIN_TXD 80
@@ -114,13 +114,13 @@ struct e1000_buffer {
 	dma_addr_t dma;
 	struct sk_buff *skb;
 	union {
-		/* TX */
+		/* Tx */
 		struct {
 			unsigned long time_stamp;
 			u16 length;
 			u16 next_to_watch;
 		};
-		/* RX */
+		/* Rx */
 		/* arrays of page information for packet split */
 		struct e1000_ps_page *ps_pages;
 	};
@@ -177,7 +177,7 @@ struct e1000_adapter {
 	u16 rx_itr;
 
 	/*
-	 * TX
+	 * Tx
 	 */
 	struct e1000_ring *tx_ring /* One per active queue */
 						____cacheline_aligned_in_smp;
@@ -199,7 +199,7 @@ struct e1000_adapter {
 	unsigned int total_rx_bytes;
 	unsigned int total_rx_packets;
 
-	/* TX stats */
+	/* Tx stats */
 	u64 tpt_old;
 	u64 colc_old;
 	u64 gotcl_old;
@@ -211,7 +211,7 @@ struct e1000_adapter {
 	u32 tx_dma_failed;
 
 	/*
-	 * RX
+	 * Rx
 	 */
 	bool (*clean_rx) (struct e1000_adapter *adapter,
 			  int *work_done, int work_to_do)
@@ -223,7 +223,7 @@ struct e1000_adapter {
 	u32 rx_int_delay;
 	u32 rx_abs_int_delay;
 
-	/* RX stats */
+	/* Rx stats */
 	u64 hw_csum_err;
 	u64 hw_csum_good;
 	u64 rx_hdr_split;
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 88657adf965f..265775447538 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -92,7 +92,8 @@
 /* In-Band Control Register (Page 194, Register 18) */
 #define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
 
-/* A table for the GG82563 cable length where the range is defined
+/*
+ * A table for the GG82563 cable length where the range is defined
  * with a lower bound at "index" and the upper bound at
  * "index + 5".
  */
@@ -167,12 +168,13 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
 		break;
 	}
 
 	nvm->type = e1000_nvm_eeprom_spi;
 
 	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
 		     E1000_EECD_SIZE_EX_SHIFT);
 
-	/* Added to a constant, "size" becomes the left-shift value
+	/*
+	 * Added to a constant, "size" becomes the left-shift value
 	 * for setting word_size.
 	 */
 	size += NVM_WORD_SIZE_BASE_SHIFT;
@@ -208,8 +210,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
 	/* Set rar entry count */
 	mac->rar_entry_count = E1000_RAR_ENTRIES;
 	/* Set if manageability features are enabled. */
-	mac->arc_subsystem_valid =
-		(er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
+	mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
 
 	/* check for link */
 	switch (hw->media_type) {
@@ -344,8 +345,10 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
 		if (!(swfw_sync & (fwmask | swmask)))
 			break;
 
-		/* Firmware currently using resource (fwmask)
-		 * or other software thread using resource (swmask) */
+		/*
+		 * Firmware currently using resource (fwmask)
+		 * or other software thread using resource (swmask)
+		 */
 		e1000e_put_hw_semaphore(hw);
 		mdelay(5);
 		i++;
@@ -407,7 +410,8 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG)
 		page_select = GG82563_PHY_PAGE_SELECT;
 	else
-		/* Use Alternative Page Select register to access
+		/*
+		 * Use Alternative Page Select register to access
 		 * registers 30 and 31
 		 */
 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
@@ -417,7 +421,8 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 	if (ret_val)
 		return ret_val;
 
-	/* The "ready" bit in the MDIC register may be incorrectly set
+	/*
+	 * The "ready" bit in the MDIC register may be incorrectly set
 	 * before the device has completed the "Page Select" MDI
 	 * transaction. So we wait 200us after each MDI command...
 	 */
@@ -462,7 +467,8 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG)
 		page_select = GG82563_PHY_PAGE_SELECT;
 	else
-		/* Use Alternative Page Select register to access
+		/*
+		 * Use Alternative Page Select register to access
 		 * registers 30 and 31
 		 */
 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
@@ -473,7 +479,8 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 		return ret_val;
 
 
-	/* The "ready" bit in the MDIC register may be incorrectly set
+	/*
+	 * The "ready" bit in the MDIC register may be incorrectly set
 	 * before the device has completed the "Page Select" MDI
 	 * transaction. So we wait 200us after each MDI command...
 	 */
@@ -554,7 +561,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 	u16 phy_data;
 	bool link;
 
-	/* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+	/*
+	 * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
 	 * forced whenever speed and duplex are forced.
 	 */
 	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -593,7 +601,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 			return ret_val;
 
 		if (!link) {
-			/* We didn't get link.
+			/*
+			 * We didn't get link.
 			 * Reset the DSP and cross our fingers.
 			 */
 			ret_val = e1000e_phy_reset_dsp(hw);
@@ -612,7 +621,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	/* Resetting the phy means we need to verify the TX_CLK corresponds
+	/*
+	 * Resetting the phy means we need to verify the TX_CLK corresponds
 	 * to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
 	 */
 	phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
@@ -621,7 +631,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 	else
 		phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
 
-	/* In addition, we must re-enable CRS on Tx for both half and full
+	/*
+	 * In addition, we must re-enable CRS on Tx for both half and full
 	 * duplex.
 	 */
 	phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
@@ -704,7 +715,8 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 	u32 icr;
 	s32 ret_val;
 
-	/* Prevent the PCI-E bus from sticking if there is no TLP connection
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
 	 * on the last TLP read/write transaction when MAC is reset.
 	 */
 	ret_val = e1000e_disable_pcie_master(hw);
@@ -808,7 +820,8 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
 	reg_data &= ~0x00100000;
 	E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
 
-	/* Clear all of the statistics registers (clear on read). It is
+	/*
+	 * Clear all of the statistics registers (clear on read). It is
 	 * important that we do this after we have tried to establish link
 	 * because the symbol error count will increment wildly if there
 	 * is no link.
@@ -881,7 +894,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	/* Options:
+	/*
+	 * Options:
 	 *   MDI/MDI-X = 0 (default)
 	 *   0 - Auto for all speeds
 	 *   1 - MDI mode
@@ -907,7 +921,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
 		break;
 	}
 
-	/* Options:
+	/*
+	 * Options:
 	 *   disable_polarity_correction = 0 (default)
 	 *       Automatic Correction for Reversed Cable Polarity
 	 *   0 - Disabled
@@ -928,10 +943,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
 		return ret_val;
 	}
 
-	/* Bypass RX and TX FIFO's */
-	ret_val = e1000e_write_kmrn_reg(hw,
-				E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
-				E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
+	/* Bypass Rx and Tx FIFO's */
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
+					E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
 				E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
 	if (ret_val)
 		return ret_val;
@@ -953,7 +967,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	/* Do not init these registers when the HW is in IAMT mode, since the
+	/*
+	 * Do not init these registers when the HW is in IAMT mode, since the
 	 * firmware will have already initialized them. We only initialize
 	 * them if the HW is not in IAMT mode.
 	 */
@@ -974,7 +989,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
 		return ret_val;
 	}
 
-	/* Workaround: Disable padding in Kumeran interface in the MAC
+	/*
+	 * Workaround: Disable padding in Kumeran interface in the MAC
 	 * and in the PHY to avoid CRC errors.
 	 */
 	ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
@@ -1007,9 +1023,11 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
 	ew32(CTRL, ctrl);
 
-	/* Set the mac to wait the maximum time between each
+	/*
+	 * Set the mac to wait the maximum time between each
 	 * iteration and increase the max iterations when
-	 * polling the phy; this fixes erroneous timeouts at 10Mbps. */
+	 * polling the phy; this fixes erroneous timeouts at 10Mbps.
+	 */
 	ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
 	if (ret_val)
 		return ret_val;
@@ -1026,9 +1044,8 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 	reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
-	ret_val = e1000e_write_kmrn_reg(hw,
-					E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
-					reg_data);
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+					reg_data);
 	if (ret_val)
 		return ret_val;
 
@@ -1056,9 +1073,8 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
 	u16 reg_data;
 
 	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
-	ret_val = e1000e_write_kmrn_reg(hw,
-					E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
-					reg_data);
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+					reg_data);
 	if (ret_val)
 		return ret_val;
 
@@ -1096,9 +1112,8 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
 	u32 tipg;
 
 	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
-	ret_val = e1000e_write_kmrn_reg(hw,
-					E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
-					reg_data);
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+					reg_data);
 	if (ret_val)
 		return ret_val;
 
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index f77a7427d3a0..3b94a87b5272 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -102,7 +102,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Interrupt test (offline)", "Loopback test (offline)",
 	"Link test (on/offline)"
 };
-#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
+#define E1000_TEST_LEN	ARRAY_SIZE(e1000_gstrings_test)
 
 static int e1000_get_settings(struct net_device *netdev,
 			      struct ethtool_cmd *ecmd)
@@ -226,8 +226,10 @@ static int e1000_set_settings(struct net_device *netdev,
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	/* When SoL/IDER sessions are active, autoneg/speed/duplex
-	 * cannot be changed */
+	/*
+	 * When SoL/IDER sessions are active, autoneg/speed/duplex
+	 * cannot be changed
+	 */
 	if (e1000_check_reset_block(hw)) {
 		ndev_err(netdev, "Cannot change link "
 			 "characteristics when SoL/IDER is active.\n");
@@ -558,8 +560,10 @@ static int e1000_set_eeprom(struct net_device *netdev,
 		ret_val = e1000_write_nvm(hw, first_word,
 					  last_word - first_word + 1, eeprom_buff);
 
-	/* Update the checksum over the first part of the EEPROM if needed
-	 * and flush shadow RAM for 82573 controllers */
+	/*
+	 * Update the checksum over the first part of the EEPROM if needed
+	 * and flush shadow RAM for 82573 controllers
+	 */
 	if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) ||
 			       (hw->mac.type == e1000_82573)))
 		e1000e_update_nvm_checksum(hw);
@@ -578,8 +582,10 @@ static void e1000_get_drvinfo(struct net_device *netdev,
 	strncpy(drvinfo->driver, e1000e_driver_name, 32);
 	strncpy(drvinfo->version, e1000e_driver_version, 32);
 
-	/* EEPROM image version # is reported as firmware version # for
-	 * PCI-E controllers */
+	/*
+	 * EEPROM image version # is reported as firmware version # for
+	 * PCI-E controllers
+	 */
 	e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data);
 	sprintf(firmware_version, "%d.%d-%d",
 		(eeprom_data & 0xF000) >> 12,
@@ -658,8 +664,10 @@ static int e1000_set_ringparam(struct net_device *netdev,
 		if (err)
 			goto err_setup_tx;
 
-		/* save the new, restore the old in order to free it,
-		 * then restore the new back again */
+		/*
+		 * restore the old in order to free it,
+		 * then add in the new
+		 */
 		adapter->rx_ring = rx_old;
 		adapter->tx_ring = tx_old;
 		e1000e_free_rx_resources(adapter);
@@ -758,7 +766,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 	u32 i;
 	u32 toggle;
 
-	/* The status register is Read Only, so a write should fail.
+	/*
+	 * The status register is Read Only, so a write should fail.
 	 * Some bits that get toggled are ignored.
 	 */
 	switch (mac->type) {
@@ -908,7 +917,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
 		mask = 1 << i;
 
 		if (!shared_int) {
-			/* Disable the interrupt to be reported in
+			/*
+			 * Disable the interrupt to be reported in
 			 * the cause register and then force the same
 			 * interrupt and see if one gets posted. If
 			 * an interrupt was posted to the bus, the
@@ -925,7 +935,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
925 } 935 }
926 } 936 }
927 937
928 /* Enable the interrupt to be reported in 938 /*
939 * Enable the interrupt to be reported in
929 * the cause register and then force the same 940 * the cause register and then force the same
930 * interrupt and see if one gets posted. If 941 * interrupt and see if one gets posted. If
931 * an interrupt was not posted to the bus, the 942 * an interrupt was not posted to the bus, the
@@ -942,7 +953,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
942 } 953 }
943 954
944 if (!shared_int) { 955 if (!shared_int) {
945 /* Disable the other interrupts to be reported in 956 /*
957 * Disable the other interrupts to be reported in
946 * the cause register and then force the other 958 * the cause register and then force the other
947 * interrupts and see if any get posted. If 959 * interrupts and see if any get posted. If
948 * an interrupt was posted to the bus, the 960 * an interrupt was posted to the bus, the
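
The three hunks above all describe the same mask/force/check pattern. A
user-space model of that pattern, with a plain variable standing in for the
interrupt cause register (nothing below is driver code; the names, the 10-bit
loop and the helper are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t posted_causes;   /* what the (fake) ISR observed          */
    static uint32_t enabled_mask;    /* stands in for the interrupt mask reg  */

    static void force_cause(uint32_t mask)
    {
        if (mask & enabled_mask)     /* only unmasked causes get posted */
            posted_causes |= mask;
    }

    int main(void)
    {
        int failed = 0;

        for (unsigned i = 0; i < 10; i++) {
            uint32_t mask = 1u << i;

            /* Masked: forcing the cause must not post an interrupt. */
            enabled_mask = ~mask;
            posted_causes = 0;
            force_cause(mask);
            if (posted_causes & mask)
                failed = 1;

            /* Unmasked: forcing the same cause must post an interrupt. */
            enabled_mask = mask;
            posted_causes = 0;
            force_cause(mask);
            if (!(posted_causes & mask))
                failed = 1;
        }
        printf("interrupt-test pattern %s\n", failed ? "FAILED" : "passed");
        return failed;
    }

The comments also describe a third pass for non-shared interrupts, in which the
remaining causes are masked, forced, and expected not to post anything.
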
@@ -1216,8 +1228,10 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1216 adapter->hw.phy.type == e1000_phy_m88) { 1228 adapter->hw.phy.type == e1000_phy_m88) {
1217 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1229 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1218 } else { 1230 } else {
1219 /* Set the ILOS bit on the fiber Nic if half duplex link is 1231 /*
1220 * detected. */ 1232 * Set the ILOS bit on the fiber Nic if half duplex link is
1233 * detected.
1234 */
1221 stat_reg = er32(STATUS); 1235 stat_reg = er32(STATUS);
1222 if ((stat_reg & E1000_STATUS_FD) == 0) 1236 if ((stat_reg & E1000_STATUS_FD) == 0)
1223 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); 1237 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
@@ -1225,7 +1239,8 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1225 1239
1226 ew32(CTRL, ctrl_reg); 1240 ew32(CTRL, ctrl_reg);
1227 1241
1228 /* Disable the receiver on the PHY so when a cable is plugged in, the 1242 /*
1243 * Disable the receiver on the PHY so when a cable is plugged in, the
1229 * PHY does not begin to autoneg when a cable is reconnected to the NIC. 1244 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
1230 */ 1245 */
1231 if (adapter->hw.phy.type == e1000_phy_m88) 1246 if (adapter->hw.phy.type == e1000_phy_m88)
@@ -1244,8 +1259,10 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
1244 1259
1245 /* special requirements for 82571/82572 fiber adapters */ 1260 /* special requirements for 82571/82572 fiber adapters */
1246 1261
1247 /* jump through hoops to make sure link is up because serdes 1262 /*
1248 * link is hardwired up */ 1263 * jump through hoops to make sure link is up because serdes
1264 * link is hardwired up
1265 */
1249 ctrl |= E1000_CTRL_SLU; 1266 ctrl |= E1000_CTRL_SLU;
1250 ew32(CTRL, ctrl); 1267 ew32(CTRL, ctrl);
1251 1268
@@ -1263,8 +1280,10 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
1263 ew32(CTRL, ctrl); 1280 ew32(CTRL, ctrl);
1264 } 1281 }
1265 1282
1266 /* special write to serdes control register to enable SerDes analog 1283 /*
1267 * loopback */ 1284 * special write to serdes control register to enable SerDes analog
1285 * loopback
1286 */
1268#define E1000_SERDES_LB_ON 0x410 1287#define E1000_SERDES_LB_ON 0x410
1269 ew32(SCTL, E1000_SERDES_LB_ON); 1288 ew32(SCTL, E1000_SERDES_LB_ON);
1270 msleep(10); 1289 msleep(10);
@@ -1279,8 +1298,10 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
1279 u32 ctrlext = er32(CTRL_EXT); 1298 u32 ctrlext = er32(CTRL_EXT);
1280 u32 ctrl = er32(CTRL); 1299 u32 ctrl = er32(CTRL);
1281 1300
1282 /* save CTRL_EXT to restore later, reuse an empty variable (unused 1301 /*
1283 on mac_type 80003es2lan) */ 1302 * save CTRL_EXT to restore later, reuse an empty variable (unused
1303 * on mac_type 80003es2lan)
1304 */
1284 adapter->tx_fifo_head = ctrlext; 1305 adapter->tx_fifo_head = ctrlext;
1285 1306
1286 /* clear the serdes mode bits, putting the device into mac loopback */ 1307 /* clear the serdes mode bits, putting the device into mac loopback */
@@ -1350,8 +1371,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
1350 if (hw->media_type == e1000_media_type_fiber || 1371 if (hw->media_type == e1000_media_type_fiber ||
1351 hw->media_type == e1000_media_type_internal_serdes) { 1372 hw->media_type == e1000_media_type_internal_serdes) {
1352 /* restore CTRL_EXT, stealing space from tx_fifo_head */ 1373 /* restore CTRL_EXT, stealing space from tx_fifo_head */
1353 ew32(CTRL_EXT, 1374 ew32(CTRL_EXT, adapter->tx_fifo_head);
1354 adapter->tx_fifo_head);
1355 adapter->tx_fifo_head = 0; 1375 adapter->tx_fifo_head = 0;
1356 } 1376 }
1357 /* fall through */ 1377 /* fall through */
@@ -1414,7 +1434,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1414 1434
1415 ew32(RDT, rx_ring->count - 1); 1435 ew32(RDT, rx_ring->count - 1);
1416 1436
1417 /* Calculate the loop count based on the largest descriptor ring 1437 /*
1438 * Calculate the loop count based on the largest descriptor ring
1418 * The idea is to wrap the largest ring a number of times using 64 1439 * The idea is to wrap the largest ring a number of times using 64
1419 * send/receive pairs during each loop 1440 * send/receive pairs during each loop
1420 */ 1441 */
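
The loop-count comment above can be read as simple arithmetic: take the larger
of the two descriptor rings and size the test so 64-packet bursts wrap it a
couple of times. A sketch of that calculation (the exact rounding is a guess;
only the burst size of 64 and the "largest ring" rule come from the comment):

    #include <stdio.h>

    static unsigned loopback_loop_count(unsigned tx_count, unsigned rx_count)
    {
        unsigned largest = tx_count > rx_count ? tx_count : rx_count;

        return (largest / 64) * 2 + 1;   /* enough bursts to wrap it ~twice */
    }

    int main(void)
    {
        printf("loops for 256/256 rings:  %u\n", loopback_loop_count(256, 256));
        printf("loops for 4096/256 rings: %u\n", loopback_loop_count(4096, 256));
        return 0;
    }
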
@@ -1454,7 +1475,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1454 l++; 1475 l++;
1455 if (l == rx_ring->count) 1476 if (l == rx_ring->count)
1456 l = 0; 1477 l = 0;
1457 /* time + 20 msecs (200 msecs on 2.4) is more than 1478 /*
1479 * time + 20 msecs (200 msecs on 2.4) is more than
1458 * enough time to complete the receives, if it's 1480 * enough time to complete the receives, if it's
1459 * exceeded, break and error off 1481 * exceeded, break and error off
1460 */ 1482 */
@@ -1473,8 +1495,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1473 1495
1474static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) 1496static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
1475{ 1497{
1476 /* PHY loopback cannot be performed if SoL/IDER 1498 /*
1477 * sessions are active */ 1499 * PHY loopback cannot be performed if SoL/IDER
1500 * sessions are active
1501 */
1478 if (e1000_check_reset_block(&adapter->hw)) { 1502 if (e1000_check_reset_block(&adapter->hw)) {
1479 ndev_err(adapter->netdev, "Cannot do PHY loopback test " 1503 ndev_err(adapter->netdev, "Cannot do PHY loopback test "
1480 "when SoL/IDER is active.\n"); 1504 "when SoL/IDER is active.\n");
@@ -1508,8 +1532,10 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
1508 int i = 0; 1532 int i = 0;
1509 hw->mac.serdes_has_link = 0; 1533 hw->mac.serdes_has_link = 0;
1510 1534
1511 /* On some blade server designs, link establishment 1535 /*
1512 * could take as long as 2-3 minutes */ 1536 * On some blade server designs, link establishment
1537 * could take as long as 2-3 minutes
1538 */
1513 do { 1539 do {
1514 hw->mac.ops.check_for_link(hw); 1540 hw->mac.ops.check_for_link(hw);
1515 if (hw->mac.serdes_has_link) 1541 if (hw->mac.serdes_has_link)
@@ -1562,8 +1588,10 @@ static void e1000_diag_test(struct net_device *netdev,
1562 1588
1563 ndev_info(netdev, "offline testing starting\n"); 1589 ndev_info(netdev, "offline testing starting\n");
1564 1590
1565 /* Link test performed before hardware reset so autoneg doesn't 1591 /*
1566 * interfere with test result */ 1592 * Link test performed before hardware reset so autoneg doesn't
1593 * interfere with test result
1594 */
1567 if (e1000_link_test(adapter, &data[4])) 1595 if (e1000_link_test(adapter, &data[4]))
1568 eth_test->flags |= ETH_TEST_FL_FAILED; 1596 eth_test->flags |= ETH_TEST_FL_FAILED;
1569 1597
@@ -1768,8 +1796,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
1768 1796
1769 switch (stringset) { 1797 switch (stringset) {
1770 case ETH_SS_TEST: 1798 case ETH_SS_TEST:
1771 memcpy(data, *e1000_gstrings_test, 1799 memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test));
1772 sizeof(e1000_gstrings_test));
1773 break; 1800 break;
1774 case ETH_SS_STATS: 1801 case ETH_SS_STATS:
1775 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 1802 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 916025b30fc3..2346e2cb32d1 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -66,14 +66,14 @@ enum e1e_registers {
66 E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */ 66 E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */
67 E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */ 67 E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */
68 E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */ 68 E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */
69 E1000_RCTL = 0x00100, /* RX Control - RW */ 69 E1000_RCTL = 0x00100, /* Rx Control - RW */
70 E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */ 70 E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */
71 E1000_TXCW = 0x00178, /* TX Configuration Word - RW */ 71 E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */
72 E1000_RXCW = 0x00180, /* RX Configuration Word - RO */ 72 E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */
73 E1000_TCTL = 0x00400, /* TX Control - RW */ 73 E1000_TCTL = 0x00400, /* Tx Control - RW */
74 E1000_TCTL_EXT = 0x00404, /* Extended TX Control - RW */ 74 E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */
75 E1000_TIPG = 0x00410, /* TX Inter-packet gap -RW */ 75 E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */
76 E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle - RW */ 76 E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */
77 E1000_LEDCTL = 0x00E00, /* LED Control - RW */ 77 E1000_LEDCTL = 0x00E00, /* LED Control - RW */
78 E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ 78 E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
79 E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ 79 E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
@@ -87,12 +87,12 @@ enum e1e_registers {
87 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ 87 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
88 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ 88 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
89 E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ 89 E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
90 E1000_RDBAL = 0x02800, /* RX Descriptor Base Address Low - RW */ 90 E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */
91 E1000_RDBAH = 0x02804, /* RX Descriptor Base Address High - RW */ 91 E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */
92 E1000_RDLEN = 0x02808, /* RX Descriptor Length - RW */ 92 E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */
93 E1000_RDH = 0x02810, /* RX Descriptor Head - RW */ 93 E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */
94 E1000_RDT = 0x02818, /* RX Descriptor Tail - RW */ 94 E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */
95 E1000_RDTR = 0x02820, /* RX Delay Timer - RW */ 95 E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
96 E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ 96 E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */
97 97
98/* Convenience macros 98/* Convenience macros
@@ -105,17 +105,17 @@ enum e1e_registers {
105 */ 105 */
106#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) 106#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8))
107 E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ 107 E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */
108 E1000_TDBAL = 0x03800, /* TX Descriptor Base Address Low - RW */ 108 E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */
109 E1000_TDBAH = 0x03804, /* TX Descriptor Base Address High - RW */ 109 E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */
110 E1000_TDLEN = 0x03808, /* TX Descriptor Length - RW */ 110 E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */
111 E1000_TDH = 0x03810, /* TX Descriptor Head - RW */ 111 E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */
112 E1000_TDT = 0x03818, /* TX Descriptor Tail - RW */ 112 E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */
113 E1000_TIDV = 0x03820, /* TX Interrupt Delay Value - RW */ 113 E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */
114 E1000_TXDCTL = 0x03828, /* TX Descriptor Control - RW */ 114 E1000_TXDCTL = 0x03828, /* Tx Descriptor Control - RW */
115 E1000_TADV = 0x0382C, /* TX Interrupt Absolute Delay Val - RW */ 115 E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */
116 E1000_TARC0 = 0x03840, /* TX Arbitration Count (0) */ 116 E1000_TARC0 = 0x03840, /* Tx Arbitration Count (0) */
117 E1000_TXDCTL1 = 0x03928, /* TX Descriptor Control (1) - RW */ 117 E1000_TXDCTL1 = 0x03928, /* Tx Descriptor Control (1) - RW */
118 E1000_TARC1 = 0x03940, /* TX Arbitration Count (1) */ 118 E1000_TARC1 = 0x03940, /* Tx Arbitration Count (1) */
119 E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */ 119 E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */
120 E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */ 120 E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
121 E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */ 121 E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */
@@ -127,53 +127,53 @@ enum e1e_registers {
127 E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */ 127 E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */
128 E1000_COLC = 0x04028, /* Collision Count - R/clr */ 128 E1000_COLC = 0x04028, /* Collision Count - R/clr */
129 E1000_DC = 0x04030, /* Defer Count - R/clr */ 129 E1000_DC = 0x04030, /* Defer Count - R/clr */
130 E1000_TNCRS = 0x04034, /* TX-No CRS - R/clr */ 130 E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */
131 E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */ 131 E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */
132 E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */ 132 E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */
133 E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */ 133 E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */
134 E1000_XONRXC = 0x04048, /* XON RX Count - R/clr */ 134 E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */
135 E1000_XONTXC = 0x0404C, /* XON TX Count - R/clr */ 135 E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */
136 E1000_XOFFRXC = 0x04050, /* XOFF RX Count - R/clr */ 136 E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */
137 E1000_XOFFTXC = 0x04054, /* XOFF TX Count - R/clr */ 137 E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */
138 E1000_FCRUC = 0x04058, /* Flow Control RX Unsupported Count- R/clr */ 138 E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */
139 E1000_PRC64 = 0x0405C, /* Packets RX (64 bytes) - R/clr */ 139 E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */
140 E1000_PRC127 = 0x04060, /* Packets RX (65-127 bytes) - R/clr */ 140 E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */
141 E1000_PRC255 = 0x04064, /* Packets RX (128-255 bytes) - R/clr */ 141 E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */
142 E1000_PRC511 = 0x04068, /* Packets RX (255-511 bytes) - R/clr */ 142 E1000_PRC511 = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */
143 E1000_PRC1023 = 0x0406C, /* Packets RX (512-1023 bytes) - R/clr */ 143 E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */
144 E1000_PRC1522 = 0x04070, /* Packets RX (1024-1522 bytes) - R/clr */ 144 E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */
145 E1000_GPRC = 0x04074, /* Good Packets RX Count - R/clr */ 145 E1000_GPRC = 0x04074, /* Good Packets Rx Count - R/clr */
146 E1000_BPRC = 0x04078, /* Broadcast Packets RX Count - R/clr */ 146 E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */
147 E1000_MPRC = 0x0407C, /* Multicast Packets RX Count - R/clr */ 147 E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */
148 E1000_GPTC = 0x04080, /* Good Packets TX Count - R/clr */ 148 E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */
149 E1000_GORCL = 0x04088, /* Good Octets RX Count Low - R/clr */ 149 E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */
150 E1000_GORCH = 0x0408C, /* Good Octets RX Count High - R/clr */ 150 E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */
151 E1000_GOTCL = 0x04090, /* Good Octets TX Count Low - R/clr */ 151 E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */
152 E1000_GOTCH = 0x04094, /* Good Octets TX Count High - R/clr */ 152 E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */
153 E1000_RNBC = 0x040A0, /* RX No Buffers Count - R/clr */ 153 E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */
154 E1000_RUC = 0x040A4, /* RX Undersize Count - R/clr */ 154 E1000_RUC = 0x040A4, /* Rx Undersize Count - R/clr */
155 E1000_RFC = 0x040A8, /* RX Fragment Count - R/clr */ 155 E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */
156 E1000_ROC = 0x040AC, /* RX Oversize Count - R/clr */ 156 E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */
157 E1000_RJC = 0x040B0, /* RX Jabber Count - R/clr */ 157 E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */
158 E1000_MGTPRC = 0x040B4, /* Management Packets RX Count - R/clr */ 158 E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */
159 E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */ 159 E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */
160 E1000_MGTPTC = 0x040BC, /* Management Packets TX Count - R/clr */ 160 E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */
161 E1000_TORL = 0x040C0, /* Total Octets RX Low - R/clr */ 161 E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */
162 E1000_TORH = 0x040C4, /* Total Octets RX High - R/clr */ 162 E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */
163 E1000_TOTL = 0x040C8, /* Total Octets TX Low - R/clr */ 163 E1000_TOTL = 0x040C8, /* Total Octets Tx Low - R/clr */
164 E1000_TOTH = 0x040CC, /* Total Octets TX High - R/clr */ 164 E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */
165 E1000_TPR = 0x040D0, /* Total Packets RX - R/clr */ 165 E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */
166 E1000_TPT = 0x040D4, /* Total Packets TX - R/clr */ 166 E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */
167 E1000_PTC64 = 0x040D8, /* Packets TX (64 bytes) - R/clr */ 167 E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */
168 E1000_PTC127 = 0x040DC, /* Packets TX (65-127 bytes) - R/clr */ 168 E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */
169 E1000_PTC255 = 0x040E0, /* Packets TX (128-255 bytes) - R/clr */ 169 E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */
170 E1000_PTC511 = 0x040E4, /* Packets TX (256-511 bytes) - R/clr */ 170 E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */
171 E1000_PTC1023 = 0x040E8, /* Packets TX (512-1023 bytes) - R/clr */ 171 E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */
172 E1000_PTC1522 = 0x040EC, /* Packets TX (1024-1522 Bytes) - R/clr */ 172 E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */
173 E1000_MPTC = 0x040F0, /* Multicast Packets TX Count - R/clr */ 173 E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */
174 E1000_BPTC = 0x040F4, /* Broadcast Packets TX Count - R/clr */ 174 E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */
175 E1000_TSCTC = 0x040F8, /* TCP Segmentation Context TX - R/clr */ 175 E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */
176 E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context TX Fail - R/clr */ 176 E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */
177 E1000_IAC = 0x04100, /* Interrupt Assertion Count */ 177 E1000_IAC = 0x04100, /* Interrupt Assertion Count */
178 E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */ 178 E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */
179 E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */ 179 E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */
@@ -183,7 +183,7 @@ enum e1e_registers {
183 E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */ 183 E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */
184 E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ 184 E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
185 E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ 185 E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */
186 E1000_RXCSUM = 0x05000, /* RX Checksum Control - RW */ 186 E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */
187 E1000_RFCTL = 0x05008, /* Receive Filter Control */ 187 E1000_RFCTL = 0x05008, /* Receive Filter Control */
188 E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ 188 E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */
189 E1000_RA = 0x05400, /* Receive Address - RW Array */ 189 E1000_RA = 0x05400, /* Receive Address - RW Array */
@@ -250,8 +250,8 @@ enum e1e_registers {
250#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F 250#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
251 251
252#define E1000_HICR_EN 0x01 /* Enable bit - RO */ 252#define E1000_HICR_EN 0x01 /* Enable bit - RO */
253#define E1000_HICR_C 0x02 /* Driver sets this bit when done 253/* Driver sets this bit when done to put command in RAM */
254 * to put command in RAM */ 254#define E1000_HICR_C 0x02
255#define E1000_HICR_FW_RESET_ENABLE 0x40 255#define E1000_HICR_FW_RESET_ENABLE 0x40
256#define E1000_HICR_FW_RESET 0x80 256#define E1000_HICR_FW_RESET 0x80
257 257
@@ -685,8 +685,7 @@ struct e1000_mac_operations {
685 s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); 685 s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
686 s32 (*led_on)(struct e1000_hw *); 686 s32 (*led_on)(struct e1000_hw *);
687 s32 (*led_off)(struct e1000_hw *); 687 s32 (*led_off)(struct e1000_hw *);
688 void (*mc_addr_list_update)(struct e1000_hw *, u8 *, u32, u32, 688 void (*mc_addr_list_update)(struct e1000_hw *, u8 *, u32, u32, u32);
689 u32);
690 s32 (*reset_hw)(struct e1000_hw *); 689 s32 (*reset_hw)(struct e1000_hw *);
691 s32 (*init_hw)(struct e1000_hw *); 690 s32 (*init_hw)(struct e1000_hw *);
692 s32 (*setup_link)(struct e1000_hw *); 691 s32 (*setup_link)(struct e1000_hw *);
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 0ae39550768d..844015648110 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -243,8 +243,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
243 u32 sector_end_addr; 243 u32 sector_end_addr;
244 u16 i; 244 u16 i;
245 245
246 /* Can't read flash registers if the register set isn't mapped. 246 /* Can't read flash registers if the register set isn't mapped. */
247 */
248 if (!hw->flash_address) { 247 if (!hw->flash_address) {
249 hw_dbg(hw, "ERROR: Flash registers not mapped\n"); 248 hw_dbg(hw, "ERROR: Flash registers not mapped\n");
250 return -E1000_ERR_CONFIG; 249 return -E1000_ERR_CONFIG;
@@ -254,17 +253,21 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
254 253
255 gfpreg = er32flash(ICH_FLASH_GFPREG); 254 gfpreg = er32flash(ICH_FLASH_GFPREG);
256 255
257 /* sector_X_addr is a "sector"-aligned address (4096 bytes) 256 /*
257 * sector_X_addr is a "sector"-aligned address (4096 bytes)
258 * Add 1 to sector_end_addr since this sector is included in 258 * Add 1 to sector_end_addr since this sector is included in
259 * the overall size. */ 259 * the overall size.
260 */
260 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; 261 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
261 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; 262 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
262 263
263 /* flash_base_addr is byte-aligned */ 264 /* flash_base_addr is byte-aligned */
264 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; 265 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
265 266
266 /* find total size of the NVM, then cut in half since the total 267 /*
267 * size represents two separate NVM banks. */ 268 * find total size of the NVM, then cut in half since the total
269 * size represents two separate NVM banks.
270 */
268 nvm->flash_bank_size = (sector_end_addr - sector_base_addr) 271 nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
269 << FLASH_SECTOR_ADDR_SHIFT; 272 << FLASH_SECTOR_ADDR_SHIFT;
270 nvm->flash_bank_size /= 2; 273 nvm->flash_bank_size /= 2;
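
The bank-size arithmetic in this hunk is easy to check by hand: both fields of
GFPREG are sector numbers, the end sector is made inclusive by adding 1, the
difference is scaled to bytes, and the result is halved because the region
holds two NVM banks. A standalone example (the GFPREG value, the mask and the
4096-byte sector shift below are illustrative, not the driver's definitions):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t gfpreg       = 0x00070001;   /* made-up register value       */
        uint32_t base_mask    = 0x1FFF;       /* sector-number field, assumed */
        uint32_t sector_shift = 12;           /* 4096-byte sectors            */

        uint32_t sector_base_addr = gfpreg & base_mask;
        uint32_t sector_end_addr  = ((gfpreg >> 16) & base_mask) + 1; /* inclusive */

        uint32_t flash_bank_size =
                (sector_end_addr - sector_base_addr) << sector_shift;
        flash_bank_size /= 2;                 /* two banks share the region   */

        printf("per-bank size: %u bytes\n", (unsigned)flash_bank_size);
        return 0;
    }
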
@@ -496,7 +499,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
496 if (ret_val) 499 if (ret_val)
497 return ret_val; 500 return ret_val;
498 501
499 /* Initialize the PHY from the NVM on ICH platforms. This 502 /*
503 * Initialize the PHY from the NVM on ICH platforms. This
500 * is needed due to an issue where the NVM configuration is 504 * is needed due to an issue where the NVM configuration is
501 * not properly autoloaded after power transitions. 505 * not properly autoloaded after power transitions.
502 * Therefore, after each PHY reset, we will load the 506 * Therefore, after each PHY reset, we will load the
@@ -523,7 +527,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
523 udelay(100); 527 udelay(100);
524 } while ((!data) && --loop); 528 } while ((!data) && --loop);
525 529
526 /* If basic configuration is incomplete before the above loop 530 /*
531 * If basic configuration is incomplete before the above loop
527 * count reaches 0, loading the configuration from NVM will 532 * count reaches 0, loading the configuration from NVM will
528 * leave the PHY in a bad state possibly resulting in no link. 533 * leave the PHY in a bad state possibly resulting in no link.
529 */ 534 */
@@ -536,8 +541,10 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
536 data &= ~E1000_STATUS_LAN_INIT_DONE; 541 data &= ~E1000_STATUS_LAN_INIT_DONE;
537 ew32(STATUS, data); 542 ew32(STATUS, data);
538 543
539 /* Make sure HW does not configure LCD from PHY 544 /*
540 * extended configuration before SW configuration */ 545 * Make sure HW does not configure LCD from PHY
546 * extended configuration before SW configuration
547 */
541 data = er32(EXTCNF_CTRL); 548 data = er32(EXTCNF_CTRL);
542 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) 549 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
543 return 0; 550 return 0;
@@ -551,8 +558,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
551 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 558 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
552 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 559 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
553 560
554 /* Configure LCD from extended configuration 561 /* Configure LCD from extended configuration region. */
555 * region. */
556 562
557 /* cnf_base_addr is in DWORD */ 563 /* cnf_base_addr is in DWORD */
558 word_addr = (u16)(cnf_base_addr << 1); 564 word_addr = (u16)(cnf_base_addr << 1);
@@ -681,8 +687,8 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
681 s32 ret_val; 687 s32 ret_val;
682 u16 phy_data, offset, mask; 688 u16 phy_data, offset, mask;
683 689
684 /* Polarity is determined based on the reversal feature 690 /*
685 * being enabled. 691 * Polarity is determined based on the reversal feature being enabled.
686 */ 692 */
687 if (phy->polarity_correction) { 693 if (phy->polarity_correction) {
688 offset = IFE_PHY_EXTENDED_STATUS_CONTROL; 694 offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
@@ -731,8 +737,10 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
731 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; 737 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
732 ew32(PHY_CTRL, phy_ctrl); 738 ew32(PHY_CTRL, phy_ctrl);
733 739
734 /* Call gig speed drop workaround on LPLU before accessing 740 /*
735 * any PHY registers */ 741 * Call gig speed drop workaround on LPLU before accessing
742 * any PHY registers
743 */
736 if ((hw->mac.type == e1000_ich8lan) && 744 if ((hw->mac.type == e1000_ich8lan) &&
737 (hw->phy.type == e1000_phy_igp_3)) 745 (hw->phy.type == e1000_phy_igp_3))
738 e1000e_gig_downshift_workaround_ich8lan(hw); 746 e1000e_gig_downshift_workaround_ich8lan(hw);
@@ -747,30 +755,32 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
747 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; 755 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
748 ew32(PHY_CTRL, phy_ctrl); 756 ew32(PHY_CTRL, phy_ctrl);
749 757
750 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 758 /*
759 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
751 * during Dx states where the power conservation is most 760 * during Dx states where the power conservation is most
752 * important. During driver activity we should enable 761 * important. During driver activity we should enable
753 * SmartSpeed, so performance is maintained. */ 762 * SmartSpeed, so performance is maintained.
763 */
754 if (phy->smart_speed == e1000_smart_speed_on) { 764 if (phy->smart_speed == e1000_smart_speed_on) {
755 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 765 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
756 &data); 766 &data);
757 if (ret_val) 767 if (ret_val)
758 return ret_val; 768 return ret_val;
759 769
760 data |= IGP01E1000_PSCFR_SMART_SPEED; 770 data |= IGP01E1000_PSCFR_SMART_SPEED;
761 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 771 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
762 data); 772 data);
763 if (ret_val) 773 if (ret_val)
764 return ret_val; 774 return ret_val;
765 } else if (phy->smart_speed == e1000_smart_speed_off) { 775 } else if (phy->smart_speed == e1000_smart_speed_off) {
766 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 776 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
767 &data); 777 &data);
768 if (ret_val) 778 if (ret_val)
769 return ret_val; 779 return ret_val;
770 780
771 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 781 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
772 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 782 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
773 data); 783 data);
774 if (ret_val) 784 if (ret_val)
775 return ret_val; 785 return ret_val;
776 } 786 }
@@ -804,34 +814,32 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
804 if (!active) { 814 if (!active) {
805 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; 815 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
806 ew32(PHY_CTRL, phy_ctrl); 816 ew32(PHY_CTRL, phy_ctrl);
807 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 817 /*
818 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
808 * during Dx states where the power conservation is most 819 * during Dx states where the power conservation is most
809 * important. During driver activity we should enable 820 * important. During driver activity we should enable
810 * SmartSpeed, so performance is maintained. */ 821 * SmartSpeed, so performance is maintained.
822 */
811 if (phy->smart_speed == e1000_smart_speed_on) { 823 if (phy->smart_speed == e1000_smart_speed_on) {
812 ret_val = e1e_rphy(hw, 824 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
813 IGP01E1000_PHY_PORT_CONFIG, 825 &data);
814 &data);
815 if (ret_val) 826 if (ret_val)
816 return ret_val; 827 return ret_val;
817 828
818 data |= IGP01E1000_PSCFR_SMART_SPEED; 829 data |= IGP01E1000_PSCFR_SMART_SPEED;
819 ret_val = e1e_wphy(hw, 830 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
820 IGP01E1000_PHY_PORT_CONFIG, 831 data);
821 data);
822 if (ret_val) 832 if (ret_val)
823 return ret_val; 833 return ret_val;
824 } else if (phy->smart_speed == e1000_smart_speed_off) { 834 } else if (phy->smart_speed == e1000_smart_speed_off) {
825 ret_val = e1e_rphy(hw, 835 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
826 IGP01E1000_PHY_PORT_CONFIG, 836 &data);
827 &data);
828 if (ret_val) 837 if (ret_val)
829 return ret_val; 838 return ret_val;
830 839
831 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 840 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
832 ret_val = e1e_wphy(hw, 841 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
833 IGP01E1000_PHY_PORT_CONFIG, 842 data);
834 data);
835 if (ret_val) 843 if (ret_val)
836 return ret_val; 844 return ret_val;
837 } 845 }
@@ -841,23 +849,21 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
841 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; 849 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
842 ew32(PHY_CTRL, phy_ctrl); 850 ew32(PHY_CTRL, phy_ctrl);
843 851
844 /* Call gig speed drop workaround on LPLU before accessing 852 /*
845 * any PHY registers */ 853 * Call gig speed drop workaround on LPLU before accessing
854 * any PHY registers
855 */
846 if ((hw->mac.type == e1000_ich8lan) && 856 if ((hw->mac.type == e1000_ich8lan) &&
847 (hw->phy.type == e1000_phy_igp_3)) 857 (hw->phy.type == e1000_phy_igp_3))
848 e1000e_gig_downshift_workaround_ich8lan(hw); 858 e1000e_gig_downshift_workaround_ich8lan(hw);
849 859
850 /* When LPLU is enabled, we should disable SmartSpeed */ 860 /* When LPLU is enabled, we should disable SmartSpeed */
851 ret_val = e1e_rphy(hw, 861 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
852 IGP01E1000_PHY_PORT_CONFIG,
853 &data);
854 if (ret_val) 862 if (ret_val)
855 return ret_val; 863 return ret_val;
856 864
857 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 865 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
858 ret_val = e1e_wphy(hw, 866 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
859 IGP01E1000_PHY_PORT_CONFIG,
860 data);
861 } 867 }
862 868
863 return 0; 869 return 0;
@@ -944,7 +950,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
944 950
945 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 951 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
946 952
947 /* Either we should have a hardware SPI cycle in progress 953 /*
954 * Either we should have a hardware SPI cycle in progress
948 * bit to check against, in order to start a new cycle or 955 * bit to check against, in order to start a new cycle or
949 * FDONE bit should be changed in the hardware so that it 956 * FDONE bit should be changed in the hardware so that it
950 * is 1 after hardware reset, which can then be used as an 957 * is 1 after hardware reset, which can then be used as an
@@ -953,15 +960,19 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
953 */ 960 */
954 961
955 if (hsfsts.hsf_status.flcinprog == 0) { 962 if (hsfsts.hsf_status.flcinprog == 0) {
956 /* There is no cycle running at present, 963 /*
957 * so we can start a cycle */ 964 * There is no cycle running at present,
958 /* Begin by setting Flash Cycle Done. */ 965 * so we can start a cycle
966 * Begin by setting Flash Cycle Done.
967 */
959 hsfsts.hsf_status.flcdone = 1; 968 hsfsts.hsf_status.flcdone = 1;
960 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 969 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
961 ret_val = 0; 970 ret_val = 0;
962 } else { 971 } else {
963 /* otherwise poll for sometime so the current 972 /*
964 * cycle has a chance to end before giving up. */ 973 * otherwise poll for sometime so the current
974 * cycle has a chance to end before giving up.
975 */
965 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { 976 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
966 hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS); 977 hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS);
967 if (hsfsts.hsf_status.flcinprog == 0) { 978 if (hsfsts.hsf_status.flcinprog == 0) {
@@ -971,8 +982,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
971 udelay(1); 982 udelay(1);
972 } 983 }
973 if (ret_val == 0) { 984 if (ret_val == 0) {
974 /* Successful in waiting for previous cycle to timeout, 985 /*
975 * now set the Flash Cycle Done. */ 986 * Successful in waiting for previous cycle to timeout,
987 * now set the Flash Cycle Done.
988 */
976 hsfsts.hsf_status.flcdone = 1; 989 hsfsts.hsf_status.flcdone = 1;
977 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 990 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
978 } else { 991 } else {
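
The wait-for-idle logic spelled out in the comments above boils down to: if no
flash cycle is in progress, claim the hardware by setting the cycle-done bit;
otherwise poll for the in-progress bit to clear before giving up. A compilable
model with a struct standing in for the HSFSTS register (the stub read function
and its call-count trick are purely illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct flash_status {
        bool flcinprog;   /* a flash cycle is currently in progress */
        bool flcdone;     /* the last flash cycle has completed     */
    };

    static struct flash_status read_status(void)
    {
        /* Stand-in for reading HSFSTS; pretends the old cycle drains
         * after a couple of polls. */
        static int calls;
        struct flash_status s = { .flcinprog = (++calls < 3), .flcdone = false };
        return s;
    }

    static int flash_cycle_init(unsigned timeout)
    {
        struct flash_status s = read_status();

        if (!s.flcinprog) {
            s.flcdone = true;          /* no cycle running: mark done, start */
            return 0;                  /* (the driver writes this back)      */
        }

        for (unsigned i = 0; i < timeout; i++) {   /* let the old cycle end   */
            s = read_status();
            if (!s.flcinprog) {
                s.flcdone = true;                  /* previous cycle drained  */
                return 0;
            }
        }
        return -1;                                 /* still busy: give up     */
    }

    int main(void)
    {
        printf("flash_cycle_init: %s\n", flash_cycle_init(16) ? "timed out" : "ok");
        return 0;
    }
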
@@ -1077,10 +1090,12 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
1077 ret_val = e1000_flash_cycle_ich8lan(hw, 1090 ret_val = e1000_flash_cycle_ich8lan(hw,
1078 ICH_FLASH_READ_COMMAND_TIMEOUT); 1091 ICH_FLASH_READ_COMMAND_TIMEOUT);
1079 1092
1080 /* Check if FCERR is set to 1, if set to 1, clear it 1093 /*
1094 * Check if FCERR is set to 1, if set to 1, clear it
1081 * and try the whole sequence a few more times, else 1095 * and try the whole sequence a few more times, else
1082 * read in (shift in) the Flash Data0, the order is 1096 * read in (shift in) the Flash Data0, the order is
1083 * least significant byte first msb to lsb */ 1097 * least significant byte first msb to lsb
1098 */
1084 if (ret_val == 0) { 1099 if (ret_val == 0) {
1085 flash_data = er32flash(ICH_FLASH_FDATA0); 1100 flash_data = er32flash(ICH_FLASH_FDATA0);
1086 if (size == 1) { 1101 if (size == 1) {
@@ -1090,7 +1105,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
1090 } 1105 }
1091 break; 1106 break;
1092 } else { 1107 } else {
1093 /* If we've gotten here, then things are probably 1108 /*
1109 * If we've gotten here, then things are probably
1094 * completely hosed, but if the error condition is 1110 * completely hosed, but if the error condition is
1095 * detected, it won't hurt to give it another try... 1111 * detected, it won't hurt to give it another try...
1096 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 1112 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
@@ -1168,18 +1184,20 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1168 1184
1169 ret_val = e1000e_update_nvm_checksum_generic(hw); 1185 ret_val = e1000e_update_nvm_checksum_generic(hw);
1170 if (ret_val) 1186 if (ret_val)
1171 return ret_val;; 1187 return ret_val;
1172 1188
1173 if (nvm->type != e1000_nvm_flash_sw) 1189 if (nvm->type != e1000_nvm_flash_sw)
1174 return ret_val;; 1190 return ret_val;
1175 1191
1176 ret_val = e1000_acquire_swflag_ich8lan(hw); 1192 ret_val = e1000_acquire_swflag_ich8lan(hw);
1177 if (ret_val) 1193 if (ret_val)
1178 return ret_val;; 1194 return ret_val;
1179 1195
1180 /* We're writing to the opposite bank so if we're on bank 1, 1196 /*
1197 * We're writing to the opposite bank so if we're on bank 1,
1181 * write to bank 0 etc. We also need to erase the segment that 1198 * write to bank 0 etc. We also need to erase the segment that
1182 * is going to be written */ 1199 * is going to be written
1200 */
1183 if (!(er32(EECD) & E1000_EECD_SEC1VAL)) { 1201 if (!(er32(EECD) & E1000_EECD_SEC1VAL)) {
1184 new_bank_offset = nvm->flash_bank_size; 1202 new_bank_offset = nvm->flash_bank_size;
1185 old_bank_offset = 0; 1203 old_bank_offset = 0;
@@ -1191,9 +1209,11 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1191 } 1209 }
1192 1210
1193 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { 1211 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
1194 /* Determine whether to write the value stored 1212 /*
1213 * Determine whether to write the value stored
1195 * in the other NVM bank or a modified value stored 1214 * in the other NVM bank or a modified value stored
1196 * in the shadow RAM */ 1215 * in the shadow RAM
1216 */
1197 if (dev_spec->shadow_ram[i].modified) { 1217 if (dev_spec->shadow_ram[i].modified) {
1198 data = dev_spec->shadow_ram[i].value; 1218 data = dev_spec->shadow_ram[i].value;
1199 } else { 1219 } else {
@@ -1202,12 +1222,14 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1202 &data); 1222 &data);
1203 } 1223 }
1204 1224
1205 /* If the word is 0x13, then make sure the signature bits 1225 /*
1226 * If the word is 0x13, then make sure the signature bits
1206 * (15:14) are 11b until the commit has completed. 1227 * (15:14) are 11b until the commit has completed.
1207 * This will allow us to write 10b which indicates the 1228 * This will allow us to write 10b which indicates the
1208 * signature is valid. We want to do this after the write 1229 * signature is valid. We want to do this after the write
1209 * has completed so that we don't mark the segment valid 1230 * has completed so that we don't mark the segment valid
1210 * while the write is still in progress */ 1231 * while the write is still in progress
1232 */
1211 if (i == E1000_ICH_NVM_SIG_WORD) 1233 if (i == E1000_ICH_NVM_SIG_WORD)
1212 data |= E1000_ICH_NVM_SIG_MASK; 1234 data |= E1000_ICH_NVM_SIG_MASK;
1213 1235
@@ -1230,18 +1252,22 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1230 break; 1252 break;
1231 } 1253 }
1232 1254
1233 /* Don't bother writing the segment valid bits if sector 1255 /*
1234 * programming failed. */ 1256 * Don't bother writing the segment valid bits if sector
1257 * programming failed.
1258 */
1235 if (ret_val) { 1259 if (ret_val) {
1236 hw_dbg(hw, "Flash commit failed.\n"); 1260 hw_dbg(hw, "Flash commit failed.\n");
1237 e1000_release_swflag_ich8lan(hw); 1261 e1000_release_swflag_ich8lan(hw);
1238 return ret_val; 1262 return ret_val;
1239 } 1263 }
1240 1264
1241 /* Finally validate the new segment by setting bit 15:14 1265 /*
1266 * Finally validate the new segment by setting bit 15:14
1242 * to 10b in word 0x13 , this can be done without an 1267 * to 10b in word 0x13 , this can be done without an
1243 * erase as well since these bits are 11 to start with 1268 * erase as well since these bits are 11 to start with
1244 * and we need to change bit 14 to 0b */ 1269 * and we need to change bit 14 to 0b
1270 */
1245 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; 1271 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
1246 e1000_read_flash_word_ich8lan(hw, act_offset, &data); 1272 e1000_read_flash_word_ich8lan(hw, act_offset, &data);
1247 data &= 0xBFFF; 1273 data &= 0xBFFF;
@@ -1253,10 +1279,12 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1253 return ret_val; 1279 return ret_val;
1254 } 1280 }
1255 1281
1256 /* And invalidate the previously valid segment by setting 1282 /*
1283 * And invalidate the previously valid segment by setting
1257 * its signature word (0x13) high_byte to 0b. This can be 1284 * its signature word (0x13) high_byte to 0b. This can be
1258 * done without an erase because flash erase sets all bits 1285 * done without an erase because flash erase sets all bits
1259 * to 1's. We can write 1's to 0's without an erase */ 1286 * to 1's. We can write 1's to 0's without an erase
1287 */
1260 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; 1288 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
1261 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); 1289 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
1262 if (ret_val) { 1290 if (ret_val) {
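
The signature handling described across the last few hunks is plain bit
manipulation on word 0x13 of each bank: the new bank keeps bits 15:14 at 11b
while it is written, drops bit 14 afterwards so the field reads 10b (valid),
and the old bank is invalidated by zeroing the high byte. All of this only
turns 1s into 0s and therefore needs no erase. A worked example with made-up
data words:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t new_sig = 0xFFFF;   /* erased flash reads back all 1s         */
        uint16_t old_sig = 0xBFFF;   /* currently valid bank: bits 15:14 = 10b */

        /* After the new bank is fully written, clear bit 14 so its signature
         * field goes from 11b to 10b and the bank becomes the valid one. */
        new_sig &= 0xBFFF;

        /* Retire the old bank by clearing the high byte of its signature
         * word; again only 1 -> 0 transitions, so no erase is required. */
        old_sig &= 0x00FF;

        printf("new bank signature: 0x%04X (bits 15:14 = %d%d)\n",
               (unsigned)new_sig, (new_sig >> 15) & 1, (new_sig >> 14) & 1);
        printf("old bank signature: 0x%04X\n", (unsigned)old_sig);
        return 0;
    }
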
@@ -1272,7 +1300,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1272 1300
1273 e1000_release_swflag_ich8lan(hw); 1301 e1000_release_swflag_ich8lan(hw);
1274 1302
1275 /* Reload the EEPROM, or else modifications will not appear 1303 /*
1304 * Reload the EEPROM, or else modifications will not appear
1276 * until after the next adapter reset. 1305 * until after the next adapter reset.
1277 */ 1306 */
1278 e1000e_reload_nvm(hw); 1307 e1000e_reload_nvm(hw);
@@ -1294,7 +1323,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
1294 s32 ret_val; 1323 s32 ret_val;
1295 u16 data; 1324 u16 data;
1296 1325
1297 /* Read 0x19 and check bit 6. If this bit is 0, the checksum 1326 /*
1327 * Read 0x19 and check bit 6. If this bit is 0, the checksum
1298 * needs to be fixed. This bit is an indication that the NVM 1328 * needs to be fixed. This bit is an indication that the NVM
1299 * was prepared by OEM software and did not calculate the 1329 * was prepared by OEM software and did not calculate the
1300 * checksum...a likely scenario. 1330 * checksum...a likely scenario.
@@ -1364,14 +1394,17 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
1364 1394
1365 ew32flash(ICH_FLASH_FDATA0, flash_data); 1395 ew32flash(ICH_FLASH_FDATA0, flash_data);
1366 1396
1367 /* check if FCERR is set to 1 , if set to 1, clear it 1397 /*
1368 * and try the whole sequence a few more times else done */ 1398 * check if FCERR is set to 1 , if set to 1, clear it
1399 * and try the whole sequence a few more times else done
1400 */
1369 ret_val = e1000_flash_cycle_ich8lan(hw, 1401 ret_val = e1000_flash_cycle_ich8lan(hw,
1370 ICH_FLASH_WRITE_COMMAND_TIMEOUT); 1402 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
1371 if (!ret_val) 1403 if (!ret_val)
1372 break; 1404 break;
1373 1405
1374 /* If we're here, then things are most likely 1406 /*
1407 * If we're here, then things are most likely
1375 * completely hosed, but if the error condition 1408 * completely hosed, but if the error condition
1376 * is detected, it won't hurt to give it another 1409 * is detected, it won't hurt to give it another
1377 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. 1410 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
@@ -1462,9 +1495,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
1462 1495
1463 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 1496 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
1464 1497
1465 /* Determine HW Sector size: Read BERASE bits of hw flash status 1498 /*
1466 * register */ 1499 * Determine HW Sector size: Read BERASE bits of hw flash status
1467 /* 00: The Hw sector is 256 bytes, hence we need to erase 16 1500 * register
1501 * 00: The Hw sector is 256 bytes, hence we need to erase 16
1468 * consecutive sectors. The start index for the nth Hw sector 1502 * consecutive sectors. The start index for the nth Hw sector
1469 * can be calculated as = bank * 4096 + n * 256 1503 * can be calculated as = bank * 4096 + n * 256
1470 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. 1504 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
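
The sector arithmetic in the reworked comment is worth seeing with numbers: for
256-byte hardware sectors a 4 KB bank takes 16 erase operations, each starting
at bank * 4096 + n * 256, exactly as the comment states. A short standalone
check of those start addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t bank        = 1;                    /* second 4 KB NVM bank  */
        uint32_t sector_size = 256;                  /* the BERASE == 00 case */
        uint32_t count       = 4096 / sector_size;   /* 16 erases per bank    */

        for (uint32_t n = 0; n < count; n++) {
            uint32_t flash_addr = bank * 4096 + n * sector_size;

            if (n < 2 || n == count - 1)             /* keep the output short */
                printf("erase %2u starts at 0x%05X\n",
                       (unsigned)n, (unsigned)flash_addr);
        }
        return 0;
    }

For the BERASE == 01 case the same loop degenerates to a single pass with a
4096-byte sector size.
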
@@ -1511,13 +1545,16 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
1511 if (ret_val) 1545 if (ret_val)
1512 return ret_val; 1546 return ret_val;
1513 1547
1514 /* Write a value 11 (block Erase) in Flash 1548 /*
1515 * Cycle field in hw flash control */ 1549 * Write a value 11 (block Erase) in Flash
1550 * Cycle field in hw flash control
1551 */
1516 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); 1552 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
1517 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; 1553 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
1518 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); 1554 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
1519 1555
1520 /* Write the last 24 bits of an index within the 1556 /*
1557 * Write the last 24 bits of an index within the
1521 * block into Flash Linear address field in Flash 1558 * block into Flash Linear address field in Flash
1522 * Address. 1559 * Address.
1523 */ 1560 */
@@ -1529,13 +1566,14 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
1529 if (ret_val == 0) 1566 if (ret_val == 0)
1530 break; 1567 break;
1531 1568
1532 /* Check if FCERR is set to 1. If 1, 1569 /*
1570 * Check if FCERR is set to 1. If 1,
1533 * clear it and try the whole sequence 1571 * clear it and try the whole sequence
1534 * a few more times else Done */ 1572 * a few more times else Done
1573 */
1535 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 1574 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
1536 if (hsfsts.hsf_status.flcerr == 1) 1575 if (hsfsts.hsf_status.flcerr == 1)
1537 /* repeat for some time before 1576 /* repeat for some time before giving up */
1538 * giving up */
1539 continue; 1577 continue;
1540 else if (hsfsts.hsf_status.flcdone == 0) 1578 else if (hsfsts.hsf_status.flcdone == 0)
1541 return ret_val; 1579 return ret_val;
@@ -1585,7 +1623,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
1585 1623
1586 ret_val = e1000e_get_bus_info_pcie(hw); 1624 ret_val = e1000e_get_bus_info_pcie(hw);
1587 1625
1588 /* ICH devices are "PCI Express"-ish. They have 1626 /*
1627 * ICH devices are "PCI Express"-ish. They have
1589 * a configuration space, but do not contain 1628 * a configuration space, but do not contain
1590 * PCI Express Capability registers, so bus width 1629 * PCI Express Capability registers, so bus width
1591 * must be hardcoded. 1630 * must be hardcoded.
@@ -1608,7 +1647,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
1608 u32 ctrl, icr, kab; 1647 u32 ctrl, icr, kab;
1609 s32 ret_val; 1648 s32 ret_val;
1610 1649
1611 /* Prevent the PCI-E bus from sticking if there is no TLP connection 1650 /*
1651 * Prevent the PCI-E bus from sticking if there is no TLP connection
1612 * on the last TLP read/write transaction when MAC is reset. 1652 * on the last TLP read/write transaction when MAC is reset.
1613 */ 1653 */
1614 ret_val = e1000e_disable_pcie_master(hw); 1654 ret_val = e1000e_disable_pcie_master(hw);
@@ -1619,7 +1659,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
1619 hw_dbg(hw, "Masking off all interrupts\n"); 1659 hw_dbg(hw, "Masking off all interrupts\n");
1620 ew32(IMC, 0xffffffff); 1660 ew32(IMC, 0xffffffff);
1621 1661
1622 /* Disable the Transmit and Receive units. Then delay to allow 1662 /*
1663 * Disable the Transmit and Receive units. Then delay to allow
1623 * any pending transactions to complete before we hit the MAC 1664 * any pending transactions to complete before we hit the MAC
1624 * with the global reset. 1665 * with the global reset.
1625 */ 1666 */
@@ -1640,7 +1681,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
1640 ctrl = er32(CTRL); 1681 ctrl = er32(CTRL);
1641 1682
1642 if (!e1000_check_reset_block(hw)) { 1683 if (!e1000_check_reset_block(hw)) {
1643 /* PHY HW reset requires MAC CORE reset at the same 1684 /*
1685 * PHY HW reset requires MAC CORE reset at the same
1644 * time to make sure the interface between MAC and the 1686 * time to make sure the interface between MAC and the
1645 * external PHY is reset. 1687 * external PHY is reset.
1646 */ 1688 */
@@ -1724,8 +1766,10 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
1724 E1000_TXDCTL_MAX_TX_DESC_PREFETCH; 1766 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
1725 ew32(TXDCTL1, txdctl); 1767 ew32(TXDCTL1, txdctl);
1726 1768
1727 /* ICH8 has opposite polarity of no_snoop bits. 1769 /*
1728 * By default, we should use snoop behavior. */ 1770 * ICH8 has opposite polarity of no_snoop bits.
1771 * By default, we should use snoop behavior.
1772 */
1729 if (mac->type == e1000_ich8lan) 1773 if (mac->type == e1000_ich8lan)
1730 snoop = PCIE_ICH8_SNOOP_ALL; 1774 snoop = PCIE_ICH8_SNOOP_ALL;
1731 else 1775 else
@@ -1736,7 +1780,8 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
1736 ctrl_ext |= E1000_CTRL_EXT_RO_DIS; 1780 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
1737 ew32(CTRL_EXT, ctrl_ext); 1781 ew32(CTRL_EXT, ctrl_ext);
1738 1782
1739 /* Clear all of the statistics registers (clear on read). It is 1783 /*
1784 * Clear all of the statistics registers (clear on read). It is
1740 * important that we do this after we have tried to establish link 1785 * important that we do this after we have tried to establish link
1741 * because the symbol error count will increment wildly if there 1786 * because the symbol error count will increment wildly if there
1742 * is no link. 1787 * is no link.
@@ -1813,7 +1858,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
1813 if (e1000_check_reset_block(hw)) 1858 if (e1000_check_reset_block(hw))
1814 return 0; 1859 return 0;
1815 1860
1816 /* ICH parts do not have a word in the NVM to determine 1861 /*
1862 * ICH parts do not have a word in the NVM to determine
1817 * the default flow control setting, so we explicitly 1863 * the default flow control setting, so we explicitly
1818 * set it to full. 1864 * set it to full.
1819 */ 1865 */
@@ -1853,9 +1899,11 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
1853 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1899 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1854 ew32(CTRL, ctrl); 1900 ew32(CTRL, ctrl);
1855 1901
1856 /* Set the mac to wait the maximum time between each iteration 1902 /*
1903 * Set the mac to wait the maximum time between each iteration
1857 * and increase the max iterations when polling the phy; 1904 * and increase the max iterations when polling the phy;
1858 * this fixes erroneous timeouts at 10Mbps. */ 1905 * this fixes erroneous timeouts at 10Mbps.
1906 */
1859 ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); 1907 ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
1860 if (ret_val) 1908 if (ret_val)
1861 return ret_val; 1909 return ret_val;
@@ -1882,7 +1930,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
1882 * @speed: pointer to store current link speed 1930 * @speed: pointer to store current link speed
1883 * @duplex: pointer to store the current link duplex 1931 * @duplex: pointer to store the current link duplex
1884 * 1932 *
1885 * Calls the generic get_speed_and_duplex to retreive the current link 1933 * Calls the generic get_speed_and_duplex to retrieve the current link
1886 * information and then calls the Kumeran lock loss workaround for links at 1934 * information and then calls the Kumeran lock loss workaround for links at
1887 * gigabit speeds. 1935 * gigabit speeds.
1888 **/ 1936 **/
@@ -1930,9 +1978,11 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
1930 if (!dev_spec->kmrn_lock_loss_workaround_enabled) 1978 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
1931 return 0; 1979 return 0;
1932 1980
1933 /* Make sure link is up before proceeding. If not just return. 1981 /*
1982 * Make sure link is up before proceeding. If not just return.
1934 * Attempting this while link is negotiating fouled up link 1983 * Attempting this while link is negotiating fouled up link
1935 * stability */ 1984 * stability
1985 */
1936 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); 1986 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
1937 if (!link) 1987 if (!link)
1938 return 0; 1988 return 0;
@@ -1961,8 +2011,10 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
1961 E1000_PHY_CTRL_NOND0A_GBE_DISABLE); 2011 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
1962 ew32(PHY_CTRL, phy_ctrl); 2012 ew32(PHY_CTRL, phy_ctrl);
1963 2013
1964 /* Call gig speed drop workaround on Gig disable before accessing 2014 /*
1965 * any PHY registers */ 2015 * Call gig speed drop workaround on Gig disable before accessing
2016 * any PHY registers
2017 */
1966 e1000e_gig_downshift_workaround_ich8lan(hw); 2018 e1000e_gig_downshift_workaround_ich8lan(hw);
1967 2019
1968 /* unable to acquire PCS lock */ 2020 /* unable to acquire PCS lock */
@@ -1970,7 +2022,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
1970} 2022}
1971 2023
1972/** 2024/**
1973 * e1000_set_kmrn_lock_loss_workaound_ich8lan - Set Kumeran workaround state 2025 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
1974 * @hw: pointer to the HW structure 2026 * @hw: pointer to the HW structure
1975 * @state: boolean value used to set the current Kumeran workaround state 2027 * @state: boolean value used to set the current Kumeran workaround state
1976 * 2028 *
@@ -2017,8 +2069,10 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
2017 E1000_PHY_CTRL_NOND0A_GBE_DISABLE); 2069 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
2018 ew32(PHY_CTRL, reg); 2070 ew32(PHY_CTRL, reg);
2019 2071
2020 /* Call gig speed drop workaround on Gig disable before 2072 /*
2021 * accessing any PHY registers */ 2073 * Call gig speed drop workaround on Gig disable before
2074 * accessing any PHY registers
2075 */
2022 if (hw->mac.type == e1000_ich8lan) 2076 if (hw->mac.type == e1000_ich8lan)
2023 e1000e_gig_downshift_workaround_ich8lan(hw); 2077 e1000e_gig_downshift_workaround_ich8lan(hw);
2024 2078
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 073934c7f73a..b7eaff0a20ba 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -43,8 +43,8 @@ enum e1000_mng_mode {
43 43
44#define E1000_FACTPS_MNGCG 0x20000000 44#define E1000_FACTPS_MNGCG 0x20000000
45 45
46#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management 46/* Intel(R) Active Management Technology signature */
47 * Technology signature */ 47#define E1000_IAMT_SIGNATURE 0x544D4149
48 48
49/** 49/**
50 * e1000e_get_bus_info_pcie - Get PCIe bus information 50 * e1000e_get_bus_info_pcie - Get PCIe bus information
@@ -142,7 +142,8 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
142{ 142{
143 u32 rar_low, rar_high; 143 u32 rar_low, rar_high;
144 144
145 /* HW expects these in little endian so we reverse the byte order 145 /*
146 * HW expects these in little endian so we reverse the byte order
146 * from network order (big endian) to little endian 147 * from network order (big endian) to little endian
147 */ 148 */
148 rar_low = ((u32) addr[0] | 149 rar_low = ((u32) addr[0] |
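
The byte-order note above is easiest to see as plain arithmetic: the first four bytes of the (network-order) MAC address fill RAR low least-significant byte first, and the last two fill RAR high. A stand-alone sketch of that packing (the address-valid bit value here is an assumption mirroring the driver's E1000_RAH_AV define):

#include <stdint.h>

#define RAH_AV 0x80000000u  /* "address valid" bit, assumed per E1000_RAH_AV */

/* Pack a 6-byte MAC address (network order) into RAR low/high values. */
static void rar_pack(const uint8_t addr[6], uint32_t *rar_low, uint32_t *rar_high)
{
        *rar_low = (uint32_t)addr[0] |
                   ((uint32_t)addr[1] << 8) |
                   ((uint32_t)addr[2] << 16) |
                   ((uint32_t)addr[3] << 24);
        *rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8) | RAH_AV;
}
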
@@ -171,7 +172,8 @@ static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
171{ 172{
172 u32 hash_bit, hash_reg, mta; 173 u32 hash_bit, hash_reg, mta;
173 174
174 /* The MTA is a register array of 32-bit registers. It is 175 /*
176 * The MTA is a register array of 32-bit registers. It is
175 * treated like an array of (32*mta_reg_count) bits. We want to 177 * treated like an array of (32*mta_reg_count) bits. We want to
176 * set bit BitArray[hash_value]. So we figure out what register 178 * set bit BitArray[hash_value]. So we figure out what register
177 * the bit is in, read it, OR in the new bit, then write 179 * the bit is in, read it, OR in the new bit, then write
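
Treating the MTA as one long bit array, as described above, means a hash value is simply split into a register index and a bit index; a minimal sketch of that split, using the 32-bit-register layout the comment describes:

#include <stdint.h>

/* Split a multicast hash value into an MTA register index and bit index. */
static void mta_index(uint32_t hash_value, uint32_t mta_reg_count,
                      uint32_t *hash_reg, uint32_t *hash_bit)
{
        *hash_reg = (hash_value >> 5) & (mta_reg_count - 1); /* which register */
        *hash_bit = hash_value & 0x1F;                       /* which bit in it */
}

Setting BitArray[hash_value] then reduces to reading MTA[hash_reg], OR-ing in (1 << hash_bit), and writing it back, exactly as the comment outlines.
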
@@ -208,12 +210,15 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
208 /* Register count multiplied by bits per register */ 210 /* Register count multiplied by bits per register */
209 hash_mask = (hw->mac.mta_reg_count * 32) - 1; 211 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
210 212
211 /* For a mc_filter_type of 0, bit_shift is the number of left-shifts 213 /*
212 * where 0xFF would still fall within the hash mask. */ 214 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
215 * where 0xFF would still fall within the hash mask.
216 */
213 while (hash_mask >> bit_shift != 0xFF) 217 while (hash_mask >> bit_shift != 0xFF)
214 bit_shift++; 218 bit_shift++;
215 219
216 /* The portion of the address that is used for the hash table 220 /*
221 * The portion of the address that is used for the hash table
217 * is determined by the mc_filter_type setting. 222 * is determined by the mc_filter_type setting.
218 * The algorithm is such that there is a total of 8 bits of shifting. 223 * The algorithm is such that there is a total of 8 bits of shifting.
219 * The bit_shift for a mc_filter_type of 0 represents the number of 224 * The bit_shift for a mc_filter_type of 0 represents the number of
@@ -224,8 +229,8 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
224 * cases are a variation of this algorithm...essentially raising the 229 * cases are a variation of this algorithm...essentially raising the
225 * number of bits to shift mc_addr[5] left, while still keeping the 230 * number of bits to shift mc_addr[5] left, while still keeping the
226 * 8-bit shifting total. 231 * 8-bit shifting total.
227 */ 232 *
228 /* For example, given the following Destination MAC Address and an 233 * For example, given the following Destination MAC Address and an
229 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), 234 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
230 * we can see that the bit_shift for case 0 is 4. These are the hash 235 * we can see that the bit_shift for case 0 is 4. These are the hash
231 * values resulting from each mc_filter_type... 236 * values resulting from each mc_filter_type...
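
The bit_shift discussion above (and the worked example the comment refers to) boils down to one small function; a user-space sketch, assuming the register-count and mc_filter_type semantics described in the comments:

#include <stdint.h>

/* Hash a multicast address into an (mta_reg_count * 32)-bit vector. */
static uint32_t mc_hash(const uint8_t mc_addr[6], uint32_t mta_reg_count,
                        int mc_filter_type)
{
        uint32_t hash_mask = (mta_reg_count * 32) - 1; /* 0xFFF for 128 regs */
        int bit_shift = 0;

        /* bit_shift: left-shifts for which 0xFF still fits under the mask */
        while ((hash_mask >> bit_shift) != 0xFF)
                bit_shift++;

        /* types 1-3 shift mc_addr[5] further left, keeping 8 bits of shift total */
        if (mc_filter_type == 1)
                bit_shift += 1;
        else if (mc_filter_type == 2)
                bit_shift += 2;
        else if (mc_filter_type == 3)
                bit_shift += 4;

        return hash_mask & (((uint32_t)mc_addr[4] >> (8 - bit_shift)) |
                            ((uint32_t)mc_addr[5] << bit_shift));
}
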
@@ -279,7 +284,8 @@ void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw,
279 u32 hash_value; 284 u32 hash_value;
280 u32 i; 285 u32 i;
281 286
282 /* Load the first set of multicast addresses into the exact 287 /*
288 * Load the first set of multicast addresses into the exact
283 * filters (RAR). If there are not enough to fill the RAR 289 * filters (RAR). If there are not enough to fill the RAR
284 * array, clear the filters. 290 * array, clear the filters.
285 */ 291 */
@@ -375,7 +381,8 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
375 s32 ret_val; 381 s32 ret_val;
376 bool link; 382 bool link;
377 383
378 /* We only want to go out to the PHY registers to see if Auto-Neg 384 /*
385 * We only want to go out to the PHY registers to see if Auto-Neg
379 * has completed and/or if our link status has changed. The 386 * has completed and/or if our link status has changed. The
380 * get_link_status flag is set upon receiving a Link Status 387 * get_link_status flag is set upon receiving a Link Status
381 * Change or Rx Sequence Error interrupt. 388 * Change or Rx Sequence Error interrupt.
@@ -383,7 +390,8 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
383 if (!mac->get_link_status) 390 if (!mac->get_link_status)
384 return 0; 391 return 0;
385 392
386 /* First we want to see if the MII Status Register reports 393 /*
394 * First we want to see if the MII Status Register reports
387 * link. If so, then we want to get the current speed/duplex 395 * link. If so, then we want to get the current speed/duplex
388 * of the PHY. 396 * of the PHY.
389 */ 397 */
@@ -396,11 +404,14 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
396 404
397 mac->get_link_status = 0; 405 mac->get_link_status = 0;
398 406
399 /* Check if there was DownShift, must be checked 407 /*
400 * immediately after link-up */ 408 * Check if there was DownShift, must be checked
409 * immediately after link-up
410 */
401 e1000e_check_downshift(hw); 411 e1000e_check_downshift(hw);
402 412
403 /* If we are forcing speed/duplex, then we simply return since 413 /*
414 * If we are forcing speed/duplex, then we simply return since
404 * we have already determined whether we have link or not. 415 * we have already determined whether we have link or not.
405 */ 416 */
406 if (!mac->autoneg) { 417 if (!mac->autoneg) {
@@ -408,13 +419,15 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
408 return ret_val; 419 return ret_val;
409 } 420 }
410 421
411 /* Auto-Neg is enabled. Auto Speed Detection takes care 422 /*
423 * Auto-Neg is enabled. Auto Speed Detection takes care
412 * of MAC speed/duplex configuration. So we only need to 424 * of MAC speed/duplex configuration. So we only need to
413 * configure Collision Distance in the MAC. 425 * configure Collision Distance in the MAC.
414 */ 426 */
415 e1000e_config_collision_dist(hw); 427 e1000e_config_collision_dist(hw);
416 428
417 /* Configure Flow Control now that Auto-Neg has completed. 429 /*
430 * Configure Flow Control now that Auto-Neg has completed.
418 * First, we need to restore the desired flow control 431 * First, we need to restore the desired flow control
419 * settings because we may have had to re-autoneg with a 432 * settings because we may have had to re-autoneg with a
420 * different link partner. 433 * different link partner.
@@ -446,7 +459,8 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
446 status = er32(STATUS); 459 status = er32(STATUS);
447 rxcw = er32(RXCW); 460 rxcw = er32(RXCW);
448 461
449 /* If we don't have link (auto-negotiation failed or link partner 462 /*
463 * If we don't have link (auto-negotiation failed or link partner
450 * cannot auto-negotiate), the cable is plugged in (we have signal), 464 * cannot auto-negotiate), the cable is plugged in (we have signal),
451 * and our link partner is not trying to auto-negotiate with us (we 465 * and our link partner is not trying to auto-negotiate with us (we
452 * are receiving idles or data), we need to force link up. We also 466 * are receiving idles or data), we need to force link up. We also
@@ -477,7 +491,8 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
477 return ret_val; 491 return ret_val;
478 } 492 }
479 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { 493 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
480 /* If we are forcing link and we are receiving /C/ ordered 494 /*
495 * If we are forcing link and we are receiving /C/ ordered
481 * sets, re-enable auto-negotiation in the TXCW register 496 * sets, re-enable auto-negotiation in the TXCW register
482 * and disable forced link in the Device Control register 497 * and disable forced link in the Device Control register
483 * in an attempt to auto-negotiate with our link partner. 498 * in an attempt to auto-negotiate with our link partner.
@@ -511,7 +526,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
511 status = er32(STATUS); 526 status = er32(STATUS);
512 rxcw = er32(RXCW); 527 rxcw = er32(RXCW);
513 528
514 /* If we don't have link (auto-negotiation failed or link partner 529 /*
530 * If we don't have link (auto-negotiation failed or link partner
515 * cannot auto-negotiate), and our link partner is not trying to 531 * cannot auto-negotiate), and our link partner is not trying to
516 * auto-negotiate with us (we are receiving idles or data), 532 * auto-negotiate with us (we are receiving idles or data),
517 * we need to force link up. We also need to give auto-negotiation 533 * we need to force link up. We also need to give auto-negotiation
@@ -540,7 +556,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
540 return ret_val; 556 return ret_val;
541 } 557 }
542 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { 558 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
543 /* If we are forcing link and we are receiving /C/ ordered 559 /*
560 * If we are forcing link and we are receiving /C/ ordered
544 * sets, re-enable auto-negotiation in the TXCW register 561 * sets, re-enable auto-negotiation in the TXCW register
545 * and disable forced link in the Device Control register 562 * and disable forced link in the Device Control register
546 * in an attempt to auto-negotiate with our link partner. 563 * in an attempt to auto-negotiate with our link partner.
@@ -551,7 +568,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
551 568
552 mac->serdes_has_link = 1; 569 mac->serdes_has_link = 1;
553 } else if (!(E1000_TXCW_ANE & er32(TXCW))) { 570 } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
554 /* If we force link for non-auto-negotiation switch, check 571 /*
572 * If we force link for non-auto-negotiation switch, check
555 * link status based on MAC synchronization for internal 573 * link status based on MAC synchronization for internal
556 * serdes media type. 574 * serdes media type.
557 */ 575 */
@@ -589,7 +607,8 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
589 s32 ret_val; 607 s32 ret_val;
590 u16 nvm_data; 608 u16 nvm_data;
591 609
592 /* Read and store word 0x0F of the EEPROM. This word contains bits 610 /*
611 * Read and store word 0x0F of the EEPROM. This word contains bits
593 * that determine the hardware's default PAUSE (flow control) mode, 612 * that determine the hardware's default PAUSE (flow control) mode,
594 * a bit that determines whether the HW defaults to enabling or 613 * a bit that determines whether the HW defaults to enabling or
595 * disabling auto-negotiation, and the direction of the 614 * disabling auto-negotiation, and the direction of the
@@ -630,7 +649,8 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
630 struct e1000_mac_info *mac = &hw->mac; 649 struct e1000_mac_info *mac = &hw->mac;
631 s32 ret_val; 650 s32 ret_val;
632 651
633 /* In the case of the phy reset being blocked, we already have a link. 652 /*
653 * In the case of the phy reset being blocked, we already have a link.
634 * We do not need to set it up again. 654 * We do not need to set it up again.
635 */ 655 */
636 if (e1000_check_reset_block(hw)) 656 if (e1000_check_reset_block(hw))
@@ -646,7 +666,8 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
646 return ret_val; 666 return ret_val;
647 } 667 }
648 668
649 /* We want to save off the original Flow Control configuration just 669 /*
670 * We want to save off the original Flow Control configuration just
650 * in case we get disconnected and then reconnected into a different 671 * in case we get disconnected and then reconnected into a different
651 * hub or switch with different Flow Control capabilities. 672 * hub or switch with different Flow Control capabilities.
652 */ 673 */
@@ -659,7 +680,8 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
659 if (ret_val) 680 if (ret_val)
660 return ret_val; 681 return ret_val;
661 682
662 /* Initialize the flow control address, type, and PAUSE timer 683 /*
684 * Initialize the flow control address, type, and PAUSE timer
663 * registers to their default values. This is done even if flow 685 * registers to their default values. This is done even if flow
664 * control is disabled, because it does not hurt anything to 686 * control is disabled, because it does not hurt anything to
665 * initialize these registers. 687 * initialize these registers.
@@ -686,7 +708,8 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
686 struct e1000_mac_info *mac = &hw->mac; 708 struct e1000_mac_info *mac = &hw->mac;
687 u32 txcw; 709 u32 txcw;
688 710
689 /* Check for a software override of the flow control settings, and 711 /*
712 * Check for a software override of the flow control settings, and
690 * setup the device accordingly. If auto-negotiation is enabled, then 713 * setup the device accordingly. If auto-negotiation is enabled, then
691 * software will have to set the "PAUSE" bits to the correct value in 714 * software will have to set the "PAUSE" bits to the correct value in
692 * the Transmit Config Word Register (TXCW) and re-start auto- 715 * the Transmit Config Word Register (TXCW) and re-start auto-
@@ -700,7 +723,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
700 * but not send pause frames). 723 * but not send pause frames).
701 * 2: Tx flow control is enabled (we can send pause frames but we 724 * 2: Tx flow control is enabled (we can send pause frames but we
702 * do not support receiving pause frames). 725 * do not support receiving pause frames).
703 * 3: Both Rx and TX flow control (symmetric) are enabled. 726 * 3: Both Rx and Tx flow control (symmetric) are enabled.
704 */ 727 */
705 switch (mac->fc) { 728 switch (mac->fc) {
706 case e1000_fc_none: 729 case e1000_fc_none:
@@ -708,23 +731,26 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
708 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); 731 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
709 break; 732 break;
710 case e1000_fc_rx_pause: 733 case e1000_fc_rx_pause:
711 /* RX Flow control is enabled and TX Flow control is disabled 734 /*
735 * Rx Flow control is enabled and Tx Flow control is disabled
712 * by a software over-ride. Since there really isn't a way to 736 * by a software over-ride. Since there really isn't a way to
713 * advertise that we are capable of RX Pause ONLY, we will 737 * advertise that we are capable of Rx Pause ONLY, we will
714 * advertise that we support both symmetric and asymmetric RX 738 * advertise that we support both symmetric and asymmetric Rx
715 * PAUSE. Later, we will disable the adapter's ability to send 739 * PAUSE. Later, we will disable the adapter's ability to send
716 * PAUSE frames. 740 * PAUSE frames.
717 */ 741 */
718 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 742 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
719 break; 743 break;
720 case e1000_fc_tx_pause: 744 case e1000_fc_tx_pause:
721 /* TX Flow control is enabled, and RX Flow control is disabled, 745 /*
746 * Tx Flow control is enabled, and Rx Flow control is disabled,
722 * by a software over-ride. 747 * by a software over-ride.
723 */ 748 */
724 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); 749 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
725 break; 750 break;
726 case e1000_fc_full: 751 case e1000_fc_full:
727 /* Flow control (both RX and TX) is enabled by a software 752 /*
753 * Flow control (both Rx and Tx) is enabled by a software
728 * over-ride. 754 * over-ride.
729 */ 755 */
730 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 756 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
@@ -754,7 +780,8 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
754 u32 i, status; 780 u32 i, status;
755 s32 ret_val; 781 s32 ret_val;
756 782
757 /* If we have a signal (the cable is plugged in, or assumed true for 783 /*
784 * If we have a signal (the cable is plugged in, or assumed true for
758 * serdes media) then poll for a "Link-Up" indication in the Device 785 * serdes media) then poll for a "Link-Up" indication in the Device
759 * Status Register. Time-out if a link isn't seen in 500 milliseconds 786 * Status Register. Time-out if a link isn't seen in 500 milliseconds
760 * seconds (Auto-negotiation should complete in less than 500 787 * seconds (Auto-negotiation should complete in less than 500
@@ -769,7 +796,8 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
769 if (i == FIBER_LINK_UP_LIMIT) { 796 if (i == FIBER_LINK_UP_LIMIT) {
770 hw_dbg(hw, "Never got a valid link from auto-neg!!!\n"); 797 hw_dbg(hw, "Never got a valid link from auto-neg!!!\n");
771 mac->autoneg_failed = 1; 798 mac->autoneg_failed = 1;
772 /* AutoNeg failed to achieve a link, so we'll call 799 /*
800 * AutoNeg failed to achieve a link, so we'll call
773 * mac->check_for_link. This routine will force the 801 * mac->check_for_link. This routine will force the
774 * link up if we detect a signal. This will allow us to 802 * link up if we detect a signal. This will allow us to
775 * communicate with non-autonegotiating link partners. 803 * communicate with non-autonegotiating link partners.
@@ -811,7 +839,8 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
811 if (ret_val) 839 if (ret_val)
812 return ret_val; 840 return ret_val;
813 841
814 /* Since auto-negotiation is enabled, take the link out of reset (the 842 /*
843 * Since auto-negotiation is enabled, take the link out of reset (the
815 * link will be in reset, because we previously reset the chip). This 844 * link will be in reset, because we previously reset the chip). This
816 * will restart auto-negotiation. If auto-negotiation is successful 845 * will restart auto-negotiation. If auto-negotiation is successful
817 * then the link-up status bit will be set and the flow control enable 846 * then the link-up status bit will be set and the flow control enable
@@ -823,7 +852,8 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
823 e1e_flush(); 852 e1e_flush();
824 msleep(1); 853 msleep(1);
825 854
826 /* For these adapters, the SW defineable pin 1 is set when the optics 855 /*
856 * For these adapters, the SW definable pin 1 is set when the optics
827 * detect a signal. If we have a signal, then poll for a "Link-Up" 857 * detect a signal. If we have a signal, then poll for a "Link-Up"
828 * indication. 858 * indication.
829 */ 859 */
@@ -864,21 +894,23 @@ void e1000e_config_collision_dist(struct e1000_hw *hw)
864 * 894 *
865 * Sets the flow control high/low threshold (watermark) registers. If 895 * Sets the flow control high/low threshold (watermark) registers. If
866 * flow control XON frame transmission is enabled, then set XON frame 896 * flow control XON frame transmission is enabled, then set XON frame
867 * tansmission as well. 897 * transmission as well.
868 **/ 898 **/
869s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) 899s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
870{ 900{
871 struct e1000_mac_info *mac = &hw->mac; 901 struct e1000_mac_info *mac = &hw->mac;
872 u32 fcrtl = 0, fcrth = 0; 902 u32 fcrtl = 0, fcrth = 0;
873 903
874 /* Set the flow control receive threshold registers. Normally, 904 /*
905 * Set the flow control receive threshold registers. Normally,
875 * these registers will be set to a default threshold that may be 906 * these registers will be set to a default threshold that may be
876 * adjusted later by the driver's runtime code. However, if the 907 * adjusted later by the driver's runtime code. However, if the
877 * ability to transmit pause frames is not enabled, then these 908 * ability to transmit pause frames is not enabled, then these
878 * registers will be set to 0. 909 * registers will be set to 0.
879 */ 910 */
880 if (mac->fc & e1000_fc_tx_pause) { 911 if (mac->fc & e1000_fc_tx_pause) {
881 /* We need to set up the Receive Threshold high and low water 912 /*
913 * We need to set up the Receive Threshold high and low water
882 * marks as well as (optionally) enabling the transmission of 914 * marks as well as (optionally) enabling the transmission of
883 * XON frames. 915 * XON frames.
884 */ 916 */
@@ -909,7 +941,8 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
909 941
910 ctrl = er32(CTRL); 942 ctrl = er32(CTRL);
911 943
912 /* Because we didn't get link via the internal auto-negotiation 944 /*
945 * Because we didn't get link via the internal auto-negotiation
913 * mechanism (we either forced link or we got link via PHY 946 * mechanism (we either forced link or we got link via PHY
914 * auto-neg), we have to manually enable/disable transmit an 947 * auto-neg), we have to manually enable/disable transmit an
915 * receive flow control. 948 * receive flow control.
@@ -923,7 +956,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
923 * frames but not send pause frames). 956 * frames but not send pause frames).
924 * 2: Tx flow control is enabled (we can send pause frames 957 * 2: Tx flow control is enabled (we can send pause frames
925 * frames but we do not receive pause frames). 958 * frames but we do not receive pause frames).
926 * 3: Both Rx and TX flow control (symmetric) is enabled. 959 * 3: Both Rx and Tx flow control (symmetric) is enabled.
927 * other: No other values should be possible at this point. 960 * other: No other values should be possible at this point.
928 */ 961 */
929 hw_dbg(hw, "mac->fc = %u\n", mac->fc); 962 hw_dbg(hw, "mac->fc = %u\n", mac->fc);
@@ -970,7 +1003,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
970 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; 1003 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
971 u16 speed, duplex; 1004 u16 speed, duplex;
972 1005
973 /* Check for the case where we have fiber media and auto-neg failed 1006 /*
1007 * Check for the case where we have fiber media and auto-neg failed
974 * so we had to force link. In this case, we need to force the 1008 * so we had to force link. In this case, we need to force the
975 * configuration of the MAC to match the "fc" parameter. 1009 * configuration of the MAC to match the "fc" parameter.
976 */ 1010 */
@@ -988,13 +1022,15 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
988 return ret_val; 1022 return ret_val;
989 } 1023 }
990 1024
991 /* Check for the case where we have copper media and auto-neg is 1025 /*
1026 * Check for the case where we have copper media and auto-neg is
992 * enabled. In this case, we need to check and see if Auto-Neg 1027 * enabled. In this case, we need to check and see if Auto-Neg
993 * has completed, and if so, how the PHY and link partner has 1028 * has completed, and if so, how the PHY and link partner has
994 * flow control configured. 1029 * flow control configured.
995 */ 1030 */
996 if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) { 1031 if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) {
997 /* Read the MII Status Register and check to see if AutoNeg 1032 /*
1033 * Read the MII Status Register and check to see if AutoNeg
998 * has completed. We read this twice because this reg has 1034 * has completed. We read this twice because this reg has
999 * some "sticky" (latched) bits. 1035 * some "sticky" (latched) bits.
1000 */ 1036 */
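
Reading the status register twice to get past latched bits, as noted above, is a generic MII pattern; a minimal sketch (the auto-negotiation-complete bit shown is the standard BMSR value, and read_reg stands in for whatever PHY accessor is available):

#include <stdint.h>

#define MII_SR_AUTONEG_COMPLETE 0x0020  /* standard MII BMSR bit */

/* Latched status bits only reflect the current state on the second read. */
static int autoneg_complete(uint16_t (*read_reg)(void))
{
        (void)read_reg();   /* first read clears any latched state */
        return (read_reg() & MII_SR_AUTONEG_COMPLETE) != 0;
}
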
@@ -1011,7 +1047,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1011 return ret_val; 1047 return ret_val;
1012 } 1048 }
1013 1049
1014 /* The AutoNeg process has completed, so we now need to 1050 /*
1051 * The AutoNeg process has completed, so we now need to
1015 * read both the Auto Negotiation Advertisement 1052 * read both the Auto Negotiation Advertisement
1016 * Register (Address 4) and the Auto_Negotiation Base 1053 * Register (Address 4) and the Auto_Negotiation Base
1017 * Page Ability Register (Address 5) to determine how 1054 * Page Ability Register (Address 5) to determine how
@@ -1024,7 +1061,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1024 if (ret_val) 1061 if (ret_val)
1025 return ret_val; 1062 return ret_val;
1026 1063
1027 /* Two bits in the Auto Negotiation Advertisement Register 1064 /*
1065 * Two bits in the Auto Negotiation Advertisement Register
1028 * (Address 4) and two bits in the Auto Negotiation Base 1066 * (Address 4) and two bits in the Auto Negotiation Base
1029 * Page Ability Register (Address 5) determine flow control 1067 * Page Ability Register (Address 5) determine flow control
1030 * for both the PHY and the link partner. The following 1068 * for both the PHY and the link partner. The following
@@ -1045,8 +1083,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1045 * 1 | 1 | 0 | 0 | e1000_fc_none 1083 * 1 | 1 | 0 | 0 | e1000_fc_none
1046 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause 1084 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1047 * 1085 *
1048 */ 1086 *
1049 /* Are both PAUSE bits set to 1? If so, this implies 1087 * Are both PAUSE bits set to 1? If so, this implies
1050 * Symmetric Flow Control is enabled at both ends. The 1088 * Symmetric Flow Control is enabled at both ends. The
1051 * ASM_DIR bits are irrelevant per the spec. 1089 * ASM_DIR bits are irrelevant per the spec.
1052 * 1090 *
@@ -1060,9 +1098,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1060 */ 1098 */
1061 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && 1099 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1062 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { 1100 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
1063 /* Now we need to check if the user selected RX ONLY 1101 /*
1102 * Now we need to check if the user selected Rx ONLY
1064 * of pause frames. In this case, we had to advertise 1103 * of pause frames. In this case, we had to advertise
1065 * FULL flow control because we could not advertise RX 1104 * FULL flow control because we could not advertise Rx
1066 * ONLY. Hence, we must now check to see if we need to 1105 * ONLY. Hence, we must now check to see if we need to
1067 * turn OFF the TRANSMISSION of PAUSE frames. 1106 * turn OFF the TRANSMISSION of PAUSE frames.
1068 */ 1107 */
@@ -1075,7 +1114,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1075 "RX PAUSE frames only.\r\n"); 1114 "RX PAUSE frames only.\r\n");
1076 } 1115 }
1077 } 1116 }
1078 /* For receiving PAUSE frames ONLY. 1117 /*
1118 * For receiving PAUSE frames ONLY.
1079 * 1119 *
1080 * LOCAL DEVICE | LINK PARTNER 1120 * LOCAL DEVICE | LINK PARTNER
1081 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 1121 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -1090,7 +1130,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1090 mac->fc = e1000_fc_tx_pause; 1130 mac->fc = e1000_fc_tx_pause;
1091 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n"); 1131 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n");
1092 } 1132 }
1093 /* For transmitting PAUSE frames ONLY. 1133 /*
1134 * For transmitting PAUSE frames ONLY.
1094 * 1135 *
1095 * LOCAL DEVICE | LINK PARTNER 1136 * LOCAL DEVICE | LINK PARTNER
1096 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 1137 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -1113,7 +1154,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1113 hw_dbg(hw, "Flow Control = NONE.\r\n"); 1154 hw_dbg(hw, "Flow Control = NONE.\r\n");
1114 } 1155 }
1115 1156
1116 /* Now we need to do one last check... If we auto- 1157 /*
1158 * Now we need to do one last check... If we auto-
1117 * negotiated to HALF DUPLEX, flow control should not be 1159 * negotiated to HALF DUPLEX, flow control should not be
1118 * enabled per IEEE 802.3 spec. 1160 * enabled per IEEE 802.3 spec.
1119 */ 1161 */
@@ -1126,7 +1168,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1126 if (duplex == HALF_DUPLEX) 1168 if (duplex == HALF_DUPLEX)
1127 mac->fc = e1000_fc_none; 1169 mac->fc = e1000_fc_none;
1128 1170
1129 /* Now we call a subroutine to actually force the MAC 1171 /*
1172 * Now we call a subroutine to actually force the MAC
1130 * controller to use the correct flow control settings. 1173 * controller to use the correct flow control settings.
1131 */ 1174 */
1132 ret_val = e1000e_force_mac_fc(hw); 1175 ret_val = e1000e_force_mac_fc(hw);
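
The PAUSE/ASM_DIR truth tables spread across the comments above collapse into a short decision chain. The following stand-alone sketch resolves the negotiated mode from the advertised bits on each side, including the half-duplex override mentioned last, under the assumption (stated in the first comment) that the local side advertised full flow control even when only Rx pause was requested:

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Resolve flow control from the local/partner PAUSE and ASM_DIR bits. */
static enum fc_mode resolve_fc(int loc_pause, int loc_asm,
                               int lp_pause, int lp_asm,
                               enum fc_mode requested, int half_duplex)
{
        enum fc_mode fc;

        if (loc_pause && lp_pause)
                /* symmetric PAUSE on both ends; honour an Rx-only request */
                fc = (requested == FC_FULL) ? FC_FULL : FC_RX_PAUSE;
        else if (!loc_pause && loc_asm && lp_pause && lp_asm)
                fc = FC_TX_PAUSE;
        else if (loc_pause && loc_asm && !lp_pause && lp_asm)
                fc = FC_RX_PAUSE;
        else
                fc = FC_NONE;

        /* per IEEE 802.3, no flow control on a half-duplex link */
        return half_duplex ? FC_NONE : fc;
}
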
@@ -1398,8 +1441,10 @@ s32 e1000e_blink_led(struct e1000_hw *hw)
1398 ledctl_blink = E1000_LEDCTL_LED0_BLINK | 1441 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1399 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); 1442 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1400 } else { 1443 } else {
1401 /* set the blink bit for each LED that's "on" (0x0E) 1444 /*
1402 * in ledctl_mode2 */ 1445 * set the blink bit for each LED that's "on" (0x0E)
1446 * in ledctl_mode2
1447 */
1403 ledctl_blink = hw->mac.ledctl_mode2; 1448 ledctl_blink = hw->mac.ledctl_mode2;
1404 for (i = 0; i < 4; i++) 1449 for (i = 0; i < 4; i++)
1405 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == 1450 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
@@ -1562,8 +1607,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
1562 else 1607 else
1563 mac->current_ifs_val += 1608 mac->current_ifs_val +=
1564 mac->ifs_step_size; 1609 mac->ifs_step_size;
1565 ew32(AIT, 1610 ew32(AIT, mac->current_ifs_val);
1566 mac->current_ifs_val);
1567 } 1611 }
1568 } 1612 }
1569 } else { 1613 } else {
@@ -1826,10 +1870,12 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1826 udelay(1); 1870 udelay(1);
1827 timeout = NVM_MAX_RETRY_SPI; 1871 timeout = NVM_MAX_RETRY_SPI;
1828 1872
1829 /* Read "Status Register" repeatedly until the LSB is cleared. 1873 /*
1874 * Read "Status Register" repeatedly until the LSB is cleared.
1830 * The EEPROM will signal that the command has been completed 1875 * The EEPROM will signal that the command has been completed
1831 * by clearing bit 0 of the internal status register. If it's 1876 * by clearing bit 0 of the internal status register. If it's
1832 * not cleared within 'timeout', then error out. */ 1877 * not cleared within 'timeout', then error out.
1878 */
1833 while (timeout) { 1879 while (timeout) {
1834 e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, 1880 e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
1835 hw->nvm.opcode_bits); 1881 hw->nvm.opcode_bits);
@@ -1866,8 +1912,10 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1866 u32 i, eerd = 0; 1912 u32 i, eerd = 0;
1867 s32 ret_val = 0; 1913 s32 ret_val = 0;
1868 1914
1869 /* A check for invalid values: offset too large, too many words, 1915 /*
1870 * and not enough words. */ 1916 * A check for invalid values: offset too large, too many words,
1917 * too many words for the offset, and not enough words.
1918 */
1871 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 1919 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1872 (words == 0)) { 1920 (words == 0)) {
1873 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); 1921 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
@@ -1883,8 +1931,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1883 if (ret_val) 1931 if (ret_val)
1884 break; 1932 break;
1885 1933
1886 data[i] = (er32(EERD) >> 1934 data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
1887 E1000_NVM_RW_REG_DATA);
1888 } 1935 }
1889 1936
1890 return ret_val; 1937 return ret_val;
@@ -1908,8 +1955,10 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1908 s32 ret_val; 1955 s32 ret_val;
1909 u16 widx = 0; 1956 u16 widx = 0;
1910 1957
1911 /* A check for invalid values: offset too large, too many words, 1958 /*
1912 * and not enough words. */ 1959 * A check for invalid values: offset too large, too many words,
1960 * and not enough words.
1961 */
1913 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 1962 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1914 (words == 0)) { 1963 (words == 0)) {
1915 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); 1964 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
@@ -1939,8 +1988,10 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1939 1988
1940 e1000_standby_nvm(hw); 1989 e1000_standby_nvm(hw);
1941 1990
1942 /* Some SPI eeproms use the 8th address bit embedded in the 1991 /*
1943 * opcode */ 1992 * Some SPI eeproms use the 8th address bit embedded in the
1993 * opcode
1994 */
1944 if ((nvm->address_bits == 8) && (offset >= 128)) 1995 if ((nvm->address_bits == 8) && (offset >= 128))
1945 write_opcode |= NVM_A8_OPCODE_SPI; 1996 write_opcode |= NVM_A8_OPCODE_SPI;
1946 1997
@@ -1985,9 +2036,9 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
1985 /* Check for an alternate MAC address. An alternate MAC 2036 /* Check for an alternate MAC address. An alternate MAC
1986 * address can be setup by pre-boot software and must be 2037 * address can be setup by pre-boot software and must be
1987 * treated like a permanent address and must override the 2038 * treated like a permanent address and must override the
1988 * actual permanent MAC address. */ 2039 * actual permanent MAC address.*/
1989 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, 2040 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
1990 &mac_addr_offset); 2041 &mac_addr_offset);
1991 if (ret_val) { 2042 if (ret_val) {
1992 hw_dbg(hw, "NVM Read Error\n"); 2043 hw_dbg(hw, "NVM Read Error\n");
1993 return ret_val; 2044 return ret_val;
@@ -2000,7 +2051,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2000 mac_addr_offset += ETH_ALEN/sizeof(u16); 2051 mac_addr_offset += ETH_ALEN/sizeof(u16);
2001 2052
2002 /* make sure we have a valid mac address here 2053 /* make sure we have a valid mac address here
2003 * before using it */ 2054 * before using it */
2004 ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, 2055 ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
2005 &nvm_data); 2056 &nvm_data);
2006 if (ret_val) { 2057 if (ret_val) {
@@ -2012,7 +2063,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2012 } 2063 }
2013 2064
2014 if (mac_addr_offset) 2065 if (mac_addr_offset)
2015 hw->dev_spec.e82571.alt_mac_addr_is_present = 1; 2066 hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
2016 } 2067 }
2017 2068
2018 for (i = 0; i < ETH_ALEN; i += 2) { 2069 for (i = 0; i < ETH_ALEN; i += 2) {
@@ -2188,7 +2239,7 @@ bool e1000e_check_mng_mode(struct e1000_hw *hw)
2188} 2239}
2189 2240
2190/** 2241/**
2191 * e1000e_enable_tx_pkt_filtering - Enable packet filtering on TX 2242 * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
2192 * @hw: pointer to the HW structure 2243 * @hw: pointer to the HW structure
2193 * 2244 *
2194 * Enables packet filtering on transmit packets if manageability is enabled 2245 * Enables packet filtering on transmit packets if manageability is enabled
@@ -2208,7 +2259,8 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2208 return 0; 2259 return 0;
2209 } 2260 }
2210 2261
2211 /* If we can't read from the host interface for whatever 2262 /*
2263 * If we can't read from the host interface for whatever
2212 * reason, disable filtering. 2264 * reason, disable filtering.
2213 */ 2265 */
2214 ret_val = e1000_mng_enable_host_if(hw); 2266 ret_val = e1000_mng_enable_host_if(hw);
@@ -2226,7 +2278,8 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2226 hdr->checksum = 0; 2278 hdr->checksum = 0;
2227 csum = e1000_calculate_checksum((u8 *)hdr, 2279 csum = e1000_calculate_checksum((u8 *)hdr,
2228 E1000_MNG_DHCP_COOKIE_LENGTH); 2280 E1000_MNG_DHCP_COOKIE_LENGTH);
2229 /* If either the checksums or signature don't match, then 2281 /*
2282 * If either the checksums or signature don't match, then
2230 * the cookie area isn't considered valid, in which case we 2283 * the cookie area isn't considered valid, in which case we
2231 * take the safe route of assuming Tx filtering is enabled. 2284 * take the safe route of assuming Tx filtering is enabled.
2232 */ 2285 */
@@ -2318,8 +2371,10 @@ static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
2318 /* Calculate length in DWORDs */ 2371 /* Calculate length in DWORDs */
2319 length >>= 2; 2372 length >>= 2;
2320 2373
2321 /* The device driver writes the relevant command block into the 2374 /*
2322 * ram area. */ 2375 * The device driver writes the relevant command block into the
2376 * ram area.
2377 */
2323 for (i = 0; i < length; i++) { 2378 for (i = 0; i < length; i++) {
2324 for (j = 0; j < sizeof(u32); j++) { 2379 for (j = 0; j < sizeof(u32); j++) {
2325 *(tmp + j) = *bufptr++; 2380 *(tmp + j) = *bufptr++;
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index f501dd5e7b16..88fac392d4e0 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -82,7 +82,7 @@ static int e1000_desc_unused(struct e1000_ring *ring)
82} 82}
83 83
84/** 84/**
85 * e1000_receive_skb - helper function to handle rx indications 85 * e1000_receive_skb - helper function to handle Rx indications
86 * @adapter: board private structure 86 * @adapter: board private structure
87 * @status: descriptor status field as written by hardware 87 * @status: descriptor status field as written by hardware
88 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) 88 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
@@ -138,8 +138,9 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
138 /* TCP checksum is good */ 138 /* TCP checksum is good */
139 skb->ip_summed = CHECKSUM_UNNECESSARY; 139 skb->ip_summed = CHECKSUM_UNNECESSARY;
140 } else { 140 } else {
141 /* IP fragment with UDP payload */ 141 /*
142 /* Hardware complements the payload checksum, so we undo it 142 * IP fragment with UDP payload
143 * Hardware complements the payload checksum, so we undo it
143 * and then put the value in host order for further stack use. 144 * and then put the value in host order for further stack use.
144 */ 145 */
145 __sum16 sum = (__force __sum16)htons(csum); 146 __sum16 sum = (__force __sum16)htons(csum);
@@ -182,7 +183,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
182 break; 183 break;
183 } 184 }
184 185
185 /* Make buffer alignment 2 beyond a 16 byte boundary 186 /*
187 * Make buffer alignment 2 beyond a 16 byte boundary
186 * this will result in a 16 byte aligned IP header after 188 * this will result in a 16 byte aligned IP header after
187 * the 14 byte MAC header is removed 189 * the 14 byte MAC header is removed
188 */ 190 */
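
The 2-byte offset described above works because 2 + 14 = 16, so once the Ethernet header is stripped the IP header sits back on a 16-byte boundary; the arithmetic, as a trivially checkable snippet (NET_IP_ALIGN and ETH_HLEN carry their usual kernel values here):

#include <assert.h>

#define NET_IP_ALIGN 2    /* pad inserted before the Ethernet header */
#define ETH_HLEN     14   /* Ethernet header length */

int main(void)
{
        /* IP header offset within a 16-byte-aligned buffer */
        assert((NET_IP_ALIGN + ETH_HLEN) % 16 == 0);
        return 0;
}
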
@@ -213,10 +215,12 @@ map_skb:
213 if (i-- == 0) 215 if (i-- == 0)
214 i = (rx_ring->count - 1); 216 i = (rx_ring->count - 1);
215 217
216 /* Force memory writes to complete before letting h/w 218 /*
219 * Force memory writes to complete before letting h/w
217 * know there are new descriptors to fetch. (Only 220 * know there are new descriptors to fetch. (Only
218 * applicable for weak-ordered memory model archs, 221 * applicable for weak-ordered memory model archs,
219 * such as IA-64). */ 222 * such as IA-64).
223 */
220 wmb(); 224 wmb();
221 writel(i, adapter->hw.hw_addr + rx_ring->tail); 225 writel(i, adapter->hw.hw_addr + rx_ring->tail);
222 } 226 }
@@ -285,7 +289,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
285 break; 289 break;
286 } 290 }
287 291
288 /* Make buffer alignment 2 beyond a 16 byte boundary 292 /*
293 * Make buffer alignment 2 beyond a 16 byte boundary
289 * this will result in a 16 byte aligned IP header after 294 * this will result in a 16 byte aligned IP header after
290 * the 14 byte MAC header is removed 295 * the 14 byte MAC header is removed
291 */ 296 */
@@ -319,12 +324,15 @@ no_buffers:
319 if (!(i--)) 324 if (!(i--))
320 i = (rx_ring->count - 1); 325 i = (rx_ring->count - 1);
321 326
322 /* Force memory writes to complete before letting h/w 327 /*
328 * Force memory writes to complete before letting h/w
323 * know there are new descriptors to fetch. (Only 329 * know there are new descriptors to fetch. (Only
324 * applicable for weak-ordered memory model archs, 330 * applicable for weak-ordered memory model archs,
325 * such as IA-64). */ 331 * such as IA-64).
332 */
326 wmb(); 333 wmb();
327 /* Hardware increments by 16 bytes, but packet split 334 /*
335 * Hardware increments by 16 bytes, but packet split
328 * descriptors are 32 bytes...so we increment tail 336 * descriptors are 32 bytes...so we increment tail
329 * twice as much. 337 * twice as much.
330 */ 338 */
@@ -409,9 +417,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
409 total_rx_bytes += length; 417 total_rx_bytes += length;
410 total_rx_packets++; 418 total_rx_packets++;
411 419
412 /* code added for copybreak, this should improve 420 /*
421 * code added for copybreak, this should improve
413 * performance for small packets with large amounts 422 * performance for small packets with large amounts
414 * of reassembly being done in the stack */ 423 * of reassembly being done in the stack
424 */
415 if (length < copybreak) { 425 if (length < copybreak) {
416 struct sk_buff *new_skb = 426 struct sk_buff *new_skb =
417 netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 427 netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
@@ -581,14 +591,15 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
581 } 591 }
582 592
583 if (adapter->detect_tx_hung) { 593 if (adapter->detect_tx_hung) {
584 /* Detect a transmit hang in hardware, this serializes the 594 /*
585 * check with the clearing of time_stamp and movement of i */ 595 * Detect a transmit hang in hardware, this serializes the
596 * check with the clearing of time_stamp and movement of i
597 */
586 adapter->detect_tx_hung = 0; 598 adapter->detect_tx_hung = 0;
587 if (tx_ring->buffer_info[eop].dma && 599 if (tx_ring->buffer_info[eop].dma &&
588 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp 600 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
589 + (adapter->tx_timeout_factor * HZ)) 601 + (adapter->tx_timeout_factor * HZ))
590 && !(er32(STATUS) & 602 && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
591 E1000_STATUS_TXOFF)) {
592 e1000_print_tx_hang(adapter); 603 e1000_print_tx_hang(adapter);
593 netif_stop_queue(netdev); 604 netif_stop_queue(netdev);
594 } 605 }
@@ -677,21 +688,28 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
677 skb_put(skb, length); 688 skb_put(skb, length);
678 689
679 { 690 {
680 /* this looks ugly, but it seems compiler issues make it 691 /*
681 more efficient than reusing j */ 692 * this looks ugly, but it seems compiler issues make it
693 * more efficient than reusing j
694 */
682 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); 695 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
683 696
684 /* page alloc/put takes too long and effects small packet 697 /*
685 * throughput, so unsplit small packets and save the alloc/put*/ 698 * page alloc/put takes too long and effects small packet
699 * throughput, so unsplit small packets and save the alloc/put
700 * only valid in softirq (napi) context to call kmap_*
701 */
686 if (l1 && (l1 <= copybreak) && 702 if (l1 && (l1 <= copybreak) &&
687 ((length + l1) <= adapter->rx_ps_bsize0)) { 703 ((length + l1) <= adapter->rx_ps_bsize0)) {
688 u8 *vaddr; 704 u8 *vaddr;
689 705
690 ps_page = &buffer_info->ps_pages[0]; 706 ps_page = &buffer_info->ps_pages[0];
691 707
692 /* there is no documentation about how to call 708 /*
709 * there is no documentation about how to call
693 * kmap_atomic, so we can't hold the mapping 710 * kmap_atomic, so we can't hold the mapping
694 * very long */ 711 * very long
712 */
695 pci_dma_sync_single_for_cpu(pdev, ps_page->dma, 713 pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
696 PAGE_SIZE, PCI_DMA_FROMDEVICE); 714 PAGE_SIZE, PCI_DMA_FROMDEVICE);
697 vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ); 715 vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
@@ -836,19 +854,25 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
836 struct e1000_hw *hw = &adapter->hw; 854 struct e1000_hw *hw = &adapter->hw;
837 u32 icr = er32(ICR); 855 u32 icr = er32(ICR);
838 856
839 /* read ICR disables interrupts using IAM */ 857 /*
858 * read ICR disables interrupts using IAM
859 */
840 860
841 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 861 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
842 hw->mac.get_link_status = 1; 862 hw->mac.get_link_status = 1;
843 /* ICH8 workaround-- Call gig speed drop workaround on cable 863 /*
844 * disconnect (LSC) before accessing any PHY registers */ 864 * ICH8 workaround-- Call gig speed drop workaround on cable
865 * disconnect (LSC) before accessing any PHY registers
866 */
845 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && 867 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
846 (!(er32(STATUS) & E1000_STATUS_LU))) 868 (!(er32(STATUS) & E1000_STATUS_LU)))
847 e1000e_gig_downshift_workaround_ich8lan(hw); 869 e1000e_gig_downshift_workaround_ich8lan(hw);
848 870
849 /* 80003ES2LAN workaround-- For packet buffer work-around on 871 /*
872 * 80003ES2LAN workaround-- For packet buffer work-around on
850 * link down event; disable receives here in the ISR and reset 873 * link down event; disable receives here in the ISR and reset
851 * adapter in watchdog */ 874 * adapter in watchdog
875 */
852 if (netif_carrier_ok(netdev) && 876 if (netif_carrier_ok(netdev) &&
853 adapter->flags & FLAG_RX_NEEDS_RESTART) { 877 adapter->flags & FLAG_RX_NEEDS_RESTART) {
854 /* disable receives */ 878 /* disable receives */
@@ -886,23 +910,31 @@ static irqreturn_t e1000_intr(int irq, void *data)
886 if (!icr) 910 if (!icr)
887 return IRQ_NONE; /* Not our interrupt */ 911 return IRQ_NONE; /* Not our interrupt */
888 912
889 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 913 /*
890 * not set, then the adapter didn't send an interrupt */ 914 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
915 * not set, then the adapter didn't send an interrupt
916 */
891 if (!(icr & E1000_ICR_INT_ASSERTED)) 917 if (!(icr & E1000_ICR_INT_ASSERTED))
892 return IRQ_NONE; 918 return IRQ_NONE;
893 919
894 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No 920 /*
895 * need for the IMC write */ 921 * Interrupt Auto-Mask...upon reading ICR,
922 * interrupts are masked. No need for the
923 * IMC write
924 */
896 925
897 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 926 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
898 hw->mac.get_link_status = 1; 927 hw->mac.get_link_status = 1;
899 /* ICH8 workaround-- Call gig speed drop workaround on cable 928 /*
900 * disconnect (LSC) before accessing any PHY registers */ 929 * ICH8 workaround-- Call gig speed drop workaround on cable
930 * disconnect (LSC) before accessing any PHY registers
931 */
901 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && 932 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
902 (!(er32(STATUS) & E1000_STATUS_LU))) 933 (!(er32(STATUS) & E1000_STATUS_LU)))
903 e1000e_gig_downshift_workaround_ich8lan(hw); 934 e1000e_gig_downshift_workaround_ich8lan(hw);
904 935
905 /* 80003ES2LAN workaround-- 936 /*
937 * 80003ES2LAN workaround--
906 * For packet buffer work-around on link down event; 938 * For packet buffer work-around on link down event;
907 * disable receives here in the ISR and 939 * disable receives here in the ISR and
908 * reset adapter in watchdog 940 * reset adapter in watchdog
@@ -1011,8 +1043,7 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter)
1011 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); 1043 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
1012 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { 1044 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
1013 ctrl_ext = er32(CTRL_EXT); 1045 ctrl_ext = er32(CTRL_EXT);
1014 ew32(CTRL_EXT, 1046 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1015 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1016 } 1047 }
1017} 1048}
1018 1049
@@ -1038,8 +1069,7 @@ static void e1000_release_hw_control(struct e1000_adapter *adapter)
1038 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); 1069 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
1039 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { 1070 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
1040 ctrl_ext = er32(CTRL_EXT); 1071 ctrl_ext = er32(CTRL_EXT);
1041 ew32(CTRL_EXT, 1072 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1042 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1043 } 1073 }
1044} 1074}
1045 1075
@@ -1341,9 +1371,11 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
1341 1371
1342set_itr_now: 1372set_itr_now:
1343 if (new_itr != adapter->itr) { 1373 if (new_itr != adapter->itr) {
1344 /* this attempts to bias the interrupt rate towards Bulk 1374 /*
1375 * this attempts to bias the interrupt rate towards Bulk
1345 * by adding intermediate steps when interrupt rate is 1376 * by adding intermediate steps when interrupt rate is
1346 * increasing */ 1377 * increasing
1378 */
1347 new_itr = new_itr > adapter->itr ? 1379 new_itr = new_itr > adapter->itr ?
1348 min(adapter->itr + (new_itr >> 2), new_itr) : 1380 min(adapter->itr + (new_itr >> 2), new_itr) :
1349 new_itr; 1381 new_itr;
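
The stepping expression above ramps the interrupt rate up by at most a quarter of the target per adjustment; a small sketch makes the behaviour concrete (for example, moving from 4000 to a requested 20000 interrupts/s lands at 9000 on the first step):

#include <stdint.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Bias toward bulk: step upward by old + new/4, never past the target. */
static uint32_t step_itr(uint32_t old_itr, uint32_t new_itr)
{
        return new_itr > old_itr ? min_u32(old_itr + (new_itr >> 2), new_itr)
                                 : new_itr;
}

Decreases are applied immediately, which is why only the increasing branch is damped.
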
@@ -1354,7 +1386,7 @@ set_itr_now:
1354 1386
1355/** 1387/**
1356 * e1000_clean - NAPI Rx polling callback 1388 * e1000_clean - NAPI Rx polling callback
1357 * @adapter: board private structure 1389 * @napi: struct associated with this polling callback
1358 * @budget: amount of packets driver is allowed to process this poll 1390 * @budget: amount of packets driver is allowed to process this poll
1359 **/ 1391 **/
1360static int e1000_clean(struct napi_struct *napi, int budget) 1392static int e1000_clean(struct napi_struct *napi, int budget)
@@ -1366,10 +1398,12 @@ static int e1000_clean(struct napi_struct *napi, int budget)
1366 /* Must NOT use netdev_priv macro here. */ 1398 /* Must NOT use netdev_priv macro here. */
1367 adapter = poll_dev->priv; 1399 adapter = poll_dev->priv;
1368 1400
1369 /* e1000_clean is called per-cpu. This lock protects 1401 /*
1402 * e1000_clean is called per-cpu. This lock protects
1370 * tx_ring from being cleaned by multiple cpus 1403 * tx_ring from being cleaned by multiple cpus
1371 * simultaneously. A failure obtaining the lock means 1404 * simultaneously. A failure obtaining the lock means
1372 * tx_ring is currently being cleaned anyway. */ 1405 * tx_ring is currently being cleaned anyway.
1406 */
1373 if (spin_trylock(&adapter->tx_queue_lock)) { 1407 if (spin_trylock(&adapter->tx_queue_lock)) {
1374 tx_cleaned = e1000_clean_tx_irq(adapter); 1408 tx_cleaned = e1000_clean_tx_irq(adapter);
1375 spin_unlock(&adapter->tx_queue_lock); 1409 spin_unlock(&adapter->tx_queue_lock);
@@ -1539,9 +1573,11 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
1539 1573
1540 manc = er32(MANC); 1574 manc = er32(MANC);
1541 1575
1542 /* enable receiving management packets to the host. this will probably 1576 /*
1577 * enable receiving management packets to the host. this will probably
1543 * generate destination unreachable messages from the host OS, but 1578 * generate destination unreachable messages from the host OS, but
1544 * the packets will be handled on SMBUS */ 1579 * the packets will be handled on SMBUS
1580 */
1545 manc |= E1000_MANC_EN_MNG2HOST; 1581 manc |= E1000_MANC_EN_MNG2HOST;
1546 manc2h = er32(MANC2H); 1582 manc2h = er32(MANC2H);
1547#define E1000_MNG2HOST_PORT_623 (1 << 5) 1583#define E1000_MNG2HOST_PORT_623 (1 << 5)
@@ -1591,7 +1627,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1591 1627
1592 /* Set the Tx Interrupt Delay register */ 1628 /* Set the Tx Interrupt Delay register */
1593 ew32(TIDV, adapter->tx_int_delay); 1629 ew32(TIDV, adapter->tx_int_delay);
1594 /* tx irq moderation */ 1630 /* Tx irq moderation */
1595 ew32(TADV, adapter->tx_abs_int_delay); 1631 ew32(TADV, adapter->tx_abs_int_delay);
1596 1632
1597 /* Program the Transmit Control Register */ 1633 /* Program the Transmit Control Register */
@@ -1602,8 +1638,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1602 1638
1603 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { 1639 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
1604 tarc = er32(TARC0); 1640 tarc = er32(TARC0);
1605 /* set the speed mode bit, we'll clear it if we're not at 1641 /*
1606 * gigabit link later */ 1642 * set the speed mode bit, we'll clear it if we're not at
1643 * gigabit link later
1644 */
1607#define SPEED_MODE_BIT (1 << 21) 1645#define SPEED_MODE_BIT (1 << 21)
1608 tarc |= SPEED_MODE_BIT; 1646 tarc |= SPEED_MODE_BIT;
1609 ew32(TARC0, tarc); 1647 ew32(TARC0, tarc);
@@ -1724,8 +1762,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1724 /* Configure extra packet-split registers */ 1762 /* Configure extra packet-split registers */
1725 rfctl = er32(RFCTL); 1763 rfctl = er32(RFCTL);
1726 rfctl |= E1000_RFCTL_EXTEN; 1764 rfctl |= E1000_RFCTL_EXTEN;
1727 /* disable packet split support for IPv6 extension headers, 1765 /*
1728 * because some malformed IPv6 headers can hang the RX */ 1766 * disable packet split support for IPv6 extension headers,
1767 * because some malformed IPv6 headers can hang the Rx
1768 */
1729 rfctl |= (E1000_RFCTL_IPV6_EX_DIS | 1769 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
1730 E1000_RFCTL_NEW_IPV6_EXT_DIS); 1770 E1000_RFCTL_NEW_IPV6_EXT_DIS);
1731 1771
@@ -1794,8 +1834,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1794 /* irq moderation */ 1834 /* irq moderation */
1795 ew32(RADV, adapter->rx_abs_int_delay); 1835 ew32(RADV, adapter->rx_abs_int_delay);
1796 if (adapter->itr_setting != 0) 1836 if (adapter->itr_setting != 0)
1797 ew32(ITR, 1837 ew32(ITR, 1000000000 / (adapter->itr * 256));
1798 1000000000 / (adapter->itr * 256));
1799 1838
1800 ctrl_ext = er32(CTRL_EXT); 1839 ctrl_ext = er32(CTRL_EXT);
1801 /* Reset delay timers after every interrupt */ 1840 /* Reset delay timers after every interrupt */
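For reference, the single-line ITR write consolidated in the hunk above turns an interrupts-per-second target (adapter->itr) into the register's 256-nanosecond interval units. A standalone sketch of that arithmetic; the helper name and the 8000/100000 sample rates are illustrative only, not values from this patch:

#include <stdio.h>
#include <stdint.h>

/* ITR holds the minimum gap between interrupts in 256 ns units,
 * so interrupts/sec -> register value is 10^9 / (rate * 256). */
static uint32_t itr_reg_from_rate(uint32_t ints_per_sec)
{
	return 1000000000u / (ints_per_sec * 256u);
}

int main(void)
{
	printf("%u\n", itr_reg_from_rate(8000));	/* 488 */
	printf("%u\n", itr_reg_from_rate(100000));	/* 39 */
	return 0;
}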
@@ -1806,8 +1845,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1806 ew32(CTRL_EXT, ctrl_ext); 1845 ew32(CTRL_EXT, ctrl_ext);
1807 e1e_flush(); 1846 e1e_flush();
1808 1847
1809 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1848 /*
1810 * the Base and Length of the Rx Descriptor Ring */ 1849 * Setup the HW Rx Head and Tail Descriptor Pointers and
1850 * the Base and Length of the Rx Descriptor Ring
1851 */
1811 rdba = rx_ring->dma; 1852 rdba = rx_ring->dma;
1812 ew32(RDBAL, (rdba & DMA_32BIT_MASK)); 1853 ew32(RDBAL, (rdba & DMA_32BIT_MASK));
1813 ew32(RDBAH, (rdba >> 32)); 1854 ew32(RDBAH, (rdba >> 32));
@@ -1822,8 +1863,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1822 if (adapter->flags & FLAG_RX_CSUM_ENABLED) { 1863 if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
1823 rxcsum |= E1000_RXCSUM_TUOFL; 1864 rxcsum |= E1000_RXCSUM_TUOFL;
1824 1865
1825 /* IPv4 payload checksum for UDP fragments must be 1866 /*
1826 * used in conjunction with packet-split. */ 1867 * IPv4 payload checksum for UDP fragments must be
1868 * used in conjunction with packet-split.
1869 */
1827 if (adapter->rx_ps_pages) 1870 if (adapter->rx_ps_pages)
1828 rxcsum |= E1000_RXCSUM_IPPCSE; 1871 rxcsum |= E1000_RXCSUM_IPPCSE;
1829 } else { 1872 } else {
@@ -1832,9 +1875,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1832 } 1875 }
1833 ew32(RXCSUM, rxcsum); 1876 ew32(RXCSUM, rxcsum);
1834 1877
1835 /* Enable early receives on supported devices, only takes effect when 1878 /*
1879 * Enable early receives on supported devices, only takes effect when
1836 * packet size is equal or larger than the specified value (in 8 byte 1880 * packet size is equal or larger than the specified value (in 8 byte
1837 * units), e.g. using jumbo frames when setting to E1000_ERT_2048 */ 1881 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
1882 */
1838 if ((adapter->flags & FLAG_HAS_ERT) && 1883 if ((adapter->flags & FLAG_HAS_ERT) &&
1839 (adapter->netdev->mtu > ETH_DATA_LEN)) 1884 (adapter->netdev->mtu > ETH_DATA_LEN))
1840 ew32(ERT, E1000_ERT_2048); 1885 ew32(ERT, E1000_ERT_2048);
@@ -1930,7 +1975,7 @@ static void e1000_set_multi(struct net_device *netdev)
1930} 1975}
1931 1976
1932/** 1977/**
1933 * e1000_configure - configure the hardware for RX and TX 1978 * e1000_configure - configure the hardware for Rx and Tx
1934 * @adapter: private board structure 1979 * @adapter: private board structure
1935 **/ 1980 **/
1936static void e1000_configure(struct e1000_adapter *adapter) 1981static void e1000_configure(struct e1000_adapter *adapter)
@@ -1943,8 +1988,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
1943 e1000_configure_tx(adapter); 1988 e1000_configure_tx(adapter);
1944 e1000_setup_rctl(adapter); 1989 e1000_setup_rctl(adapter);
1945 e1000_configure_rx(adapter); 1990 e1000_configure_rx(adapter);
1946 adapter->alloc_rx_buf(adapter, 1991 adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring));
1947 e1000_desc_unused(adapter->rx_ring));
1948} 1992}
1949 1993
1950/** 1994/**
@@ -1961,8 +2005,10 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
1961 2005
1962 /* Just clear the power down bit to wake the phy back up */ 2006 /* Just clear the power down bit to wake the phy back up */
1963 if (adapter->hw.media_type == e1000_media_type_copper) { 2007 if (adapter->hw.media_type == e1000_media_type_copper) {
1964 /* according to the manual, the phy will retain its 2008 /*
1965 * settings across a power-down/up cycle */ 2009 * According to the manual, the phy will retain its
2010 * settings across a power-down/up cycle
2011 */
1966 e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg); 2012 e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg);
1967 mii_reg &= ~MII_CR_POWER_DOWN; 2013 mii_reg &= ~MII_CR_POWER_DOWN;
1968 e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg); 2014 e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg);
@@ -1991,8 +2037,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
1991 return; 2037 return;
1992 2038
1993 /* reset is blocked because of a SoL/IDER session */ 2039 /* reset is blocked because of a SoL/IDER session */
1994 if (e1000e_check_mng_mode(hw) || 2040 if (e1000e_check_mng_mode(hw) || e1000_check_reset_block(hw))
1995 e1000_check_reset_block(hw))
1996 return; 2041 return;
1997 2042
1998 /* manageability (AMT) is enabled */ 2043 /* manageability (AMT) is enabled */
@@ -2012,7 +2057,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
2012 * This function boots the hardware and enables some settings that 2057 * This function boots the hardware and enables some settings that
2013 * require a configuration cycle of the hardware - those cannot be 2058 * require a configuration cycle of the hardware - those cannot be
2014 * set/changed during runtime. After reset the device needs to be 2059 * set/changed during runtime. After reset the device needs to be
2015 * properly configured for rx, tx etc. 2060 * properly configured for Rx, Tx etc.
2016 */ 2061 */
2017void e1000e_reset(struct e1000_adapter *adapter) 2062void e1000e_reset(struct e1000_adapter *adapter)
2018{ 2063{
@@ -2022,23 +2067,27 @@ void e1000e_reset(struct e1000_adapter *adapter)
2022 u32 pba; 2067 u32 pba;
2023 u16 hwm; 2068 u16 hwm;
2024 2069
2070 /* reset Packet Buffer Allocation to default */
2025 ew32(PBA, adapter->pba); 2071 ew32(PBA, adapter->pba);
2026 2072
2027 if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN ) { 2073 if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN ) {
2028 /* To maintain wire speed transmits, the Tx FIFO should be 2074 /*
2075 * To maintain wire speed transmits, the Tx FIFO should be
2029 * large enough to accommodate two full transmit packets, 2076 * large enough to accommodate two full transmit packets,
2030 * rounded up to the next 1KB and expressed in KB. Likewise, 2077 * rounded up to the next 1KB and expressed in KB. Likewise,
2031 * the Rx FIFO should be large enough to accommodate at least 2078 * the Rx FIFO should be large enough to accommodate at least
2032 * one full receive packet and is similarly rounded up and 2079 * one full receive packet and is similarly rounded up and
2033 * expressed in KB. */ 2080 * expressed in KB.
2081 */
2034 pba = er32(PBA); 2082 pba = er32(PBA);
2035 /* upper 16 bits has Tx packet buffer allocation size in KB */ 2083 /* upper 16 bits has Tx packet buffer allocation size in KB */
2036 tx_space = pba >> 16; 2084 tx_space = pba >> 16;
2037 /* lower 16 bits has Rx packet buffer allocation size in KB */ 2085 /* lower 16 bits has Rx packet buffer allocation size in KB */
2038 pba &= 0xffff; 2086 pba &= 0xffff;
2039 /* the tx fifo also stores 16 bytes of information about the tx 2087 /*
2040 * but don't include ethernet FCS because hardware appends it */ 2088 * the Tx fifo also stores 16 bytes of information about the tx
2041 min_tx_space = (mac->max_frame_size + 2089 * but don't include ethernet FCS because hardware appends it
2090 */ min_tx_space = (mac->max_frame_size +
2042 sizeof(struct e1000_tx_desc) - 2091 sizeof(struct e1000_tx_desc) -
2043 ETH_FCS_LEN) * 2; 2092 ETH_FCS_LEN) * 2;
2044 min_tx_space = ALIGN(min_tx_space, 1024); 2093 min_tx_space = ALIGN(min_tx_space, 1024);
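To make the Tx FIFO sizing above concrete, here is a self-contained version of the same arithmetic. The 1522-byte frame is an assumed example (1500-byte MTU plus header, VLAN tag and FCS), and 16 bytes stands in for sizeof(struct e1000_tx_desc):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int max_frame_size = 1522;	/* assumed example frame */
	unsigned int tx_desc_len = 16;		/* per-packet info kept in the FIFO */
	unsigned int eth_fcs_len = 4;		/* excluded: hardware appends the FCS */

	/* two full frames plus descriptor info, rounded up to the next 1 KB */
	unsigned int min_tx_space = (max_frame_size + tx_desc_len - eth_fcs_len) * 2;
	min_tx_space = ALIGN(min_tx_space, 1024);

	printf("min Tx FIFO: %u bytes (%u KB)\n", min_tx_space, min_tx_space >> 10);
	return 0;	/* prints 3072 bytes (3 KB) */
}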
@@ -2048,15 +2097,19 @@ void e1000e_reset(struct e1000_adapter *adapter)
2048 min_rx_space = ALIGN(min_rx_space, 1024); 2097 min_rx_space = ALIGN(min_rx_space, 1024);
2049 min_rx_space >>= 10; 2098 min_rx_space >>= 10;
2050 2099
2051 /* If current Tx allocation is less than the min Tx FIFO size, 2100 /*
2101 * If current Tx allocation is less than the min Tx FIFO size,
2052 * and the min Tx FIFO size is less than the current Rx FIFO 2102 * and the min Tx FIFO size is less than the current Rx FIFO
2053 * allocation, take space away from current Rx allocation */ 2103 * allocation, take space away from current Rx allocation
2104 */
2054 if ((tx_space < min_tx_space) && 2105 if ((tx_space < min_tx_space) &&
2055 ((min_tx_space - tx_space) < pba)) { 2106 ((min_tx_space - tx_space) < pba)) {
2056 pba -= min_tx_space - tx_space; 2107 pba -= min_tx_space - tx_space;
2057 2108
2058 /* if short on rx space, rx wins and must trump tx 2109 /*
2059 * adjustment or use Early Receive if available */ 2110 * if short on Rx space, Rx wins and must trump tx
2111 * adjustment or use Early Receive if available
2112 */
2060 if ((pba < min_rx_space) && 2113 if ((pba < min_rx_space) &&
2061 (!(adapter->flags & FLAG_HAS_ERT))) 2114 (!(adapter->flags & FLAG_HAS_ERT)))
2062 /* ERT enabled in e1000_configure_rx */ 2115 /* ERT enabled in e1000_configure_rx */
@@ -2067,14 +2120,17 @@ void e1000e_reset(struct e1000_adapter *adapter)
2067 } 2120 }
2068 2121
2069 2122
2070 /* flow control settings */ 2123 /*
2071 /* The high water mark must be low enough to fit one full frame 2124 * flow control settings
2125 *
2126 * The high water mark must be low enough to fit one full frame
2072 * (or the size used for early receive) above it in the Rx FIFO. 2127 * (or the size used for early receive) above it in the Rx FIFO.
2073 * Set it to the lower of: 2128 * Set it to the lower of:
2074 * - 90% of the Rx FIFO size, and 2129 * - 90% of the Rx FIFO size, and
2075 * - the full Rx FIFO size minus the early receive size (for parts 2130 * - the full Rx FIFO size minus the early receive size (for parts
2076 * with ERT support assuming ERT set to E1000_ERT_2048), or 2131 * with ERT support assuming ERT set to E1000_ERT_2048), or
2077 * - the full Rx FIFO size minus one full frame */ 2132 * - the full Rx FIFO size minus one full frame
2133 */
2078 if (adapter->flags & FLAG_HAS_ERT) 2134 if (adapter->flags & FLAG_HAS_ERT)
2079 hwm = min(((adapter->pba << 10) * 9 / 10), 2135 hwm = min(((adapter->pba << 10) * 9 / 10),
2080 ((adapter->pba << 10) - (E1000_ERT_2048 << 3))); 2136 ((adapter->pba << 10) - (E1000_ERT_2048 << 3)));
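As a worked example of the water-mark rule spelled out above: with an assumed 12 KB Rx packet buffer and a 2048-byte early-receive threshold (E1000_ERT_2048 expressed in bytes), the "FIFO minus ERT" bound is the smaller one and becomes the high water mark. The values below are illustrative, not read from hardware:

#include <stdio.h>

#define min(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int pba_kb = 12;			/* assumed Rx packet buffer size */
	unsigned int pba_bytes = pba_kb << 10;
	unsigned int ert_bytes = 2048;			/* early receive threshold in bytes */

	unsigned int hwm = min(pba_bytes * 9 / 10,	/* 90% of the Rx FIFO */
			       pba_bytes - ert_bytes);	/* FIFO minus ERT headroom */

	printf("hwm = %u bytes\n", hwm);		/* prints 10240 */
	return 0;
}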
@@ -2108,9 +2164,11 @@ void e1000e_reset(struct e1000_adapter *adapter)
2108 2164
2109 if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) { 2165 if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
2110 u16 phy_data = 0; 2166 u16 phy_data = 0;
2111 /* speed up time to link by disabling smart power down, ignore 2167 /*
2168 * speed up time to link by disabling smart power down, ignore
2112 * the return value of this function because there is nothing 2169 * the return value of this function because there is nothing
2113 * different we would do if it failed */ 2170 * different we would do if it failed
2171 */
2114 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); 2172 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
2115 phy_data &= ~IGP02E1000_PM_SPD; 2173 phy_data &= ~IGP02E1000_PM_SPD;
2116 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); 2174 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
@@ -2140,8 +2198,10 @@ void e1000e_down(struct e1000_adapter *adapter)
2140 struct e1000_hw *hw = &adapter->hw; 2198 struct e1000_hw *hw = &adapter->hw;
2141 u32 tctl, rctl; 2199 u32 tctl, rctl;
2142 2200
2143 /* signal that we're down so the interrupt handler does not 2201 /*
2144 * reschedule our watchdog timer */ 2202 * signal that we're down so the interrupt handler does not
2203 * reschedule our watchdog timer
2204 */
2145 set_bit(__E1000_DOWN, &adapter->state); 2205 set_bit(__E1000_DOWN, &adapter->state);
2146 2206
2147 /* disable receives in the hardware */ 2207 /* disable receives in the hardware */
@@ -2272,16 +2332,20 @@ static int e1000_open(struct net_device *netdev)
2272 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) 2332 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
2273 e1000_update_mng_vlan(adapter); 2333 e1000_update_mng_vlan(adapter);
2274 2334
2275 /* If AMT is enabled, let the firmware know that the network 2335 /*
2276 * interface is now open */ 2336 * If AMT is enabled, let the firmware know that the network
2337 * interface is now open
2338 */
2277 if ((adapter->flags & FLAG_HAS_AMT) && 2339 if ((adapter->flags & FLAG_HAS_AMT) &&
2278 e1000e_check_mng_mode(&adapter->hw)) 2340 e1000e_check_mng_mode(&adapter->hw))
2279 e1000_get_hw_control(adapter); 2341 e1000_get_hw_control(adapter);
2280 2342
2281 /* before we allocate an interrupt, we must be ready to handle it. 2343 /*
2344 * before we allocate an interrupt, we must be ready to handle it.
2282 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 2345 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2283 * as soon as we call pci_request_irq, so we have to setup our 2346 * as soon as we call pci_request_irq, so we have to setup our
2284 * clean_rx handler before we do so. */ 2347 * clean_rx handler before we do so.
2348 */
2285 e1000_configure(adapter); 2349 e1000_configure(adapter);
2286 2350
2287 err = e1000_request_irq(adapter); 2351 err = e1000_request_irq(adapter);
@@ -2335,16 +2399,20 @@ static int e1000_close(struct net_device *netdev)
2335 e1000e_free_tx_resources(adapter); 2399 e1000e_free_tx_resources(adapter);
2336 e1000e_free_rx_resources(adapter); 2400 e1000e_free_rx_resources(adapter);
2337 2401
2338 /* kill manageability vlan ID if supported, but not if a vlan with 2402 /*
2339 * the same ID is registered on the host OS (let 8021q kill it) */ 2403 * kill manageability vlan ID if supported, but not if a vlan with
2404 * the same ID is registered on the host OS (let 8021q kill it)
2405 */
2340 if ((adapter->hw.mng_cookie.status & 2406 if ((adapter->hw.mng_cookie.status &
2341 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2407 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2342 !(adapter->vlgrp && 2408 !(adapter->vlgrp &&
2343 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) 2409 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
2344 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 2410 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2345 2411
2346 /* If AMT is enabled, let the firmware know that the network 2412 /*
2347 * interface is now closed */ 2413 * If AMT is enabled, let the firmware know that the network
2414 * interface is now closed
2415 */
2348 if ((adapter->flags & FLAG_HAS_AMT) && 2416 if ((adapter->flags & FLAG_HAS_AMT) &&
2349 e1000e_check_mng_mode(&adapter->hw)) 2417 e1000e_check_mng_mode(&adapter->hw))
2350 e1000_release_hw_control(adapter); 2418 e1000_release_hw_control(adapter);
@@ -2375,12 +2443,14 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
2375 /* activate the work around */ 2443 /* activate the work around */
2376 e1000e_set_laa_state_82571(&adapter->hw, 1); 2444 e1000e_set_laa_state_82571(&adapter->hw, 1);
2377 2445
2378 /* Hold a copy of the LAA in RAR[14] This is done so that 2446 /*
2447 * Hold a copy of the LAA in RAR[14] This is done so that
2379 * between the time RAR[0] gets clobbered and the time it 2448 * between the time RAR[0] gets clobbered and the time it
2380 * gets fixed (in e1000_watchdog), the actual LAA is in one 2449 * gets fixed (in e1000_watchdog), the actual LAA is in one
2381 * of the RARs and no incoming packets directed to this port 2450 * of the RARs and no incoming packets directed to this port
2382 * are dropped. Eventually the LAA will be in RAR[0] and 2451 * are dropped. Eventually the LAA will be in RAR[0] and
2383 * RAR[14] */ 2452 * RAR[14]
2453 */
2384 e1000e_rar_set(&adapter->hw, 2454 e1000e_rar_set(&adapter->hw,
2385 adapter->hw.mac.addr, 2455 adapter->hw.mac.addr,
2386 adapter->hw.mac.rar_entry_count - 1); 2456 adapter->hw.mac.rar_entry_count - 1);
@@ -2389,8 +2459,10 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
2389 return 0; 2459 return 0;
2390} 2460}
2391 2461
2392/* Need to wait a few seconds after link up to get diagnostic information from 2462/*
2393 * the phy */ 2463 * Need to wait a few seconds after link up to get diagnostic information from
2464 * the phy
2465 */
2394static void e1000_update_phy_info(unsigned long data) 2466static void e1000_update_phy_info(unsigned long data)
2395{ 2467{
2396 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 2468 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
@@ -2421,7 +2493,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
2421 2493
2422 spin_lock_irqsave(&adapter->stats_lock, irq_flags); 2494 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
2423 2495
2424 /* these counters are modified from e1000_adjust_tbi_stats, 2496 /*
2497 * these counters are modified from e1000_adjust_tbi_stats,
2425 * called from the interrupt context, so they must only 2498 * called from the interrupt context, so they must only
2426 * be written while holding adapter->stats_lock 2499 * be written while holding adapter->stats_lock
2427 */ 2500 */
@@ -2515,8 +2588,10 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
2515 2588
2516 /* Rx Errors */ 2589 /* Rx Errors */
2517 2590
2518 /* RLEC on some newer hardware can be incorrect so build 2591 /*
2519 * our own version based on RUC and ROC */ 2592 * RLEC on some newer hardware can be incorrect so build
2593 * our own version based on RUC and ROC
2594 */
2520 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 2595 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
2521 adapter->stats.crcerrs + adapter->stats.algnerrc + 2596 adapter->stats.crcerrs + adapter->stats.algnerrc +
2522 adapter->stats.ruc + adapter->stats.roc + 2597 adapter->stats.ruc + adapter->stats.roc +
@@ -2628,8 +2703,10 @@ static void e1000_watchdog_task(struct work_struct *work)
2628 &adapter->link_speed, 2703 &adapter->link_speed,
2629 &adapter->link_duplex); 2704 &adapter->link_duplex);
2630 e1000_print_link_info(adapter); 2705 e1000_print_link_info(adapter);
2631 /* tweak tx_queue_len according to speed/duplex 2706 /*
2632 * and adjust the timeout factor */ 2707 * tweak tx_queue_len according to speed/duplex
2708 * and adjust the timeout factor
2709 */
2633 netdev->tx_queue_len = adapter->tx_queue_len; 2710 netdev->tx_queue_len = adapter->tx_queue_len;
2634 adapter->tx_timeout_factor = 1; 2711 adapter->tx_timeout_factor = 1;
2635 switch (adapter->link_speed) { 2712 switch (adapter->link_speed) {
@@ -2645,8 +2722,10 @@ static void e1000_watchdog_task(struct work_struct *work)
2645 break; 2722 break;
2646 } 2723 }
2647 2724
2648 /* workaround: re-program speed mode bit after 2725 /*
2649 * link-up event */ 2726 * workaround: re-program speed mode bit after
2727 * link-up event
2728 */
2650 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && 2729 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
2651 !txb2b) { 2730 !txb2b) {
2652 u32 tarc0; 2731 u32 tarc0;
@@ -2655,8 +2734,10 @@ static void e1000_watchdog_task(struct work_struct *work)
2655 ew32(TARC0, tarc0); 2734 ew32(TARC0, tarc0);
2656 } 2735 }
2657 2736
2658 /* disable TSO for pcie and 10/100 speeds, to avoid 2737 /*
2659 * some hardware issues */ 2738 * disable TSO for pcie and 10/100 speeds, to avoid
2739 * some hardware issues
2740 */
2660 if (!(adapter->flags & FLAG_TSO_FORCE)) { 2741 if (!(adapter->flags & FLAG_TSO_FORCE)) {
2661 switch (adapter->link_speed) { 2742 switch (adapter->link_speed) {
2662 case SPEED_10: 2743 case SPEED_10:
@@ -2676,8 +2757,10 @@ static void e1000_watchdog_task(struct work_struct *work)
2676 } 2757 }
2677 } 2758 }
2678 2759
2679 /* enable transmits in the hardware, need to do this 2760 /*
2680 * after setting TARC0 */ 2761 * enable transmits in the hardware, need to do this
2762 * after setting TARC(0)
2763 */
2681 tctl = er32(TCTL); 2764 tctl = er32(TCTL);
2682 tctl |= E1000_TCTL_EN; 2765 tctl |= E1000_TCTL_EN;
2683 ew32(TCTL, tctl); 2766 ew32(TCTL, tctl);
@@ -2731,23 +2814,27 @@ link_up:
2731 tx_pending = (e1000_desc_unused(tx_ring) + 1 < 2814 tx_pending = (e1000_desc_unused(tx_ring) + 1 <
2732 tx_ring->count); 2815 tx_ring->count);
2733 if (tx_pending) { 2816 if (tx_pending) {
2734 /* We've lost link, so the controller stops DMA, 2817 /*
2818 * We've lost link, so the controller stops DMA,
2735 * but we've got queued Tx work that's never going 2819 * but we've got queued Tx work that's never going
2736 * to get done, so reset controller to flush Tx. 2820 * to get done, so reset controller to flush Tx.
2737 * (Do the reset outside of interrupt context). */ 2821 * (Do the reset outside of interrupt context).
2822 */
2738 adapter->tx_timeout_count++; 2823 adapter->tx_timeout_count++;
2739 schedule_work(&adapter->reset_task); 2824 schedule_work(&adapter->reset_task);
2740 } 2825 }
2741 } 2826 }
2742 2827
2743 /* Cause software interrupt to ensure rx ring is cleaned */ 2828 /* Cause software interrupt to ensure Rx ring is cleaned */
2744 ew32(ICS, E1000_ICS_RXDMT0); 2829 ew32(ICS, E1000_ICS_RXDMT0);
2745 2830
2746 /* Force detection of hung controller every watchdog period */ 2831 /* Force detection of hung controller every watchdog period */
2747 adapter->detect_tx_hung = 1; 2832 adapter->detect_tx_hung = 1;
2748 2833
2749 /* With 82571 controllers, LAA may be overwritten due to controller 2834 /*
2750 * reset from the other port. Set the appropriate LAA in RAR[0] */ 2835 * With 82571 controllers, LAA may be overwritten due to controller
2836 * reset from the other port. Set the appropriate LAA in RAR[0]
2837 */
2751 if (e1000e_get_laa_state_82571(hw)) 2838 if (e1000e_get_laa_state_82571(hw))
2752 e1000e_rar_set(hw, adapter->hw.mac.addr, 0); 2839 e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
2753 2840
@@ -3023,16 +3110,20 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
3023 3110
3024 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 3111 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3025 3112
3026 /* Force memory writes to complete before letting h/w 3113 /*
3114 * Force memory writes to complete before letting h/w
3027 * know there are new descriptors to fetch. (Only 3115 * know there are new descriptors to fetch. (Only
3028 * applicable for weak-ordered memory model archs, 3116 * applicable for weak-ordered memory model archs,
3029 * such as IA-64). */ 3117 * such as IA-64).
3118 */
3030 wmb(); 3119 wmb();
3031 3120
3032 tx_ring->next_to_use = i; 3121 tx_ring->next_to_use = i;
3033 writel(i, adapter->hw.hw_addr + tx_ring->tail); 3122 writel(i, adapter->hw.hw_addr + tx_ring->tail);
3034 /* we need this if more than one processor can write to our tail 3123 /*
3035 * at a time, it synchronizes IO on IA64/Altix systems */ 3124 * we need this if more than one processor can write to our tail
3125 * at a time, it synchronizes IO on IA64/Altix systems
3126 */
3036 mmiowb(); 3127 mmiowb();
3037} 3128}
3038 3129
@@ -3080,13 +3171,17 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3080 struct e1000_adapter *adapter = netdev_priv(netdev); 3171 struct e1000_adapter *adapter = netdev_priv(netdev);
3081 3172
3082 netif_stop_queue(netdev); 3173 netif_stop_queue(netdev);
3083 /* Herbert's original patch had: 3174 /*
3175 * Herbert's original patch had:
3084 * smp_mb__after_netif_stop_queue(); 3176 * smp_mb__after_netif_stop_queue();
3085 * but since that doesn't exist yet, just open code it. */ 3177 * but since that doesn't exist yet, just open code it.
3178 */
3086 smp_mb(); 3179 smp_mb();
3087 3180
3088 /* We need to check again in a case another CPU has just 3181 /*
3089 * made room available. */ 3182 * We need to check again in a case another CPU has just
3183 * made room available.
3184 */
3090 if (e1000_desc_unused(adapter->tx_ring) < size) 3185 if (e1000_desc_unused(adapter->tx_ring) < size)
3091 return -EBUSY; 3186 return -EBUSY;
3092 3187
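The comment above describes the usual stop-then-re-check dance: stop the queue, order that store against the following read with a full barrier, then test free descriptors again in case the Tx cleanup path made room in the meantime. A toy user-space rendering of the idea with C11 atomics; every name here is invented for illustration and the fence only stands in for the kernel's smp_mb():

#include <stdatomic.h>
#include <stdbool.h>

struct toy_ring {
	atomic_uint head;	/* producer index */
	atomic_uint tail;	/* cleaner index */
	unsigned int count;
	atomic_bool stopped;
};

static unsigned int toy_unused(struct toy_ring *r)
{
	unsigned int head = atomic_load(&r->head);
	unsigned int tail = atomic_load(&r->tail);

	/* free slots between producer and cleaner, keeping one slot as a gap */
	return (tail > head ? 0 : r->count) + tail - head - 1;
}

static int toy_maybe_stop(struct toy_ring *r, unsigned int needed)
{
	if (toy_unused(r) >= needed)
		return 0;

	atomic_store(&r->stopped, true);
	atomic_thread_fence(memory_order_seq_cst);	/* the open-coded smp_mb() */

	if (toy_unused(r) < needed)
		return -1;				/* still full, stay stopped */

	atomic_store(&r->stopped, false);		/* cleaner raced us, resume */
	return 0;
}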
@@ -3133,21 +3228,29 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3133 } 3228 }
3134 3229
3135 mss = skb_shinfo(skb)->gso_size; 3230 mss = skb_shinfo(skb)->gso_size;
3136 /* The controller does a simple calculation to 3231 /*
3232 * The controller does a simple calculation to
3137 * make sure there is enough room in the FIFO before 3233 * make sure there is enough room in the FIFO before
3138 * initiating the DMA for each buffer. The calc is: 3234 * initiating the DMA for each buffer. The calc is:
3139 * 4 = ceil(buffer len/mss). To make sure we don't 3235 * 4 = ceil(buffer len/mss). To make sure we don't
3140 * overrun the FIFO, adjust the max buffer len if mss 3236 * overrun the FIFO, adjust the max buffer len if mss
3141 * drops. */ 3237 * drops.
3238 */
3142 if (mss) { 3239 if (mss) {
3143 u8 hdr_len; 3240 u8 hdr_len;
3144 max_per_txd = min(mss << 2, max_per_txd); 3241 max_per_txd = min(mss << 2, max_per_txd);
3145 max_txd_pwr = fls(max_per_txd) - 1; 3242 max_txd_pwr = fls(max_per_txd) - 1;
3146 3243
3147 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data 3244 /*
3148 * points to just header, pull a few bytes of payload from 3245 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
3149 * frags into skb->data */ 3246 * points to just header, pull a few bytes of payload from
3247 * frags into skb->data
3248 */
3150 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 3249 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3250 /*
3251 * we do this workaround for ES2LAN, but it is un-necessary,
3252 * avoiding it could save a lot of cycles
3253 */
3151 if (skb->data_len && (hdr_len == len)) { 3254 if (skb->data_len && (hdr_len == len)) {
3152 unsigned int pull_size; 3255 unsigned int pull_size;
3153 3256
@@ -3181,8 +3284,10 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3181 /* Collision - tell upper layer to requeue */ 3284 /* Collision - tell upper layer to requeue */
3182 return NETDEV_TX_LOCKED; 3285 return NETDEV_TX_LOCKED;
3183 3286
3184 /* need: count + 2 desc gap to keep tail from touching 3287 /*
3185 * head, otherwise try next time */ 3288 * need: count + 2 desc gap to keep tail from touching
3289 * head, otherwise try next time
3290 */
3186 if (e1000_maybe_stop_tx(netdev, count + 2)) { 3291 if (e1000_maybe_stop_tx(netdev, count + 2)) {
3187 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); 3292 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
3188 return NETDEV_TX_BUSY; 3293 return NETDEV_TX_BUSY;
@@ -3207,9 +3312,11 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3207 else if (e1000_tx_csum(adapter, skb)) 3312 else if (e1000_tx_csum(adapter, skb))
3208 tx_flags |= E1000_TX_FLAGS_CSUM; 3313 tx_flags |= E1000_TX_FLAGS_CSUM;
3209 3314
3210 /* Old method was to assume IPv4 packet by default if TSO was enabled. 3315 /*
3316 * Old method was to assume IPv4 packet by default if TSO was enabled.
3211 * 82571 hardware supports TSO capabilities for IPv6 as well... 3317 * 82571 hardware supports TSO capabilities for IPv6 as well...
3212 * no longer assume, we must. */ 3318 * no longer assume, we must.
3319 */
3213 if (skb->protocol == htons(ETH_P_IP)) 3320 if (skb->protocol == htons(ETH_P_IP))
3214 tx_flags |= E1000_TX_FLAGS_IPV4; 3321 tx_flags |= E1000_TX_FLAGS_IPV4;
3215 3322
@@ -3311,10 +3418,12 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3311 if (netif_running(netdev)) 3418 if (netif_running(netdev))
3312 e1000e_down(adapter); 3419 e1000e_down(adapter);
3313 3420
3314 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3421 /*
3422 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3315 * means we reserve 2 more, this pushes us to allocate from the next 3423 * means we reserve 2 more, this pushes us to allocate from the next
3316 * larger slab size. 3424 * larger slab size.
3317 * i.e. RXBUFFER_2048 --> size-4096 slab */ 3425 * i.e. RXBUFFER_2048 --> size-4096 slab
3426 */
3318 3427
3319 if (max_frame <= 256) 3428 if (max_frame <= 256)
3320 adapter->rx_buffer_len = 256; 3429 adapter->rx_buffer_len = 256;
@@ -3331,7 +3440,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3331 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 3440 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
3332 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) 3441 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
3333 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 3442 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
3334 + ETH_FCS_LEN ; 3443 + ETH_FCS_LEN;
3335 3444
3336 ndev_info(netdev, "changing MTU from %d to %d\n", 3445 ndev_info(netdev, "changing MTU from %d to %d\n",
3337 netdev->mtu, new_mtu); 3446 netdev->mtu, new_mtu);
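The slab note above is easy to check with arithmetic: a 2048-byte receive buffer plus the 16 bytes netdev_alloc_skb reserves and the 2 bytes of NET_IP_ALIGN no longer fits in a 2048-byte slab object, so the allocation spills into the next power-of-two cache. A minimal standalone sketch; the power-of-two search is only for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int rx_buffer_len = 2048;	/* RXBUFFER_2048 */
	unsigned int reserved = 16 + 2;		/* netdev_alloc_skb pad + NET_IP_ALIGN */
	unsigned int needed = rx_buffer_len + reserved;
	unsigned int slab = 32;

	while (slab < needed)			/* slab caches come in powers of two */
		slab <<= 1;

	printf("%u + %u reserved -> size-%u slab\n",
	       rx_buffer_len, reserved, slab);	/* prints size-4096 */
	return 0;
}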
@@ -3467,8 +3576,10 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
3467 if (adapter->hw.phy.type == e1000_phy_igp_3) 3576 if (adapter->hw.phy.type == e1000_phy_igp_3)
3468 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); 3577 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
3469 3578
3470 /* Release control of h/w to f/w. If f/w is AMT enabled, this 3579 /*
3471 * would have already happened in close and is redundant. */ 3580 * Release control of h/w to f/w. If f/w is AMT enabled, this
3581 * would have already happened in close and is redundant.
3582 */
3472 e1000_release_hw_control(adapter); 3583 e1000_release_hw_control(adapter);
3473 3584
3474 pci_disable_device(pdev); 3585 pci_disable_device(pdev);
@@ -3543,9 +3654,11 @@ static int e1000_resume(struct pci_dev *pdev)
3543 3654
3544 netif_device_attach(netdev); 3655 netif_device_attach(netdev);
3545 3656
3546 /* If the controller has AMT, do not set DRV_LOAD until the interface 3657 /*
3658 * If the controller has AMT, do not set DRV_LOAD until the interface
3547 * is up. For all other cases, let the f/w know that the h/w is now 3659 * is up. For all other cases, let the f/w know that the h/w is now
3548 * under the control of the driver. */ 3660 * under the control of the driver.
3661 */
3549 if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw)) 3662 if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw))
3550 e1000_get_hw_control(adapter); 3663 e1000_get_hw_control(adapter);
3551 3664
@@ -3656,9 +3769,11 @@ static void e1000_io_resume(struct pci_dev *pdev)
3656 3769
3657 netif_device_attach(netdev); 3770 netif_device_attach(netdev);
3658 3771
3659 /* If the controller has AMT, do not set DRV_LOAD until the interface 3772 /*
3773 * If the controller has AMT, do not set DRV_LOAD until the interface
3660 * is up. For all other cases, let the f/w know that the h/w is now 3774 * is up. For all other cases, let the f/w know that the h/w is now
3661 * under the control of the driver. */ 3775 * under the control of the driver.
3776 */
3662 if (!(adapter->flags & FLAG_HAS_AMT) || 3777 if (!(adapter->flags & FLAG_HAS_AMT) ||
3663 !e1000e_check_mng_mode(&adapter->hw)) 3778 !e1000e_check_mng_mode(&adapter->hw))
3664 e1000_get_hw_control(adapter); 3779 e1000_get_hw_control(adapter);
@@ -3852,15 +3967,19 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
3852 if (pci_using_dac) 3967 if (pci_using_dac)
3853 netdev->features |= NETIF_F_HIGHDMA; 3968 netdev->features |= NETIF_F_HIGHDMA;
3854 3969
3855 /* We should not be using LLTX anymore, but we are still TX faster with 3970 /*
3856 * it. */ 3971 * We should not be using LLTX anymore, but we are still Tx faster with
3972 * it.
3973 */
3857 netdev->features |= NETIF_F_LLTX; 3974 netdev->features |= NETIF_F_LLTX;
3858 3975
3859 if (e1000e_enable_mng_pass_thru(&adapter->hw)) 3976 if (e1000e_enable_mng_pass_thru(&adapter->hw))
3860 adapter->flags |= FLAG_MNG_PT_ENABLED; 3977 adapter->flags |= FLAG_MNG_PT_ENABLED;
3861 3978
3862 /* before reading the NVM, reset the controller to 3979 /*
3863 * put the device in a known good starting state */ 3980 * before reading the NVM, reset the controller to
3981 * put the device in a known good starting state
3982 */
3864 adapter->hw.mac.ops.reset_hw(&adapter->hw); 3983 adapter->hw.mac.ops.reset_hw(&adapter->hw);
3865 3984
3866 /* 3985 /*
@@ -3954,9 +4073,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
3954 /* reset the hardware with the new settings */ 4073 /* reset the hardware with the new settings */
3955 e1000e_reset(adapter); 4074 e1000e_reset(adapter);
3956 4075
3957 /* If the controller has AMT, do not set DRV_LOAD until the interface 4076 /*
4077 * If the controller has AMT, do not set DRV_LOAD until the interface
3958 * is up. For all other cases, let the f/w know that the h/w is now 4078 * is up. For all other cases, let the f/w know that the h/w is now
3959 * under the control of the driver. */ 4079 * under the control of the driver.
4080 */
3960 if (!(adapter->flags & FLAG_HAS_AMT) || 4081 if (!(adapter->flags & FLAG_HAS_AMT) ||
3961 !e1000e_check_mng_mode(&adapter->hw)) 4082 !e1000e_check_mng_mode(&adapter->hw))
3962 e1000_get_hw_control(adapter); 4083 e1000_get_hw_control(adapter);
@@ -4013,16 +4134,20 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
4013 struct net_device *netdev = pci_get_drvdata(pdev); 4134 struct net_device *netdev = pci_get_drvdata(pdev);
4014 struct e1000_adapter *adapter = netdev_priv(netdev); 4135 struct e1000_adapter *adapter = netdev_priv(netdev);
4015 4136
4016 /* flush_scheduled work may reschedule our watchdog task, so 4137 /*
4017 * explicitly disable watchdog tasks from being rescheduled */ 4138 * flush_scheduled work may reschedule our watchdog task, so
4139 * explicitly disable watchdog tasks from being rescheduled
4140 */
4018 set_bit(__E1000_DOWN, &adapter->state); 4141 set_bit(__E1000_DOWN, &adapter->state);
4019 del_timer_sync(&adapter->watchdog_timer); 4142 del_timer_sync(&adapter->watchdog_timer);
4020 del_timer_sync(&adapter->phy_info_timer); 4143 del_timer_sync(&adapter->phy_info_timer);
4021 4144
4022 flush_scheduled_work(); 4145 flush_scheduled_work();
4023 4146
4024 /* Release control of h/w to f/w. If f/w is AMT enabled, this 4147 /*
4025 * would have already happened in close and is redundant. */ 4148 * Release control of h/w to f/w. If f/w is AMT enabled, this
4149 * would have already happened in close and is redundant.
4150 */
4026 e1000_release_hw_control(adapter); 4151 e1000_release_hw_control(adapter);
4027 4152
4028 unregister_netdev(netdev); 4153 unregister_netdev(netdev);
@@ -4060,13 +4185,16 @@ static struct pci_device_id e1000_pci_tbl[] = {
4060 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, 4185 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
4061 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, 4186 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
4062 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, 4187 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
4188
4063 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, 4189 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
4064 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, 4190 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
4065 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, 4191 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
4066 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, 4192 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
4193
4067 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, 4194 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
4068 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, 4195 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
4069 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, 4196 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
4197
4070 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), 4198 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
4071 board_80003es2lan }, 4199 board_80003es2lan },
4072 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), 4200 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
@@ -4075,6 +4203,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
4075 board_80003es2lan }, 4203 board_80003es2lan },
4076 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), 4204 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
4077 board_80003es2lan }, 4205 board_80003es2lan },
4206
4078 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, 4207 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
4079 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, 4208 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
4080 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, 4209 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
@@ -4082,6 +4211,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
4082 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, 4211 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
4083 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, 4212 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
4084 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, 4213 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
4214
4085 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, 4215 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
4086 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, 4216 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
4087 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, 4217 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
@@ -4099,7 +4229,7 @@ static struct pci_driver e1000_driver = {
4099 .probe = e1000_probe, 4229 .probe = e1000_probe,
4100 .remove = __devexit_p(e1000_remove), 4230 .remove = __devexit_p(e1000_remove),
4101#ifdef CONFIG_PM 4231#ifdef CONFIG_PM
4102 /* Power Managment Hooks */ 4232 /* Power Management Hooks */
4103 .suspend = e1000_suspend, 4233 .suspend = e1000_suspend,
4104 .resume = e1000_resume, 4234 .resume = e1000_resume,
4105#endif 4235#endif
@@ -4118,7 +4248,7 @@ static int __init e1000_init_module(void)
4118 int ret; 4248 int ret;
4119 printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", 4249 printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
4120 e1000e_driver_name, e1000e_driver_version); 4250 e1000e_driver_name, e1000e_driver_version);
4121 printk(KERN_INFO "%s: Copyright (c) 1999-2007 Intel Corporation.\n", 4251 printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n",
4122 e1000e_driver_name); 4252 e1000e_driver_name);
4123 ret = pci_register_driver(&e1000_driver); 4253 ret = pci_register_driver(&e1000_driver);
4124 4254
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index df266c32ac4b..a66b92efcf80 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -30,7 +30,8 @@
30 30
31#include "e1000.h" 31#include "e1000.h"
32 32
33/* This is the only thing that needs to be changed to adjust the 33/*
34 * This is the only thing that needs to be changed to adjust the
34 * maximum number of ports that the driver can manage. 35 * maximum number of ports that the driver can manage.
35 */ 36 */
36 37
@@ -46,7 +47,8 @@ module_param(copybreak, uint, 0644);
46MODULE_PARM_DESC(copybreak, 47MODULE_PARM_DESC(copybreak,
47 "Maximum size of packet that is copied to a new buffer on receive"); 48 "Maximum size of packet that is copied to a new buffer on receive");
48 49
49/* All parameters are treated the same, as an integer array of values. 50/*
51 * All parameters are treated the same, as an integer array of values.
50 * This macro just reduces the need to repeat the same declaration code 52 * This macro just reduces the need to repeat the same declaration code
51 * over and over (plus this helps to avoid typo bugs). 53 * over and over (plus this helps to avoid typo bugs).
52 */ 54 */
@@ -60,8 +62,9 @@ MODULE_PARM_DESC(copybreak,
60 MODULE_PARM_DESC(X, desc); 62 MODULE_PARM_DESC(X, desc);
61 63
62 64
63/* Transmit Interrupt Delay in units of 1.024 microseconds 65/*
64 * Tx interrupt delay needs to typically be set to something non zero 66 * Transmit Interrupt Delay in units of 1.024 microseconds
67 * Tx interrupt delay needs to typically be set to something non zero
65 * 68 *
66 * Valid Range: 0-65535 69 * Valid Range: 0-65535
67 */ 70 */
@@ -70,7 +73,8 @@ E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
70#define MAX_TXDELAY 0xFFFF 73#define MAX_TXDELAY 0xFFFF
71#define MIN_TXDELAY 0 74#define MIN_TXDELAY 0
72 75
73/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds 76/*
77 * Transmit Absolute Interrupt Delay in units of 1.024 microseconds
74 * 78 *
75 * Valid Range: 0-65535 79 * Valid Range: 0-65535
76 */ 80 */
@@ -79,8 +83,9 @@ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
79#define MAX_TXABSDELAY 0xFFFF 83#define MAX_TXABSDELAY 0xFFFF
80#define MIN_TXABSDELAY 0 84#define MIN_TXABSDELAY 0
81 85
82/* Receive Interrupt Delay in units of 1.024 microseconds 86/*
83 * hardware will likely hang if you set this to anything but zero. 87 * Receive Interrupt Delay in units of 1.024 microseconds
88 * hardware will likely hang if you set this to anything but zero.
84 * 89 *
85 * Valid Range: 0-65535 90 * Valid Range: 0-65535
86 */ 91 */
@@ -89,7 +94,8 @@ E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
89#define MAX_RXDELAY 0xFFFF 94#define MAX_RXDELAY 0xFFFF
90#define MIN_RXDELAY 0 95#define MIN_RXDELAY 0
91 96
92/* Receive Absolute Interrupt Delay in units of 1.024 microseconds 97/*
98 * Receive Absolute Interrupt Delay in units of 1.024 microseconds
93 * 99 *
94 * Valid Range: 0-65535 100 * Valid Range: 0-65535
95 */ 101 */
@@ -98,7 +104,8 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
98#define MAX_RXABSDELAY 0xFFFF 104#define MAX_RXABSDELAY 0xFFFF
99#define MIN_RXABSDELAY 0 105#define MIN_RXABSDELAY 0
100 106
101/* Interrupt Throttle Rate (interrupts/sec) 107/*
108 * Interrupt Throttle Rate (interrupts/sec)
102 * 109 *
103 * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) 110 * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
104 */ 111 */
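For context on the InterruptThrottleRate bounds described above: 0, 1 and 3 are the special off/dynamic settings and are taken as-is, while anything else must land in the 100..100000 range (the MIN_ITR/MAX_ITR defines in the next hunk). The sketch below shows one plausible check; the driver's own option-validation helper may instead fall back to a default rather than simply rejecting:

#include <stdio.h>

#define MAX_ITR 100000
#define MIN_ITR 100

/* 0 = off, 1 = dynamic, 3 = dynamic conservative: accepted without a range check */
static int itr_param_ok(unsigned int itr)
{
	if (itr == 0 || itr == 1 || itr == 3)
		return 1;
	return itr >= MIN_ITR && itr <= MAX_ITR;
}

int main(void)
{
	printf("%d %d %d\n", itr_param_ok(3), itr_param_ok(50), itr_param_ok(8000));
	return 0;	/* prints 1 0 1 */
}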
@@ -107,7 +114,8 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
107#define MAX_ITR 100000 114#define MAX_ITR 100000
108#define MIN_ITR 100 115#define MIN_ITR 100
109 116
110/* Enable Smart Power Down of the PHY 117/*
118 * Enable Smart Power Down of the PHY
111 * 119 *
112 * Valid Range: 0, 1 120 * Valid Range: 0, 1
113 * 121 *
@@ -115,7 +123,8 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
115 */ 123 */
116E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); 124E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
117 125
118/* Enable Kumeran Lock Loss workaround 126/*
127 * Enable Kumeran Lock Loss workaround
119 * 128 *
120 * Valid Range: 0, 1 129 * Valid Range: 0, 1
121 * 130 *
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index dab3c468a768..a2da1c422354 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -134,7 +134,8 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
134 return -E1000_ERR_PARAM; 134 return -E1000_ERR_PARAM;
135 } 135 }
136 136
137 /* Set up Op-code, Phy Address, and register offset in the MDI 137 /*
138 * Set up Op-code, Phy Address, and register offset in the MDI
138 * Control register. The MAC will take care of interfacing with the 139 * Control register. The MAC will take care of interfacing with the
139 * PHY to retrieve the desired data. 140 * PHY to retrieve the desired data.
140 */ 141 */
@@ -144,7 +145,11 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
144 145
145 ew32(MDIC, mdic); 146 ew32(MDIC, mdic);
146 147
147 /* Poll the ready bit to see if the MDI read completed */ 148 /*
149 * Poll the ready bit to see if the MDI read completed
150 * Increasing the time out as testing showed failures with
151 * the lower time out
152 */
148 for (i = 0; i < 64; i++) { 153 for (i = 0; i < 64; i++) {
149 udelay(50); 154 udelay(50);
150 mdic = er32(MDIC); 155 mdic = er32(MDIC);
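The hunk above is cut off inside the poll loop; the shape it extends is a bounded ready-bit poll: re-read MDIC every 50 microseconds, up to 64 times, and bail out if the ready bit never appears or the error bit is set. A self-contained sketch of that pattern; the simulated register, the bit positions and all names here are stand-ins rather than the driver's symbols:

#include <stdint.h>
#include <stdio.h>

#define MDIC_READY	(1u << 28)	/* stand-in for the hardware ready bit */
#define MDIC_ERROR	(1u << 30)	/* stand-in for the hardware error bit */

/* Simulated MDIC register: reports ready on the third read. */
static uint32_t read_mdic(void)
{
	static int reads;

	if (++reads < 3)
		return 0;
	return MDIC_READY | 0x1234;	/* low 16 bits carry the PHY register value */
}

static int poll_mdic(uint16_t *data)
{
	uint32_t mdic = 0;
	int i;

	for (i = 0; i < 64; i++) {	/* bounded retry, as in the loop above */
		/* udelay(50) would go here */
		mdic = read_mdic();
		if (mdic & MDIC_READY)
			break;
	}
	if (!(mdic & MDIC_READY) || (mdic & MDIC_ERROR))
		return -1;		/* timed out or the PHY flagged an error */

	*data = (uint16_t)mdic;
	return 0;
}

int main(void)
{
	uint16_t val;

	if (!poll_mdic(&val))
		printf("PHY register = 0x%04x\n", val);	/* prints 0x1234 */
	return 0;
}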
@@ -182,7 +187,8 @@ static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
182 return -E1000_ERR_PARAM; 187 return -E1000_ERR_PARAM;
183 } 188 }
184 189
185 /* Set up Op-code, Phy Address, and register offset in the MDI 190 /*
191 * Set up Op-code, Phy Address, and register offset in the MDI
186 * Control register. The MAC will take care of interfacing with the 192 * Control register. The MAC will take care of interfacing with the
187 * PHY to retrieve the desired data. 193 * PHY to retrieve the desired data.
188 */ 194 */
@@ -409,14 +415,15 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
409 s32 ret_val; 415 s32 ret_val;
410 u16 phy_data; 416 u16 phy_data;
411 417
412 /* Enable CRS on TX. This must be set for half-duplex operation. */ 418 /* Enable CRS on Tx. This must be set for half-duplex operation. */
413 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 419 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
414 if (ret_val) 420 if (ret_val)
415 return ret_val; 421 return ret_val;
416 422
417 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 423 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
418 424
419 /* Options: 425 /*
426 * Options:
420 * MDI/MDI-X = 0 (default) 427 * MDI/MDI-X = 0 (default)
421 * 0 - Auto for all speeds 428 * 0 - Auto for all speeds
422 * 1 - MDI mode 429 * 1 - MDI mode
@@ -441,7 +448,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
441 break; 448 break;
442 } 449 }
443 450
444 /* Options: 451 /*
452 * Options:
445 * disable_polarity_correction = 0 (default) 453 * disable_polarity_correction = 0 (default)
446 * Automatic Correction for Reversed Cable Polarity 454 * Automatic Correction for Reversed Cable Polarity
447 * 0 - Disabled 455 * 0 - Disabled
@@ -456,7 +464,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
456 return ret_val; 464 return ret_val;
457 465
458 if (phy->revision < 4) { 466 if (phy->revision < 4) {
459 /* Force TX_CLK in the Extended PHY Specific Control Register 467 /*
468 * Force TX_CLK in the Extended PHY Specific Control Register
460 * to 25MHz clock. 469 * to 25MHz clock.
461 */ 470 */
462 ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); 471 ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
@@ -543,19 +552,21 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
543 552
544 /* set auto-master slave resolution settings */ 553 /* set auto-master slave resolution settings */
545 if (hw->mac.autoneg) { 554 if (hw->mac.autoneg) {
546 /* when autonegotiation advertisement is only 1000Mbps then we 555 /*
556 * when autonegotiation advertisement is only 1000Mbps then we
547 * should disable SmartSpeed and enable Auto MasterSlave 557 * should disable SmartSpeed and enable Auto MasterSlave
548 * resolution as hardware default. */ 558 * resolution as hardware default.
559 */
549 if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { 560 if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
550 /* Disable SmartSpeed */ 561 /* Disable SmartSpeed */
551 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 562 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
552 &data); 563 &data);
553 if (ret_val) 564 if (ret_val)
554 return ret_val; 565 return ret_val;
555 566
556 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 567 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
557 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 568 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
558 data); 569 data);
559 if (ret_val) 570 if (ret_val)
560 return ret_val; 571 return ret_val;
561 572
@@ -630,14 +641,16 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
630 return ret_val; 641 return ret_val;
631 } 642 }
632 643
633 /* Need to parse both autoneg_advertised and fc and set up 644 /*
645 * Need to parse both autoneg_advertised and fc and set up
634 * the appropriate PHY registers. First we will parse for 646 * the appropriate PHY registers. First we will parse for
635 * autoneg_advertised software override. Since we can advertise 647 * autoneg_advertised software override. Since we can advertise
636 * a plethora of combinations, we need to check each bit 648 * a plethora of combinations, we need to check each bit
637 * individually. 649 * individually.
638 */ 650 */
639 651
640 /* First we clear all the 10/100 mb speed bits in the Auto-Neg 652 /*
653 * First we clear all the 10/100 mb speed bits in the Auto-Neg
641 * Advertisement Register (Address 4) and the 1000 mb speed bits in 654 * Advertisement Register (Address 4) and the 1000 mb speed bits in
642 * the 1000Base-T Control Register (Address 9). 655 * the 1000Base-T Control Register (Address 9).
643 */ 656 */
@@ -683,7 +696,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
683 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; 696 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
684 } 697 }
685 698
686 /* Check for a software override of the flow control settings, and 699 /*
700 * Check for a software override of the flow control settings, and
687 * setup the PHY advertisement registers accordingly. If 701 * setup the PHY advertisement registers accordingly. If
688 * auto-negotiation is enabled, then software will have to set the 702 * auto-negotiation is enabled, then software will have to set the
689 * "PAUSE" bits to the correct value in the Auto-Negotiation 703 * "PAUSE" bits to the correct value in the Auto-Negotiation
@@ -696,38 +710,42 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
696 * but not send pause frames). 710 * but not send pause frames).
697 * 2: Tx flow control is enabled (we can send pause frames 711 * 2: Tx flow control is enabled (we can send pause frames
698 * but we do not support receiving pause frames). 712 * but we do not support receiving pause frames).
699 * 3: Both Rx and TX flow control (symmetric) are enabled. 713 * 3: Both Rx and Tx flow control (symmetric) are enabled.
700 * other: No software override. The flow control configuration 714 * other: No software override. The flow control configuration
701 * in the EEPROM is used. 715 * in the EEPROM is used.
702 */ 716 */
703 switch (hw->mac.fc) { 717 switch (hw->mac.fc) {
704 case e1000_fc_none: 718 case e1000_fc_none:
705 /* Flow control (RX & TX) is completely disabled by a 719 /*
720 * Flow control (Rx & Tx) is completely disabled by a
706 * software over-ride. 721 * software over-ride.
707 */ 722 */
708 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 723 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
709 break; 724 break;
710 case e1000_fc_rx_pause: 725 case e1000_fc_rx_pause:
711 /* RX Flow control is enabled, and TX Flow control is 726 /*
727 * Rx Flow control is enabled, and Tx Flow control is
712 * disabled, by a software over-ride. 728 * disabled, by a software over-ride.
713 */ 729 *
714 /* Since there really isn't a way to advertise that we are 730 * Since there really isn't a way to advertise that we are
715 * capable of RX Pause ONLY, we will advertise that we 731 * capable of Rx Pause ONLY, we will advertise that we
716 * support both symmetric and asymmetric RX PAUSE. Later 732 * support both symmetric and asymmetric Rx PAUSE. Later
717 * (in e1000e_config_fc_after_link_up) we will disable the 733 * (in e1000e_config_fc_after_link_up) we will disable the
718 * hw's ability to send PAUSE frames. 734 * hw's ability to send PAUSE frames.
719 */ 735 */
720 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 736 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
721 break; 737 break;
722 case e1000_fc_tx_pause: 738 case e1000_fc_tx_pause:
723 /* TX Flow control is enabled, and RX Flow control is 739 /*
740 * Tx Flow control is enabled, and Rx Flow control is
724 * disabled, by a software over-ride. 741 * disabled, by a software over-ride.
725 */ 742 */
726 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; 743 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
727 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; 744 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
728 break; 745 break;
729 case e1000_fc_full: 746 case e1000_fc_full:
730 /* Flow control (both RX and TX) is enabled by a software 747 /*
748 * Flow control (both Rx and Tx) is enabled by a software
731 * over-ride. 749 * over-ride.
732 */ 750 */
733 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 751 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
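The four cases above boil down to two advertisement bits: PAUSE advertises symmetric pause, ASM_DIR advertises an asymmetric direction, and rx-pause-only can only be expressed by advertising both and trimming the Tx side after link-up. A compact standalone restatement of the switch; the bit values are the standard 802.3 ANAR positions and are shown only for illustration:

#include <stdio.h>
#include <stdint.h>

#define NWAY_AR_PAUSE	0x0400	/* symmetric pause capable (ANAR bit 10) */
#define NWAY_AR_ASM_DIR	0x0800	/* asymmetric pause direction (ANAR bit 11) */

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static uint16_t fc_adv_bits(enum fc_mode fc, uint16_t adv)
{
	switch (fc) {
	case FC_NONE:				/* advertise no pause at all */
		adv &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case FC_RX_PAUSE:			/* advertise both, disable Tx later */
	case FC_FULL:
		adv |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case FC_TX_PAUSE:			/* asymmetric, Tx only */
		adv |= NWAY_AR_ASM_DIR;
		adv &= ~NWAY_AR_PAUSE;
		break;
	}
	return adv;
}

int main(void)
{
	printf("tx_pause -> 0x%04x\n", fc_adv_bits(FC_TX_PAUSE, 0));	/* 0x0800 */
	return 0;
}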
@@ -758,7 +776,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
758 * Performs initial bounds checking on autoneg advertisement parameter, then 776 * Performs initial bounds checking on autoneg advertisement parameter, then
759 * configure to advertise the full capability. Setup the PHY to autoneg 777 * configure to advertise the full capability. Setup the PHY to autoneg
760 * and restart the negotiation process between the link partner. If 778 * and restart the negotiation process between the link partner. If
761 * wait_for_link, then wait for autoneg to complete before exiting. 779 * autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
762 **/ 780 **/
763static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) 781static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
764{ 782{
@@ -766,12 +784,14 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
766 s32 ret_val; 784 s32 ret_val;
767 u16 phy_ctrl; 785 u16 phy_ctrl;
768 786
769 /* Perform some bounds checking on the autoneg advertisement 787 /*
788 * Perform some bounds checking on the autoneg advertisement
770 * parameter. 789 * parameter.
771 */ 790 */
772 phy->autoneg_advertised &= phy->autoneg_mask; 791 phy->autoneg_advertised &= phy->autoneg_mask;
773 792
774 /* If autoneg_advertised is zero, we assume it was not defaulted 793 /*
794 * If autoneg_advertised is zero, we assume it was not defaulted
775 * by the calling code so we set to advertise full capability. 795 * by the calling code so we set to advertise full capability.
776 */ 796 */
777 if (phy->autoneg_advertised == 0) 797 if (phy->autoneg_advertised == 0)
@@ -785,7 +805,8 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
785 } 805 }
786 hw_dbg(hw, "Restarting Auto-Neg\n"); 806 hw_dbg(hw, "Restarting Auto-Neg\n");
787 807
788 /* Restart auto-negotiation by setting the Auto Neg Enable bit and 808 /*
809 * Restart auto-negotiation by setting the Auto Neg Enable bit and
789 * the Auto Neg Restart bit in the PHY control register. 810 * the Auto Neg Restart bit in the PHY control register.
790 */ 811 */
791 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); 812 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
@@ -797,7 +818,8 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
797 if (ret_val) 818 if (ret_val)
798 return ret_val; 819 return ret_val;
799 820
800 /* Does the user want to wait for Auto-Neg to complete here, or 821 /*
822 * Does the user want to wait for Auto-Neg to complete here, or
801 * check at a later time (for example, callback routine). 823 * check at a later time (for example, callback routine).
802 */ 824 */
803 if (phy->wait_for_link) { 825 if (phy->wait_for_link) {
@@ -829,14 +851,18 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
829 bool link; 851 bool link;
830 852
831 if (hw->mac.autoneg) { 853 if (hw->mac.autoneg) {
832 /* Setup autoneg and flow control advertisement and perform 854 /*
833 * autonegotiation. */ 855 * Setup autoneg and flow control advertisement and perform
856 * autonegotiation.
857 */
834 ret_val = e1000_copper_link_autoneg(hw); 858 ret_val = e1000_copper_link_autoneg(hw);
835 if (ret_val) 859 if (ret_val)
836 return ret_val; 860 return ret_val;
837 } else { 861 } else {
838 /* PHY will be set to 10H, 10F, 100H or 100F 862 /*
839 * depending on user settings. */ 863 * PHY will be set to 10H, 10F, 100H or 100F
864 * depending on user settings.
865 */
840 hw_dbg(hw, "Forcing Speed and Duplex\n"); 866 hw_dbg(hw, "Forcing Speed and Duplex\n");
841 ret_val = e1000_phy_force_speed_duplex(hw); 867 ret_val = e1000_phy_force_speed_duplex(hw);
842 if (ret_val) { 868 if (ret_val) {
@@ -845,7 +871,8 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
845 } 871 }
846 } 872 }
847 873
848 /* Check link status. Wait up to 100 microseconds for link to become 874 /*
875 * Check link status. Wait up to 100 microseconds for link to become
849 * valid. 876 * valid.
850 */ 877 */
851 ret_val = e1000e_phy_has_link_generic(hw, 878 ret_val = e1000e_phy_has_link_generic(hw,
@@ -891,7 +918,8 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
891 if (ret_val) 918 if (ret_val)
892 return ret_val; 919 return ret_val;
893 920
894 /* Clear Auto-Crossover to force MDI manually. IGP requires MDI 921 /*
922 * Clear Auto-Crossover to force MDI manually. IGP requires MDI
895 * forced whenever speed and duplex are forced. 923 * forced whenever speed and duplex are forced.
896 */ 924 */
897 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); 925 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
@@ -941,7 +969,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
941 * Calls the PHY setup function to force speed and duplex. Clears the 969 * Calls the PHY setup function to force speed and duplex. Clears the
942 * auto-crossover to force MDI manually. Resets the PHY to commit the 970 * auto-crossover to force MDI manually. Resets the PHY to commit the
943 * changes. If time expires while waiting for link up, we reset the DSP. 971 * changes. If time expires while waiting for link up, we reset the DSP.
944 * After reset, TX_CLK and CRS on TX must be set. Return successful upon 972 * After reset, TX_CLK and CRS on Tx must be set. Return successful upon
945 * successful completion, else return corresponding error code. 973 * successful completion, else return corresponding error code.
946 **/ 974 **/
947s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) 975s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
@@ -951,7 +979,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
951 u16 phy_data; 979 u16 phy_data;
952 bool link; 980 bool link;
953 981
954 /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI 982 /*
983 * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
955 * forced whenever speed and duplex are forced. 984 * forced whenever speed and duplex are forced.
956 */ 985 */
957 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 986 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
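The M88 PHY keeps its crossover setting in the PHY specific control register read above; forcing speed and duplex requires MDI to be forced as well, so the automatic crossover field is cleared before the value is written back. Roughly, assuming the M88E1000_PSCR_AUTO_X_MODE define used elsewhere in the driver family:

	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	/* clear automatic crossover so MDI stays forced along with
	 * the forced speed and duplex */
	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
	if (ret_val)
		return ret_val;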
@@ -989,10 +1018,12 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
989 return ret_val; 1018 return ret_val;
990 1019
991 if (!link) { 1020 if (!link) {
992 /* We didn't get link. 1021 /*
1022 * We didn't get link.
993 * Reset the DSP and cross our fingers. 1023 * Reset the DSP and cross our fingers.
994 */ 1024 */
995 ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, 0x001d); 1025 ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
1026 0x001d);
996 if (ret_val) 1027 if (ret_val)
997 return ret_val; 1028 return ret_val;
998 ret_val = e1000e_phy_reset_dsp(hw); 1029 ret_val = e1000e_phy_reset_dsp(hw);
@@ -1011,7 +1042,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1011 if (ret_val) 1042 if (ret_val)
1012 return ret_val; 1043 return ret_val;
1013 1044
1014 /* Resetting the phy means we need to re-force TX_CLK in the 1045 /*
1046 * Resetting the phy means we need to re-force TX_CLK in the
1015 * Extended PHY Specific Control Register to 25MHz clock from 1047 * Extended PHY Specific Control Register to 25MHz clock from
1016 * the reset value of 2.5MHz. 1048 * the reset value of 2.5MHz.
1017 */ 1049 */
@@ -1020,7 +1052,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1020 if (ret_val) 1052 if (ret_val)
1021 return ret_val; 1053 return ret_val;
1022 1054
1023 /* In addition, we must re-enable CRS on Tx for both half and full 1055 /*
1056 * In addition, we must re-enable CRS on Tx for both half and full
1024 * duplex. 1057 * duplex.
1025 */ 1058 */
1026 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1059 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
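The two comment blocks above describe a pair of read-modify-write cycles that follow the PHY reset: TX_CLK is forced back to 25 MHz in the extended PHY specific control register, and CRS-on-Tx is re-asserted in the PHY specific control register. A sketch under the assumption that the M88E1000_EPSCR_TX_CLK_25 and M88E1000_PSCR_ASSERT_CRS_ON_TX defines apply here as they do elsewhere in the driver family:

	/* re-force TX_CLK to 25 MHz (the reset value is 2.5 MHz) */
	ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;
	phy_data |= M88E1000_EPSCR_TX_CLK_25;
	ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	/* re-enable CRS on Tx for both half and full duplex */
	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;
	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
	if (ret_val)
		return ret_val;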
@@ -1124,30 +1157,32 @@ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1124 data); 1157 data);
1125 if (ret_val) 1158 if (ret_val)
1126 return ret_val; 1159 return ret_val;
1127 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 1160 /*
1161 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
1128 * during Dx states where the power conservation is most 1162 * during Dx states where the power conservation is most
1129 * important. During driver activity we should enable 1163 * important. During driver activity we should enable
1130 * SmartSpeed, so performance is maintained. */ 1164 * SmartSpeed, so performance is maintained.
1165 */
1131 if (phy->smart_speed == e1000_smart_speed_on) { 1166 if (phy->smart_speed == e1000_smart_speed_on) {
1132 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 1167 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1133 &data); 1168 &data);
1134 if (ret_val) 1169 if (ret_val)
1135 return ret_val; 1170 return ret_val;
1136 1171
1137 data |= IGP01E1000_PSCFR_SMART_SPEED; 1172 data |= IGP01E1000_PSCFR_SMART_SPEED;
1138 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 1173 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1139 data); 1174 data);
1140 if (ret_val) 1175 if (ret_val)
1141 return ret_val; 1176 return ret_val;
1142 } else if (phy->smart_speed == e1000_smart_speed_off) { 1177 } else if (phy->smart_speed == e1000_smart_speed_off) {
1143 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 1178 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1144 &data); 1179 &data);
1145 if (ret_val) 1180 if (ret_val)
1146 return ret_val; 1181 return ret_val;
1147 1182
1148 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1183 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1149 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 1184 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1150 data); 1185 data);
1151 if (ret_val) 1186 if (ret_val)
1152 return ret_val; 1187 return ret_val;
1153 } 1188 }
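Both branches above perform the same read-modify-write on IGP01E1000_PHY_PORT_CONFIG and differ only in whether the SmartSpeed bit is set or cleared; they run once LPLU has been turned off for D3, since the comment notes the two features are mutually exclusive. Condensed into a hypothetical helper (not a function in the driver) to make the shape clearer:

	/* illustrative only: apply the configured SmartSpeed policy
	 * after LPLU has been disabled */
	static s32 e1000_apply_smart_speed(struct e1000_hw *hw)
	{
		struct e1000_phy_info *phy = &hw->phy;
		s32 ret_val;
		u16 data;

		if (phy->smart_speed == e1000_smart_speed_default)
			return 0;	/* leave the PHY's current setting alone */

		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
		if (ret_val)
			return ret_val;

		if (phy->smart_speed == e1000_smart_speed_on)
			data |= IGP01E1000_PSCFR_SMART_SPEED;
		else
			data &= ~IGP01E1000_PSCFR_SMART_SPEED;

		return e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
	}

The trade-off the comment points at: LPLU saves the most power in the Dx sleep states, while SmartSpeed keeps link performance up during normal driver activity, so only one of the two is left enabled at a time.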
@@ -1249,8 +1284,10 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
1249 s32 ret_val; 1284 s32 ret_val;
1250 u16 data, offset, mask; 1285 u16 data, offset, mask;
1251 1286
1252 /* Polarity is determined based on the speed of 1287 /*
1253 * our connection. */ 1288 * Polarity is determined based on the speed of
1289 * our connection.
1290 */
1254 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); 1291 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
1255 if (ret_val) 1292 if (ret_val)
1256 return ret_val; 1293 return ret_val;
@@ -1260,7 +1297,8 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
1260 offset = IGP01E1000_PHY_PCS_INIT_REG; 1297 offset = IGP01E1000_PHY_PCS_INIT_REG;
1261 mask = IGP01E1000_PHY_POLARITY_MASK; 1298 mask = IGP01E1000_PHY_POLARITY_MASK;
1262 } else { 1299 } else {
1263 /* This really only applies to 10Mbps since 1300 /*
1301 * This really only applies to 10Mbps since
1264 * there is no polarity for 100Mbps (always 0). 1302 * there is no polarity for 100Mbps (always 0).
1265 */ 1303 */
1266 offset = IGP01E1000_PHY_PORT_STATUS; 1304 offset = IGP01E1000_PHY_PORT_STATUS;
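Once the hunk above has picked the register offset and polarity mask appropriate to the negotiated speed (the PCS init register for 1000 Mb/s, the port status register otherwise), one more PHY read resolves the actual polarity. Roughly, assuming the driver's usual e1000_rev_polarity_* values for phy->cable_polarity:

	ret_val = e1e_rphy(hw, offset, &data);
	if (!ret_val)
		/* a set bit under the chosen mask means reversed polarity */
		phy->cable_polarity = (data & mask)
				      ? e1000_rev_polarity_reversed
				      : e1000_rev_polarity_normal;

	return ret_val;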
@@ -1278,7 +1316,7 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
1278} 1316}
1279 1317
1280/** 1318/**
1281 * e1000_wait_autoneg - Wait for auto-neg compeletion 1319 * e1000_wait_autoneg - Wait for auto-neg completion
1282 * @hw: pointer to the HW structure 1320 * @hw: pointer to the HW structure
1283 * 1321 *
1284 * Waits for auto-negotiation to complete or for the auto-negotiation time 1322 * Waits for auto-negotiation to complete or for the auto-negotiation time
@@ -1302,7 +1340,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
1302 msleep(100); 1340 msleep(100);
1303 } 1341 }
1304 1342
1305 /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation 1343 /*
1344 * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
1306 * has completed. 1345 * has completed.
1307 */ 1346 */
1308 return ret_val; 1347 return ret_val;
@@ -1324,7 +1363,8 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
1324 u16 i, phy_status; 1363 u16 i, phy_status;
1325 1364
1326 for (i = 0; i < iterations; i++) { 1365 for (i = 0; i < iterations; i++) {
1327 /* Some PHYs require the PHY_STATUS register to be read 1366 /*
1367 * Some PHYs require the PHY_STATUS register to be read
1328 * twice due to the link bit being sticky. No harm doing 1368 * twice due to the link bit being sticky. No harm doing
1329 * it across the board. 1369 * it across the board.
1330 */ 1370 */
@@ -1412,10 +1452,12 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
1412 if (ret_val) 1452 if (ret_val)
1413 return ret_val; 1453 return ret_val;
1414 1454
1415 /* Getting bits 15:9, which represent the combination of 1455 /*
1456 * Getting bits 15:9, which represent the combination of
1416 * course and fine gain values. The result is a number 1457 * course and fine gain values. The result is a number
1417 * that can be put into the lookup table to obtain the 1458 * that can be put into the lookup table to obtain the
1418 * approximate cable length. */ 1459 * approximate cable length.
1460 */
1419 cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & 1461 cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
1420 IGP02E1000_AGC_LENGTH_MASK; 1462 IGP02E1000_AGC_LENGTH_MASK;
1421 1463
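Bits 15:9 extracted above combine the coarse and fine AGC gain ("course" in the comment is presumably "coarse"); after masking, cur_agc_index indexes a table of approximate cable lengths. A sketch of the step that typically follows, with the table name, size define and error code treated as assumptions borrowed from the driver family:

	/* the masked value must fall inside the lookup table */
	if (cur_agc_index > IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1 ||
	    cur_agc_index == 0)
		return -E1000_ERR_PHY;

	/* accumulate the per-channel estimate; the function later averages
	 * the AGC channels to derive min and max cable length */
	agc_value += e1000_igp_2_cable_length_table[cur_agc_index];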