author	Bruce Allan <bruce.w.allan@intel.com>	2008-03-28 12:15:03 -0400
committer	Jeff Garzik <jeff@garzik.org>	2008-03-28 22:14:56 -0400
commit	ad68076e07fa01bd0c98278a959d0fd2bb26f1ac (patch)
tree	f0b664ecdb38478f9b995aff10dcb39a09221fb6 /drivers/net/e1000e/ich8lan.c
parent	652f093fdf14c7ca1e13c052da429ae385e4dc21 (diff)
e1000e: reformat comment blocks, cosmetic changes only
Adjusting the comment blocks here to be code-style compliant. No code
changes. Changed some copyright dates to 2008. Indentation fixes.

Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
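As a quick illustration of the style change described in the message above, here is a small sketch (not taken from the patch itself; the function and variable names are hypothetical) contrasting the multi-line comment layout used before with the layout this commit converts the driver to. Single-line comments in the hunks below are left as-is; only blocks spanning more than one line are rewrapped.

    /* Before: the comment text starts on the same line as the opening
     * marker, and the block often closes at the end of the last text
     * line. */
    static int check_flash_mapped_old(void *flash_address)
    {
            /* Can't read flash registers if the register set isn't mapped.
             */
            if (!flash_address)
                    return -1;      /* analogous to -E1000_ERR_CONFIG */
            return 0;
    }

    /*
     * After: the opening marker sits on a line of its own, each text
     * line begins with an asterisk, and the closing marker also gets
     * its own line.
     */
    static int check_flash_mapped_new(void *flash_address)
    {
            if (!flash_address)
                    return -1;      /* analogous to -E1000_ERR_CONFIG */
            return 0;
    }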
Diffstat (limited to 'drivers/net/e1000e/ich8lan.c')
-rw-r--r--	drivers/net/e1000e/ich8lan.c	262
1 file changed, 158 insertions(+), 104 deletions(-)
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 0ae39550768..84401564811 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -243,8 +243,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
         u32 sector_end_addr;
         u16 i;
 
-        /* Can't read flash registers if the register set isn't mapped.
-         */
+        /* Can't read flash registers if the register set isn't mapped. */
         if (!hw->flash_address) {
                 hw_dbg(hw, "ERROR: Flash registers not mapped\n");
                 return -E1000_ERR_CONFIG;
@@ -254,17 +253,21 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
 
         gfpreg = er32flash(ICH_FLASH_GFPREG);
 
-        /* sector_X_addr is a "sector"-aligned address (4096 bytes)
+        /*
+         * sector_X_addr is a "sector"-aligned address (4096 bytes)
          * Add 1 to sector_end_addr since this sector is included in
-         * the overall size. */
+         * the overall size.
+         */
         sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
         sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
 
         /* flash_base_addr is byte-aligned */
         nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
 
-        /* find total size of the NVM, then cut in half since the total
-         * size represents two separate NVM banks. */
+        /*
+         * find total size of the NVM, then cut in half since the total
+         * size represents two separate NVM banks.
+         */
         nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
                                 << FLASH_SECTOR_ADDR_SHIFT;
         nvm->flash_bank_size /= 2;
@@ -496,7 +499,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
         if (ret_val)
                 return ret_val;
 
-        /* Initialize the PHY from the NVM on ICH platforms. This
+        /*
+         * Initialize the PHY from the NVM on ICH platforms. This
          * is needed due to an issue where the NVM configuration is
          * not properly autoloaded after power transitions.
          * Therefore, after each PHY reset, we will load the
@@ -523,7 +527,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
                         udelay(100);
                 } while ((!data) && --loop);
 
-                /* If basic configuration is incomplete before the above loop
+                /*
+                 * If basic configuration is incomplete before the above loop
                  * count reaches 0, loading the configuration from NVM will
                  * leave the PHY in a bad state possibly resulting in no link.
                  */
@@ -536,8 +541,10 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
                 data &= ~E1000_STATUS_LAN_INIT_DONE;
                 ew32(STATUS, data);
 
-                /* Make sure HW does not configure LCD from PHY
-                 * extended configuration before SW configuration */
+                /*
+                 * Make sure HW does not configure LCD from PHY
+                 * extended configuration before SW configuration
+                 */
                 data = er32(EXTCNF_CTRL);
                 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
                         return 0;
@@ -551,8 +558,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
                 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
                 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
 
-                /* Configure LCD from extended configuration
-                 * region. */
+                /* Configure LCD from extended configuration region. */
 
                 /* cnf_base_addr is in DWORD */
                 word_addr = (u16)(cnf_base_addr << 1);
@@ -681,8 +687,8 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
         s32 ret_val;
         u16 phy_data, offset, mask;
 
-        /* Polarity is determined based on the reversal feature
-         * being enabled.
+        /*
+         * Polarity is determined based on the reversal feature being enabled.
          */
         if (phy->polarity_correction) {
                 offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
@@ -731,8 +737,10 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
                 ew32(PHY_CTRL, phy_ctrl);
 
-                /* Call gig speed drop workaround on LPLU before accessing
-                 * any PHY registers */
+                /*
+                 * Call gig speed drop workaround on LPLU before accessing
+                 * any PHY registers
+                 */
                 if ((hw->mac.type == e1000_ich8lan) &&
                     (hw->phy.type == e1000_phy_igp_3))
                         e1000e_gig_downshift_workaround_ich8lan(hw);
@@ -747,30 +755,32 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
                 ew32(PHY_CTRL, phy_ctrl);
 
-                /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+                /*
+                 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
                  * during Dx states where the power conservation is most
                  * important. During driver activity we should enable
-                 * SmartSpeed, so performance is maintained. */
+                 * SmartSpeed, so performance is maintained.
+                 */
                 if (phy->smart_speed == e1000_smart_speed_on) {
                         ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
                                            &data);
                         if (ret_val)
                                 return ret_val;
 
                         data |= IGP01E1000_PSCFR_SMART_SPEED;
                         ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
                                            data);
                         if (ret_val)
                                 return ret_val;
                 } else if (phy->smart_speed == e1000_smart_speed_off) {
                         ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
                                            &data);
                         if (ret_val)
                                 return ret_val;
 
                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
                         ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
                                            data);
                         if (ret_val)
                                 return ret_val;
                 }
@@ -804,34 +814,32 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
         if (!active) {
                 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
                 ew32(PHY_CTRL, phy_ctrl);
-                /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+                /*
+                 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
                  * during Dx states where the power conservation is most
                  * important. During driver activity we should enable
-                 * SmartSpeed, so performance is maintained. */
+                 * SmartSpeed, so performance is maintained.
+                 */
                 if (phy->smart_speed == e1000_smart_speed_on) {
-                        ret_val = e1e_rphy(hw,
-                                           IGP01E1000_PHY_PORT_CONFIG,
-                                           &data);
+                        ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                           &data);
                         if (ret_val)
                                 return ret_val;
 
                         data |= IGP01E1000_PSCFR_SMART_SPEED;
-                        ret_val = e1e_wphy(hw,
-                                           IGP01E1000_PHY_PORT_CONFIG,
-                                           data);
+                        ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                           data);
                         if (ret_val)
                                 return ret_val;
                 } else if (phy->smart_speed == e1000_smart_speed_off) {
-                        ret_val = e1e_rphy(hw,
-                                           IGP01E1000_PHY_PORT_CONFIG,
-                                           &data);
+                        ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                           &data);
                         if (ret_val)
                                 return ret_val;
 
                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-                        ret_val = e1e_wphy(hw,
-                                           IGP01E1000_PHY_PORT_CONFIG,
-                                           data);
+                        ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                           data);
                         if (ret_val)
                                 return ret_val;
                 }
@@ -841,23 +849,21 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
                 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
                 ew32(PHY_CTRL, phy_ctrl);
 
-                /* Call gig speed drop workaround on LPLU before accessing
-                 * any PHY registers */
+                /*
+                 * Call gig speed drop workaround on LPLU before accessing
+                 * any PHY registers
+                 */
                 if ((hw->mac.type == e1000_ich8lan) &&
                     (hw->phy.type == e1000_phy_igp_3))
                         e1000e_gig_downshift_workaround_ich8lan(hw);
 
                 /* When LPLU is enabled, we should disable SmartSpeed */
-                ret_val = e1e_rphy(hw,
-                                   IGP01E1000_PHY_PORT_CONFIG,
-                                   &data);
+                ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
                 if (ret_val)
                         return ret_val;
 
                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-                ret_val = e1e_wphy(hw,
-                                   IGP01E1000_PHY_PORT_CONFIG,
-                                   data);
+                ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
         }
 
         return 0;
@@ -944,7 +950,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
 
         ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
 
-        /* Either we should have a hardware SPI cycle in progress
+        /*
+         * Either we should have a hardware SPI cycle in progress
          * bit to check against, in order to start a new cycle or
          * FDONE bit should be changed in the hardware so that it
          * is 1 after hardware reset, which can then be used as an
@@ -953,15 +960,19 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
          */
 
         if (hsfsts.hsf_status.flcinprog == 0) {
-                /* There is no cycle running at present,
-                 * so we can start a cycle */
-                /* Begin by setting Flash Cycle Done. */
+                /*
+                 * There is no cycle running at present,
+                 * so we can start a cycle
+                 * Begin by setting Flash Cycle Done.
+                 */
                 hsfsts.hsf_status.flcdone = 1;
                 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
                 ret_val = 0;
         } else {
-                /* otherwise poll for sometime so the current
-                 * cycle has a chance to end before giving up. */
+                /*
+                 * otherwise poll for sometime so the current
+                 * cycle has a chance to end before giving up.
+                 */
                 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
                         hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS);
                         if (hsfsts.hsf_status.flcinprog == 0) {
@@ -971,8 +982,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
                         udelay(1);
                 }
                 if (ret_val == 0) {
-                        /* Successful in waiting for previous cycle to timeout,
-                         * now set the Flash Cycle Done. */
+                        /*
+                         * Successful in waiting for previous cycle to timeout,
+                         * now set the Flash Cycle Done.
+                         */
                         hsfsts.hsf_status.flcdone = 1;
                         ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
                 } else {
@@ -1077,10 +1090,12 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                 ret_val = e1000_flash_cycle_ich8lan(hw,
                                                 ICH_FLASH_READ_COMMAND_TIMEOUT);
 
-                /* Check if FCERR is set to 1, if set to 1, clear it
+                /*
+                 * Check if FCERR is set to 1, if set to 1, clear it
                  * and try the whole sequence a few more times, else
                  * read in (shift in) the Flash Data0, the order is
-                 * least significant byte first msb to lsb */
+                 * least significant byte first msb to lsb
+                 */
                 if (ret_val == 0) {
                         flash_data = er32flash(ICH_FLASH_FDATA0);
                         if (size == 1) {
@@ -1090,7 +1105,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                         }
                         break;
                 } else {
-                        /* If we've gotten here, then things are probably
+                        /*
+                         * If we've gotten here, then things are probably
                          * completely hosed, but if the error condition is
                          * detected, it won't hurt to give it another try...
                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
@@ -1168,18 +1184,20 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 
         ret_val = e1000e_update_nvm_checksum_generic(hw);
         if (ret_val)
-                return ret_val;;
+                return ret_val;
 
         if (nvm->type != e1000_nvm_flash_sw)
-                return ret_val;;
+                return ret_val;
 
         ret_val = e1000_acquire_swflag_ich8lan(hw);
         if (ret_val)
-                return ret_val;;
+                return ret_val;
 
-        /* We're writing to the opposite bank so if we're on bank 1,
+        /*
+         * We're writing to the opposite bank so if we're on bank 1,
          * write to bank 0 etc. We also need to erase the segment that
-         * is going to be written */
+         * is going to be written
+         */
         if (!(er32(EECD) & E1000_EECD_SEC1VAL)) {
                 new_bank_offset = nvm->flash_bank_size;
                 old_bank_offset = 0;
@@ -1191,9 +1209,11 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
         }
 
         for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
-                /* Determine whether to write the value stored
+                /*
+                 * Determine whether to write the value stored
                  * in the other NVM bank or a modified value stored
-                 * in the shadow RAM */
+                 * in the shadow RAM
+                 */
                 if (dev_spec->shadow_ram[i].modified) {
                         data = dev_spec->shadow_ram[i].value;
                 } else {
@@ -1202,12 +1222,14 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
                                                   &data);
                 }
 
-                /* If the word is 0x13, then make sure the signature bits
+                /*
+                 * If the word is 0x13, then make sure the signature bits
                  * (15:14) are 11b until the commit has completed.
                  * This will allow us to write 10b which indicates the
                  * signature is valid. We want to do this after the write
                  * has completed so that we don't mark the segment valid
-                 * while the write is still in progress */
+                 * while the write is still in progress
+                 */
                 if (i == E1000_ICH_NVM_SIG_WORD)
                         data |= E1000_ICH_NVM_SIG_MASK;
 
@@ -1230,18 +1252,22 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
                         break;
         }
 
-        /* Don't bother writing the segment valid bits if sector
-         * programming failed. */
+        /*
+         * Don't bother writing the segment valid bits if sector
+         * programming failed.
+         */
         if (ret_val) {
                 hw_dbg(hw, "Flash commit failed.\n");
                 e1000_release_swflag_ich8lan(hw);
                 return ret_val;
         }
 
-        /* Finally validate the new segment by setting bit 15:14
+        /*
+         * Finally validate the new segment by setting bit 15:14
          * to 10b in word 0x13 , this can be done without an
          * erase as well since these bits are 11 to start with
-         * and we need to change bit 14 to 0b */
+         * and we need to change bit 14 to 0b
+         */
         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
         e1000_read_flash_word_ich8lan(hw, act_offset, &data);
         data &= 0xBFFF;
@@ -1253,10 +1279,12 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
                 return ret_val;
         }
 
-        /* And invalidate the previously valid segment by setting
+        /*
+         * And invalidate the previously valid segment by setting
          * its signature word (0x13) high_byte to 0b. This can be
          * done without an erase because flash erase sets all bits
-         * to 1's. We can write 1's to 0's without an erase */
+         * to 1's. We can write 1's to 0's without an erase
+         */
         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
         if (ret_val) {
@@ -1272,7 +1300,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 
         e1000_release_swflag_ich8lan(hw);
 
-        /* Reload the EEPROM, or else modifications will not appear
+        /*
+         * Reload the EEPROM, or else modifications will not appear
          * until after the next adapter reset.
          */
         e1000e_reload_nvm(hw);
@@ -1294,7 +1323,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
         s32 ret_val;
         u16 data;
 
-        /* Read 0x19 and check bit 6. If this bit is 0, the checksum
+        /*
+         * Read 0x19 and check bit 6. If this bit is 0, the checksum
          * needs to be fixed. This bit is an indication that the NVM
          * was prepared by OEM software and did not calculate the
          * checksum...a likely scenario.
@@ -1364,14 +1394,17 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 
                 ew32flash(ICH_FLASH_FDATA0, flash_data);
 
-                /* check if FCERR is set to 1 , if set to 1, clear it
-                 * and try the whole sequence a few more times else done */
+                /*
+                 * check if FCERR is set to 1 , if set to 1, clear it
+                 * and try the whole sequence a few more times else done
+                 */
                 ret_val = e1000_flash_cycle_ich8lan(hw,
                                                ICH_FLASH_WRITE_COMMAND_TIMEOUT);
                 if (!ret_val)
                         break;
 
-                /* If we're here, then things are most likely
+                /*
+                 * If we're here, then things are most likely
                  * completely hosed, but if the error condition
                  * is detected, it won't hurt to give it another
                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
@@ -1462,9 +1495,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
 
         hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
 
-        /* Determine HW Sector size: Read BERASE bits of hw flash status
-         * register */
-        /* 00: The Hw sector is 256 bytes, hence we need to erase 16
+        /*
+         * Determine HW Sector size: Read BERASE bits of hw flash status
+         * register
+         * 00: The Hw sector is 256 bytes, hence we need to erase 16
          * consecutive sectors. The start index for the nth Hw sector
          * can be calculated as = bank * 4096 + n * 256
          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
@@ -1511,13 +1545,16 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
                         if (ret_val)
                                 return ret_val;
 
-                        /* Write a value 11 (block Erase) in Flash
-                         * Cycle field in hw flash control */
+                        /*
+                         * Write a value 11 (block Erase) in Flash
+                         * Cycle field in hw flash control
+                         */
                         hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
                         ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
 
-                        /* Write the last 24 bits of an index within the
+                        /*
+                         * Write the last 24 bits of an index within the
                          * block into Flash Linear address field in Flash
                          * Address.
                          */
@@ -1529,13 +1566,14 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
                         if (ret_val == 0)
                                 break;
 
-                        /* Check if FCERR is set to 1. If 1,
+                        /*
+                         * Check if FCERR is set to 1. If 1,
                          * clear it and try the whole sequence
-                         * a few more times else Done */
+                         * a few more times else Done
+                         */
                         hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
                         if (hsfsts.hsf_status.flcerr == 1)
-                                /* repeat for some time before
-                                 * giving up */
+                                /* repeat for some time before giving up */
                                 continue;
                         else if (hsfsts.hsf_status.flcdone == 0)
                                 return ret_val;
@@ -1585,7 +1623,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
 
         ret_val = e1000e_get_bus_info_pcie(hw);
 
-        /* ICH devices are "PCI Express"-ish. They have
+        /*
+         * ICH devices are "PCI Express"-ish. They have
          * a configuration space, but do not contain
          * PCI Express Capability registers, so bus width
          * must be hardcoded.
@@ -1608,7 +1647,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
         u32 ctrl, icr, kab;
         s32 ret_val;
 
-        /* Prevent the PCI-E bus from sticking if there is no TLP connection
+        /*
+         * Prevent the PCI-E bus from sticking if there is no TLP connection
          * on the last TLP read/write transaction when MAC is reset.
          */
         ret_val = e1000e_disable_pcie_master(hw);
@@ -1619,7 +1659,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
         hw_dbg(hw, "Masking off all interrupts\n");
         ew32(IMC, 0xffffffff);
 
-        /* Disable the Transmit and Receive units. Then delay to allow
+        /*
+         * Disable the Transmit and Receive units. Then delay to allow
          * any pending transactions to complete before we hit the MAC
          * with the global reset.
          */
@@ -1640,7 +1681,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
         ctrl = er32(CTRL);
 
         if (!e1000_check_reset_block(hw)) {
-                /* PHY HW reset requires MAC CORE reset at the same
+                /*
+                 * PHY HW reset requires MAC CORE reset at the same
                  * time to make sure the interface between MAC and the
                  * external PHY is reset.
                  */
@@ -1724,8 +1766,10 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
         ew32(TXDCTL1, txdctl);
 
-        /* ICH8 has opposite polarity of no_snoop bits.
-         * By default, we should use snoop behavior. */
+        /*
+         * ICH8 has opposite polarity of no_snoop bits.
+         * By default, we should use snoop behavior.
+         */
         if (mac->type == e1000_ich8lan)
                 snoop = PCIE_ICH8_SNOOP_ALL;
         else
@@ -1736,7 +1780,8 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
         ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
         ew32(CTRL_EXT, ctrl_ext);
 
-        /* Clear all of the statistics registers (clear on read). It is
+        /*
+         * Clear all of the statistics registers (clear on read). It is
          * important that we do this after we have tried to establish link
          * because the symbol error count will increment wildly if there
          * is no link.
@@ -1813,7 +1858,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
         if (e1000_check_reset_block(hw))
                 return 0;
 
-        /* ICH parts do not have a word in the NVM to determine
+        /*
+         * ICH parts do not have a word in the NVM to determine
          * the default flow control setting, so we explicitly
          * set it to full.
          */
@@ -1853,9 +1899,11 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
         ew32(CTRL, ctrl);
 
-        /* Set the mac to wait the maximum time between each iteration
+        /*
+         * Set the mac to wait the maximum time between each iteration
          * and increase the max iterations when polling the phy;
-         * this fixes erroneous timeouts at 10Mbps. */
+         * this fixes erroneous timeouts at 10Mbps.
+         */
         ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
         if (ret_val)
                 return ret_val;
@@ -1882,7 +1930,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
  * @speed: pointer to store current link speed
  * @duplex: pointer to store the current link duplex
  *
- * Calls the generic get_speed_and_duplex to retreive the current link
+ * Calls the generic get_speed_and_duplex to retrieve the current link
  * information and then calls the Kumeran lock loss workaround for links at
  * gigabit speeds.
  **/
@@ -1930,9 +1978,11 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
                 return 0;
 
-        /* Make sure link is up before proceeding. If not just return.
+        /*
+         * Make sure link is up before proceeding. If not just return.
          * Attempting this while link is negotiating fouled up link
-         * stability */
+         * stability
+         */
         ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
         if (!link)
                 return 0;
@@ -1961,8 +2011,10 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
         ew32(PHY_CTRL, phy_ctrl);
 
-        /* Call gig speed drop workaround on Gig disable before accessing
-         * any PHY registers */
+        /*
+         * Call gig speed drop workaround on Gig disable before accessing
+         * any PHY registers
+         */
         e1000e_gig_downshift_workaround_ich8lan(hw);
 
         /* unable to acquire PCS lock */
@@ -1970,7 +2022,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
 }
 
 /**
- * e1000_set_kmrn_lock_loss_workaound_ich8lan - Set Kumeran workaround state
+ * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
  * @hw: pointer to the HW structure
  * @state: boolean value used to set the current Kumeran workaround state
  *
@@ -2017,8 +2069,10 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
                E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
         ew32(PHY_CTRL, reg);
 
-        /* Call gig speed drop workaround on Gig disable before
-         * accessing any PHY registers */
+        /*
+         * Call gig speed drop workaround on Gig disable before
+         * accessing any PHY registers
+         */
         if (hw->mac.type == e1000_ich8lan)
                 e1000e_gig_downshift_workaround_ich8lan(hw);
 