Diffstat (limited to 'drivers/net/ixgbe/ixgbe_common.c')
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c  947
1 files changed, 477 insertions, 470 deletions
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index ebbda7d15254..bcd952916eb2 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -46,10 +46,13 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
 
-static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
-static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+                              u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
 
 /**
@@ -139,17 +142,29 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
        IXGBE_READ_REG(hw, IXGBE_MRFC);
        IXGBE_READ_REG(hw, IXGBE_RLEC);
        IXGBE_READ_REG(hw, IXGBE_LXONTXC);
-       IXGBE_READ_REG(hw, IXGBE_LXONRXC);
        IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
-       IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+       if (hw->mac.type >= ixgbe_mac_82599EB) {
+               IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+               IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+       } else {
+               IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+               IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+       }
 
        for (i = 0; i < 8; i++) {
                IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
-               IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
                IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
-               IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+               if (hw->mac.type >= ixgbe_mac_82599EB) {
+                       IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+                       IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+               } else {
+                       IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+                       IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+               }
        }
-
+       if (hw->mac.type >= ixgbe_mac_82599EB)
+               for (i = 0; i < 8; i++)
+                       IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
        IXGBE_READ_REG(hw, IXGBE_PRC64);
        IXGBE_READ_REG(hw, IXGBE_PRC127);
        IXGBE_READ_REG(hw, IXGBE_PRC255);
@@ -187,9 +202,26 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
        IXGBE_READ_REG(hw, IXGBE_BPTC);
        for (i = 0; i < 16; i++) {
                IXGBE_READ_REG(hw, IXGBE_QPRC(i));
-               IXGBE_READ_REG(hw, IXGBE_QBRC(i));
                IXGBE_READ_REG(hw, IXGBE_QPTC(i));
-               IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+               if (hw->mac.type >= ixgbe_mac_82599EB) {
+                       IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
+                       IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+               } else {
+                       IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+               }
+       }
+
+       if (hw->mac.type == ixgbe_mac_X540) {
+               if (hw->phy.id == 0)
+                       hw->phy.ops.identify(hw);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
        }
 
        return 0;
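On 82599 and later parts the per-queue byte counters cleared above are wider than 32 bits and are split across low/high register pairs (QBRC_L/QBRC_H, QBTC_L/QBTC_H). A minimal sketch of how a caller might fold such a split clear-on-read counter into one 64-bit value; the 4-bit high-word width is an assumption for illustration, not taken from this patch:

    /* Sketch: combine a split clear-on-read counter; 4-bit high word assumed. */
    static u64 read_split_counter(struct ixgbe_hw *hw, u32 lo_reg, u32 hi_reg)
    {
            u64 lo = IXGBE_READ_REG(hw, lo_reg);       /* low 32 bits */
            u64 hi = IXGBE_READ_REG(hw, hi_reg) & 0xF; /* assumed top 4 bits */

            return lo | (hi << 32);
    }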
@@ -454,8 +486,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
         * Prevent the PCI-E bus from from hanging by disabling PCI-E master
         * access and verify no pending requests
         */
-       if (ixgbe_disable_pcie_master(hw) != 0)
-               hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+       ixgbe_disable_pcie_master(hw);
 
        return 0;
 }
@@ -603,7 +634,6 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
                ixgbe_shift_out_eeprom_bits(hw, data, 16);
                ixgbe_standby_eeprom(hw);
 
-               msleep(hw->eeprom.semaphore_delay);
                /* Done with writing - release the EEPROM */
                ixgbe_release_eeprom(hw);
        }
@@ -747,10 +777,10 @@ s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
 {
        s32 status = 0;
-       u32 eec = 0;
+       u32 eec;
        u32 i;
 
-       if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
                status = IXGBE_ERR_SWFW_SYNC;
 
        if (status == 0) {
@@ -773,18 +803,18 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
                        IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
                        hw_dbg(hw, "Could not acquire EEPROM grant\n");
 
-                       ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+                       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
                        status = IXGBE_ERR_EEPROM;
                }
-       }
 
-       /* Setup EEPROM for Read/Write */
-       if (status == 0) {
-               /* Clear CS and SK */
-               eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
-               IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-               IXGBE_WRITE_FLUSH(hw);
-               udelay(1);
+               /* Setup EEPROM for Read/Write */
+               if (status == 0) {
+                       /* Clear CS and SK */
+                       eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+                       IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+                       IXGBE_WRITE_FLUSH(hw);
+                       udelay(1);
+               }
        }
        return status;
 }
@@ -798,13 +828,10 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
 {
        s32 status = IXGBE_ERR_EEPROM;
-       u32 timeout;
+       u32 timeout = 2000;
        u32 i;
        u32 swsm;
 
-       /* Set timeout value based on size of EEPROM */
-       timeout = hw->eeprom.word_size + 1;
-
        /* Get SMBI software semaphore between device drivers first */
        for (i = 0; i < timeout; i++) {
                /*
@@ -816,7 +843,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
                        status = 0;
                        break;
                }
-               msleep(1);
+               udelay(50);
        }
 
        /* Now get the semaphore between SW/FW through the SWESMBI bit */
@@ -844,11 +871,14 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
                 * was not granted because we don't have access to the EEPROM
                 */
                if (i >= timeout) {
-                       hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
+                       hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
                               "not granted.\n");
                        ixgbe_release_eeprom_semaphore(hw);
                        status = IXGBE_ERR_EEPROM;
                }
+       } else {
+               hw_dbg(hw, "Software semaphore SMBI between device drivers "
+                      "not granted.\n");
        }
 
        return status;
@@ -1080,11 +1110,14 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
        eec &= ~IXGBE_EEC_REQ;
        IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
 
-       ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+       /* Delay before attempt to obtain semaphore again to allow FW access */
+       msleep(hw->eeprom.semaphore_delay);
 }
 
 /**
- * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
+ * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
  * @hw: pointer to hardware structure
  **/
 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
@@ -1190,7 +1223,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
        if (status == 0) {
                checksum = hw->eeprom.ops.calc_checksum(hw);
                status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
-                                             checksum);
+                                             checksum);
        } else {
                hw_dbg(hw, "EEPROM read failed\n");
        }
@@ -1238,37 +1271,37 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
        u32 rar_low, rar_high;
        u32 rar_entries = hw->mac.num_rar_entries;
 
+       /* Make sure we are using a valid rar index range */
+       if (index >= rar_entries) {
+               hw_dbg(hw, "RAR index %d is out of range.\n", index);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
        /* setup VMDq pool selection before this RAR gets enabled */
        hw->mac.ops.set_vmdq(hw, index, vmdq);
 
-       /* Make sure we are using a valid rar index range */
-       if (index < rar_entries) {
-               /*
-                * HW expects these in little endian so we reverse the byte
-                * order from network order (big endian) to little endian
-                */
-               rar_low = ((u32)addr[0] |
-                          ((u32)addr[1] << 8) |
-                          ((u32)addr[2] << 16) |
-                          ((u32)addr[3] << 24));
-               /*
-                * Some parts put the VMDq setting in the extra RAH bits,
-                * so save everything except the lower 16 bits that hold part
-                * of the address and the address valid bit.
-                */
-               rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-               rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
-               rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+       /*
+        * HW expects these in little endian so we reverse the byte
+        * order from network order (big endian) to little endian
+        */
+       rar_low = ((u32)addr[0] |
+                  ((u32)addr[1] << 8) |
+                  ((u32)addr[2] << 16) |
+                  ((u32)addr[3] << 24));
+       /*
+        * Some parts put the VMDq setting in the extra RAH bits,
+        * so save everything except the lower 16 bits that hold part
+        * of the address and the address valid bit.
+        */
+       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+       rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+       rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
 
-               if (enable_addr != 0)
-                       rar_high |= IXGBE_RAH_AV;
+       if (enable_addr != 0)
+               rar_high |= IXGBE_RAH_AV;
 
-               IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
-               IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-       } else {
-               hw_dbg(hw, "RAR index %d is out of range.\n", index);
-               return IXGBE_ERR_RAR_INDEX;
-       }
+       IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+       IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
 
        return 0;
 }
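The validation-first layout aside, the packing itself is unchanged: the first four bytes of the MAC address land in RAL in little-endian order and the last two in the low 16 bits of RAH. A standalone sketch of that packing (hosted C, no register access; the sample address is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

            /* addr[0] lands in the least-significant byte of RAL */
            uint32_t ral = (uint32_t)addr[0] |
                           ((uint32_t)addr[1] << 8) |
                           ((uint32_t)addr[2] << 16) |
                           ((uint32_t)addr[3] << 24);
            /* the last two bytes go in the low 16 bits of RAH */
            uint32_t rah = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

            printf("RAL = 0x%08x, RAH[15:0] = 0x%04x\n", ral, rah & 0xffff);
            return 0;   /* prints RAL = 0xaa211b00, RAH[15:0] = 0xccbb */
    }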
@@ -1286,58 +1319,26 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
        u32 rar_entries = hw->mac.num_rar_entries;
 
        /* Make sure we are using a valid rar index range */
-       if (index < rar_entries) {
-               /*
-                * Some parts put the VMDq setting in the extra RAH bits,
-                * so save everything except the lower 16 bits that hold part
-                * of the address and the address valid bit.
-                */
-               rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-               rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
-
-               IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-       } else {
+       if (index >= rar_entries) {
                hw_dbg(hw, "RAR index %d is out of range.\n", index);
-               return IXGBE_ERR_RAR_INDEX;
+               return IXGBE_ERR_INVALID_ARGUMENT;
        }
 
-       /* clear VMDq pool/queue selection for this RAR */
-       hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
-
-       return 0;
-}
-
-/**
- * ixgbe_enable_rar - Enable Rx address register
- * @hw: pointer to hardware structure
- * @index: index into the RAR table
- *
- * Enables the select receive address register.
- **/
-static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
-{
-       u32 rar_high;
-
-       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-       rar_high |= IXGBE_RAH_AV;
+       /*
+        * Some parts put the VMDq setting in the extra RAH bits,
+        * so save everything except the lower 16 bits that hold part
+        * of the address and the address valid bit.
+        */
+       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+       rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+       IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-}
 
-/**
- * ixgbe_disable_rar - Disable Rx address register
- * @hw: pointer to hardware structure
- * @index: index into the RAR table
- *
- * Disables the select receive address register.
- **/
-static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
-{
-       u32 rar_high;
+       /* clear VMDq pool/queue selection for this RAR */
+       hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
 
-       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-       rar_high &= (~IXGBE_RAH_AV);
-       IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-}
+       return 0;
+}
 
 /**
@@ -1386,7 +1387,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
        }
 
        /* Clear the MTA */
-       hw->addr_ctrl.mc_addr_in_rar_count = 0;
        hw->addr_ctrl.mta_in_use = 0;
        IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
 
@@ -1401,105 +1401,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
 }
 
 /**
- * ixgbe_add_uc_addr - Adds a secondary unicast address.
- * @hw: pointer to hardware structure
- * @addr: new address
- *
- * Adds it to unused receive address register or goes into promiscuous mode.
- **/
-static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
-{
-       u32 rar_entries = hw->mac.num_rar_entries;
-       u32 rar;
-
-       hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
-              addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
-
-       /*
-        * Place this address in the RAR if there is room,
-        * else put the controller into promiscuous mode
-        */
-       if (hw->addr_ctrl.rar_used_count < rar_entries) {
-               rar = hw->addr_ctrl.rar_used_count -
-                     hw->addr_ctrl.mc_addr_in_rar_count;
-               hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
-               hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
-               hw->addr_ctrl.rar_used_count++;
-       } else {
-               hw->addr_ctrl.overflow_promisc++;
-       }
-
-       hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
-}
-
-/**
- * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
- * @hw: pointer to hardware structure
- * @netdev: pointer to net device structure
- *
- * The given list replaces any existing list. Clears the secondary addrs from
- * receive address registers. Uses unused receive address registers for the
- * first secondary addresses, and falls back to promiscuous mode as needed.
- *
- * Drivers using secondary unicast addresses must set user_set_promisc when
- * manually putting the device into promiscuous mode.
- **/
-s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
-                                     struct net_device *netdev)
-{
-       u32 i;
-       u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
-       u32 uc_addr_in_use;
-       u32 fctrl;
-       struct netdev_hw_addr *ha;
-
-       /*
-        * Clear accounting of old secondary address list,
-        * don't count RAR[0]
-        */
-       uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
-       hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
-       hw->addr_ctrl.overflow_promisc = 0;
-
-       /* Zero out the other receive addresses */
-       hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
-       for (i = 0; i < uc_addr_in_use; i++) {
-               IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
-       }
-
-       /* Add the new addresses */
-       netdev_for_each_uc_addr(ha, netdev) {
-               hw_dbg(hw, " Adding the secondary addresses:\n");
-               ixgbe_add_uc_addr(hw, ha->addr, 0);
-       }
-
-       if (hw->addr_ctrl.overflow_promisc) {
-               /* enable promisc if not already in overflow or set by user */
-               if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
-                       hw_dbg(hw, " Entering address overflow promisc mode\n");
-                       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-                       fctrl |= IXGBE_FCTRL_UPE;
-                       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-                       hw->addr_ctrl.uc_set_promisc = true;
-               }
-       } else {
-               /* only disable if set by overflow, not by user */
-               if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
-                   !(hw->addr_ctrl.user_set_promisc)) {
-                       hw_dbg(hw, " Leaving address overflow promisc mode\n");
-                       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-                       fctrl &= ~IXGBE_FCTRL_UPE;
-                       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-                       hw->addr_ctrl.uc_set_promisc = false;
-               }
-       }
-
-       hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
-       return 0;
-}
-
-/**
  * ixgbe_mta_vector - Determines bit-vector in multicast table to set
  * @hw: pointer to hardware structure
  * @mc_addr: the multicast address
@@ -1550,7 +1451,6 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
        u32 vector;
        u32 vector_bit;
        u32 vector_reg;
-       u32 mta_reg;
 
        hw->addr_ctrl.mta_in_use++;
 
@@ -1568,9 +1468,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
         */
        vector_reg = (vector >> 5) & 0x7F;
        vector_bit = vector & 0x1F;
-       mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
-       mta_reg |= (1 << vector_bit);
-       IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+       hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
 }
 
 /**
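The shadow update above still derives the table position the same way: the upper 7 bits of the 12-bit hash vector select one of 128 32-bit MTA words, and the lower 5 bits select the bit within that word. A small sketch of the mapping (the example vector value is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t vector = 0x9a5;                    /* any 12-bit hash value */
            uint32_t vector_reg = (vector >> 5) & 0x7F; /* which of 128 registers */
            uint32_t vector_bit = vector & 0x1F;        /* which of 32 bits */

            printf("vector 0x%03x -> MTA[%u], bit %u\n",
                   vector, vector_reg, vector_bit);     /* MTA[77], bit 5 */
            return 0;
    }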
@@ -1596,18 +1494,21 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
        hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
        hw->addr_ctrl.mta_in_use = 0;
 
-       /* Clear the MTA */
+       /* Clear mta_shadow */
        hw_dbg(hw, " Clearing MTA\n");
-       for (i = 0; i < hw->mac.mcft_size; i++)
-               IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+       memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
 
-       /* Add the new addresses */
+       /* Update mta shadow */
        netdev_for_each_mc_addr(ha, netdev) {
                hw_dbg(hw, " Adding the multicast addresses:\n");
                ixgbe_set_mta(hw, ha->addr);
        }
 
        /* Enable mta */
+       for (i = 0; i < hw->mac.mcft_size; i++)
+               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
+                                     hw->mac.mta_shadow[i]);
+
        if (hw->addr_ctrl.mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
                                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
@@ -1624,15 +1525,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
  **/
 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
 {
-       u32 i;
-       u32 rar_entries = hw->mac.num_rar_entries;
        struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
 
-       if (a->mc_addr_in_rar_count > 0)
-               for (i = (rar_entries - a->mc_addr_in_rar_count);
-                    i < rar_entries; i++)
-                       ixgbe_enable_rar(hw, i);
-
        if (a->mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
                                hw->mac.mc_filter_type);
@@ -1648,15 +1542,8 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
  **/
 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
 {
-       u32 i;
-       u32 rar_entries = hw->mac.num_rar_entries;
        struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
 
-       if (a->mc_addr_in_rar_count > 0)
-               for (i = (rar_entries - a->mc_addr_in_rar_count);
-                    i < rar_entries; i++)
-                       ixgbe_disable_rar(hw, i);
-
        if (a->mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
 
@@ -1685,7 +1572,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
 #endif /* CONFIG_DCB */
        /* Negotiate the fc mode to use */
        ret_val = ixgbe_fc_autoneg(hw);
-       if (ret_val)
+       if (ret_val == IXGBE_ERR_FLOW_CONTROL)
                goto out;
 
        /* Disable any previous flow control settings */
@@ -1703,7 +1590,9 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
         * 2: Tx flow control is enabled (we can send pause frames but
         *    we do not support receiving pause frames).
         * 3: Both Rx and Tx flow control (symmetric) are enabled.
+#ifdef CONFIG_DCB
         * 4: Priority Flow Control is enabled.
+#endif
         * other: Invalid.
         */
        switch (hw->fc.current_mode) {
@@ -1791,12 +1680,13 @@ out:
  **/
 s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 {
-       s32 ret_val = 0;
+       s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
        ixgbe_link_speed speed;
-       u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
-       u32 links2, anlp1_reg, autoc_reg, links;
        bool link_up;
 
+       if (hw->fc.disable_fc_autoneg)
+               goto out;
+
        /*
         * AN should have completed when the cable was plugged in.
         * Look for reasons to bail out.  Bail out if:
@@ -1807,153 +1697,199 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
         * So use link_up_wait_to_complete=false.
         */
        hw->mac.ops.check_link(hw, &speed, &link_up, false);
-
-       if (hw->fc.disable_fc_autoneg || (!link_up)) {
-               hw->fc.fc_was_autonegged = false;
-               hw->fc.current_mode = hw->fc.requested_mode;
+       if (!link_up) {
+               ret_val = IXGBE_ERR_FLOW_CONTROL;
                goto out;
        }
 
-       /*
-        * On backplane, bail out if
-        * - backplane autoneg was not completed, or if
-        * - we are 82599 and link partner is not AN enabled
-        */
-       if (hw->phy.media_type == ixgbe_media_type_backplane) {
-               links = IXGBE_READ_REG(hw, IXGBE_LINKS);
-               if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
-                       hw->fc.fc_was_autonegged = false;
-                       hw->fc.current_mode = hw->fc.requested_mode;
-                       goto out;
-               }
+       switch (hw->phy.media_type) {
+       /* Autoneg flow control on fiber adapters */
+       case ixgbe_media_type_fiber:
+               if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+                       ret_val = ixgbe_fc_autoneg_fiber(hw);
+               break;
 
-               if (hw->mac.type == ixgbe_mac_82599EB) {
-                       links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
-                       if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
-                               hw->fc.fc_was_autonegged = false;
-                               hw->fc.current_mode = hw->fc.requested_mode;
-                               goto out;
-                       }
-               }
+       /* Autoneg flow control on backplane adapters */
+       case ixgbe_media_type_backplane:
+               ret_val = ixgbe_fc_autoneg_backplane(hw);
+               break;
+
+       /* Autoneg flow control on copper adapters */
+       case ixgbe_media_type_copper:
+               if (ixgbe_device_supports_autoneg_fc(hw) == 0)
+                       ret_val = ixgbe_fc_autoneg_copper(hw);
+               break;
+
+       default:
+               break;
        }
 
+out:
+       if (ret_val == 0) {
+               hw->fc.fc_was_autonegged = true;
+       } else {
+               hw->fc.fc_was_autonegged = false;
+               hw->fc.current_mode = hw->fc.requested_mode;
+       }
+       return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according on 1 gig fiber.
+ **/
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+{
+       u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+       s32 ret_val;
+
        /*
         * On multispeed fiber at 1g, bail out if
         * - link is up but AN did not complete, or if
         * - link is up and AN completed but timed out
         */
-       if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
-               linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
-               if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
-                   ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
-                       hw->fc.fc_was_autonegged = false;
-                       hw->fc.current_mode = hw->fc.requested_mode;
-                       goto out;
-               }
+
+       linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+       if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+           ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+               ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+               goto out;
        }
 
+       pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+       pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+
+       ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
+                              pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
+                              IXGBE_PCS1GANA_ASM_PAUSE,
+                              IXGBE_PCS1GANA_SYM_PAUSE,
+                              IXGBE_PCS1GANA_ASM_PAUSE);
+
+out:
+       return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+{
+       u32 links2, anlp1_reg, autoc_reg, links;
+       s32 ret_val;
+
        /*
-        * Bail out on
-        * - copper or CX4 adapters
-        * - fiber adapters running at 10gig
+        * On backplane, bail out if
+        * - backplane autoneg was not completed, or if
+        * - we are 82599 and link partner is not AN enabled
         */
-       if ((hw->phy.media_type == ixgbe_media_type_copper) ||
-            (hw->phy.media_type == ixgbe_media_type_cx4) ||
-            ((hw->phy.media_type == ixgbe_media_type_fiber) &&
-            (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
+       links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+       if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
                hw->fc.fc_was_autonegged = false;
                hw->fc.current_mode = hw->fc.requested_mode;
+               ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
                goto out;
        }
 
+       if (hw->mac.type == ixgbe_mac_82599EB) {
+               links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+               if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+                       hw->fc.fc_was_autonegged = false;
+                       hw->fc.current_mode = hw->fc.requested_mode;
+                       ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+                       goto out;
+               }
+       }
        /*
-        * Read the AN advertisement and LP ability registers and resolve
+        * Read the 10g AN autoc and LP ability registers and resolve
         * local flow control settings accordingly
         */
-       if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
-           (hw->phy.media_type != ixgbe_media_type_backplane)) {
-               pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-               pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
-               if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                   (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
-                       /*
-                        * Now we need to check if the user selected Rx ONLY
-                        * of pause frames.  In this case, we had to advertise
-                        * FULL flow control because we could not advertise RX
-                        * ONLY. Hence, we must now check to see if we need to
-                        * turn OFF the TRANSMISSION of PAUSE frames.
-                        */
-                       if (hw->fc.requested_mode == ixgbe_fc_full) {
-                               hw->fc.current_mode = ixgbe_fc_full;
-                               hw_dbg(hw, "Flow Control = FULL.\n");
-                       } else {
-                               hw->fc.current_mode = ixgbe_fc_rx_pause;
-                               hw_dbg(hw, "Flow Control=RX PAUSE only\n");
-                       }
-               } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                          (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
-                          (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                          (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
-                       hw->fc.current_mode = ixgbe_fc_tx_pause;
-                       hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
-               } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                          (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
-                          !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                          (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
-                       hw->fc.current_mode = ixgbe_fc_rx_pause;
-                       hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
-               } else {
-                       hw->fc.current_mode = ixgbe_fc_none;
-                       hw_dbg(hw, "Flow Control = NONE.\n");
-               }
-       }
+       autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
 
-       if (hw->phy.media_type == ixgbe_media_type_backplane) {
+       ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
+               anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
+               IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
+
+out:
+       return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+{
+       u16 technology_ability_reg = 0;
+       u16 lp_technology_ability_reg = 0;
+
+       hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+                            MDIO_MMD_AN,
+                            &technology_ability_reg);
+       hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
+                            MDIO_MMD_AN,
+                            &lp_technology_ability_reg);
+
+       return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
+                                 (u32)lp_technology_ability_reg,
+                                 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
+                                 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
+}
+
+/**
+ * ixgbe_negotiate_fc - Negotiate flow control
+ * @hw: pointer to hardware structure
+ * @adv_reg: flow control advertised settings
+ * @lp_reg: link partner's flow control settings
+ * @adv_sym: symmetric pause bit in advertisement
+ * @adv_asm: asymmetric pause bit in advertisement
+ * @lp_sym: symmetric pause bit in link partner advertisement
+ * @lp_asm: asymmetric pause bit in link partner advertisement
+ *
+ * Find the intersection between advertised settings and link partner's
+ * advertised settings
+ **/
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+                             u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+{
+       if ((!(adv_reg)) || (!(lp_reg)))
+               return IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+       if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
                /*
-                * Read the 10g AN autoc and LP ability registers and resolve
-                * local flow control settings accordingly
+                * Now we need to check if the user selected Rx ONLY
+                * of pause frames.  In this case, we had to advertise
+                * FULL flow control because we could not advertise RX
+                * ONLY. Hence, we must now check to see if we need to
+                * turn OFF the TRANSMISSION of PAUSE frames.
                 */
-               autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-               anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
-
-               if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
-                   (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
-                       /*
-                        * Now we need to check if the user selected Rx ONLY
-                        * of pause frames.  In this case, we had to advertise
-                        * FULL flow control because we could not advertise RX
-                        * ONLY. Hence, we must now check to see if we need to
-                        * turn OFF the TRANSMISSION of PAUSE frames.
-                        */
-                       if (hw->fc.requested_mode == ixgbe_fc_full) {
-                               hw->fc.current_mode = ixgbe_fc_full;
-                               hw_dbg(hw, "Flow Control = FULL.\n");
-                       } else {
-                               hw->fc.current_mode = ixgbe_fc_rx_pause;
-                               hw_dbg(hw, "Flow Control=RX PAUSE only\n");
-                       }
-               } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
-                          (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
-                          (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
-                          (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
-                       hw->fc.current_mode = ixgbe_fc_tx_pause;
-                       hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
-               } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
-                          (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
-                          !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
-                          (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
-                       hw->fc.current_mode = ixgbe_fc_rx_pause;
-                       hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
-               } else {
-                       hw->fc.current_mode = ixgbe_fc_none;
-                       hw_dbg(hw, "Flow Control = NONE.\n");
-               }
+               if (hw->fc.requested_mode == ixgbe_fc_full) {
+                       hw->fc.current_mode = ixgbe_fc_full;
+                       hw_dbg(hw, "Flow Control = FULL.\n");
+               } else {
+                       hw->fc.current_mode = ixgbe_fc_rx_pause;
+                       hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
+               }
+       } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+                  (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+               hw->fc.current_mode = ixgbe_fc_tx_pause;
+               hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+       } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+                  !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+               hw->fc.current_mode = ixgbe_fc_rx_pause;
+               hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+       } else {
+               hw->fc.current_mode = ixgbe_fc_none;
+               hw_dbg(hw, "Flow Control = NONE.\n");
        }
-       /* Record that current_mode is the result of a successful autoneg */
-       hw->fc.fc_was_autonegged = true;
-
-out:
-       return ret_val;
+       return 0;
 }
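The extracted ixgbe_negotiate_fc helper is the standard IEEE 802.3 pause resolution: symmetric pause advertised on both ends yields full flow control (or rx-only, depending on what was requested), and the asymmetric combinations yield tx-only or rx-only. A hosted-C sketch of the same truth table; the SYM/ASM mask values here are made up, only their roles matter:

    #include <stdio.h>

    #define SYM 0x1  /* symmetric pause bit (illustrative value) */
    #define ASM 0x2  /* asymmetric pause bit (illustrative value) */

    static const char *resolve_fc(unsigned adv, unsigned lp, int requested_full)
    {
            if ((adv & SYM) && (lp & SYM))
                    return requested_full ? "full" : "rx_pause";
            if (!(adv & SYM) && (adv & ASM) && (lp & SYM) && (lp & ASM))
                    return "tx_pause";
            if ((adv & SYM) && (adv & ASM) && !(lp & SYM) && (lp & ASM))
                    return "rx_pause";
            return "none";
    }

    int main(void)
    {
            /* we advertise asymmetric only, partner advertises both */
            printf("%s\n", resolve_fc(ASM, SYM | ASM, 1));  /* tx_pause */
            return 0;
    }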
@@ -1965,7 +1901,8 @@ out:
 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
 {
        s32 ret_val = 0;
-       u32 reg;
+       u32 reg = 0, reg_bp = 0;
+       u16 reg_cu = 0;
 
 #ifdef CONFIG_DCB
        if (hw->fc.requested_mode == ixgbe_fc_pfc) {
@@ -1973,7 +1910,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
                goto out;
        }
 
-#endif
+#endif /* CONFIG_DCB */
        /* Validate the packetbuf configuration */
        if (packetbuf_num < 0 || packetbuf_num > 7) {
                hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
@@ -2011,11 +1948,26 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
                hw->fc.requested_mode = ixgbe_fc_full;
 
        /*
-        * Set up the 1G flow control advertisement registers so the HW will be
-        * able to do fc autoneg once the cable is plugged in. If we end up
-        * using 10g instead, this is harmless.
+        * Set up the 1G and 10G flow control advertisement registers so the
+        * HW will be able to do fc autoneg once the cable is plugged in. If
+        * we link at 10G, the 1G advertisement is harmless and vice versa.
         */
-       reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+
+       switch (hw->phy.media_type) {
+       case ixgbe_media_type_fiber:
+       case ixgbe_media_type_backplane:
+               reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+               reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+               break;
+
+       case ixgbe_media_type_copper:
+               hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+                                    MDIO_MMD_AN, &reg_cu);
+               break;
+
+       default:
+               ;
+       }
 
        /*
         * The possible values of fc.requested_mode are:
@@ -2034,6 +1986,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
        case ixgbe_fc_none:
                /* Flow control completely disabled by software override. */
                reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane)
+                       reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+                                   IXGBE_AUTOC_ASM_PAUSE);
+               else if (hw->phy.media_type == ixgbe_media_type_copper)
+                       reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
                break;
        case ixgbe_fc_rx_pause:
                /*
@@ -2045,6 +2002,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
                 * disable the adapter's ability to send PAUSE frames.
                 */
                reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane)
+                       reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+                                  IXGBE_AUTOC_ASM_PAUSE);
+               else if (hw->phy.media_type == ixgbe_media_type_copper)
+                       reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
                break;
        case ixgbe_fc_tx_pause:
                /*
@@ -2053,10 +2015,22 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
                 */
                reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
                reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane) {
+                       reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
+                       reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
+               } else if (hw->phy.media_type == ixgbe_media_type_copper) {
+                       reg_cu |= (IXGBE_TAF_ASM_PAUSE);
+                       reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
+               }
                break;
        case ixgbe_fc_full:
                /* Flow control (both Rx and Tx) is enabled by SW override. */
                reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane)
+                       reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+                                  IXGBE_AUTOC_ASM_PAUSE);
+               else if (hw->phy.media_type == ixgbe_media_type_copper)
+                       reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
                break;
 #ifdef CONFIG_DCB
        case ixgbe_fc_pfc:
@@ -2070,80 +2044,37 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
                break;
        }
 
-       IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
-       reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
-
-       /* Disable AN timeout */
-       if (hw->fc.strict_ieee)
-               reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+       if (hw->mac.type != ixgbe_mac_X540) {
+               /*
+                * Enable auto-negotiation between the MAC & PHY;
+                * the MAC will advertise clause 37 flow control.
+                */
+               IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+               reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
 
-       IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
-       hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+               /* Disable AN timeout */
+               if (hw->fc.strict_ieee)
+                       reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
 
-       /*
-        * Set up the 10G flow control advertisement registers so the HW
-        * can do fc autoneg once the cable is plugged in. If we end up
-        * using 1g instead, this is harmless.
-        */
-       reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+               IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+               hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+       }
 
        /*
-        * The possible values of fc.requested_mode are:
-        * 0: Flow control is completely disabled
-        * 1: Rx flow control is enabled (we can receive pause frames,
-        *    but not send pause frames).
-        * 2: Tx flow control is enabled (we can send pause frames but
-        *    we do not support receiving pause frames).
-        * 3: Both Rx and Tx flow control (symmetric) are enabled.
-        * other: Invalid.
+        * AUTOC restart handles negotiation of 1G and 10G on backplane
+        * and copper. There is no need to set the PCS1GCTL register.
+        *
         */
-       switch (hw->fc.requested_mode) {
-       case ixgbe_fc_none:
-               /* Flow control completely disabled by software override. */
-               reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
-               break;
-       case ixgbe_fc_rx_pause:
-               /*
-                * Rx Flow control is enabled and Tx Flow control is
-                * disabled by software override. Since there really
-                * isn't a way to advertise that we are capable of RX
-                * Pause ONLY, we will advertise that we support both
-                * symmetric and asymmetric Rx PAUSE.  Later, we will
-                * disable the adapter's ability to send PAUSE frames.
-                */
-               reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
-               break;
-       case ixgbe_fc_tx_pause:
-               /*
-                * Tx Flow control is enabled, and Rx Flow control is
-                * disabled by software override.
-                */
-               reg |= (IXGBE_AUTOC_ASM_PAUSE);
-               reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
-               break;
-       case ixgbe_fc_full:
-               /* Flow control (both Rx and Tx) is enabled by SW override. */
-               reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
-               break;
-#ifdef CONFIG_DCB
-       case ixgbe_fc_pfc:
-               goto out;
-               break;
-#endif /* CONFIG_DCB */
-       default:
-               hw_dbg(hw, "Flow control param set incorrectly\n");
-               ret_val = IXGBE_ERR_CONFIG;
-               goto out;
-               break;
+       if (hw->phy.media_type == ixgbe_media_type_backplane) {
+               reg_bp |= IXGBE_AUTOC_AN_RESTART;
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+       } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+                  (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
+               hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
+                                     MDIO_MMD_AN, reg_cu);
        }
-       /*
-        * AUTOC restart handles negotiation of 1G and 10G. There is
-        * no need to set the PCS1GCTL register.
-        */
-       reg |= IXGBE_AUTOC_AN_RESTART;
-       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
-       hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
 
+       hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
 out:
        return ret_val;
 }
@@ -2159,10 +2090,16 @@ out:
  **/
 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
 {
+       struct ixgbe_adapter *adapter = hw->back;
        u32 i;
        u32 reg_val;
        u32 number_of_queues;
-       s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+       s32 status = 0;
+       u16 dev_status = 0;
+
+       /* Just jump out if bus mastering is already disabled */
+       if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+               goto out;
 
        /* Disable the receive unit by stopping each queue */
        number_of_queues = hw->mac.max_rx_queues;
@@ -2179,13 +2116,43 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
        IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
 
        for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
-               if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
-                       status = 0;
+               if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+                       goto check_device_status;
+               udelay(100);
+       }
+
+       hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
+       status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+
+       /*
+        * Before proceeding, make sure that the PCIe block does not have
+        * transactions pending.
+        */
+check_device_status:
+       for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+               pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
+                                    &dev_status);
+               if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
                        break;
-               }
                udelay(100);
        }
 
+       if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
+               hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
+       else
+               goto out;
+
+       /*
+        * Two consecutive resets are required via CTRL.RST per datasheet
+        * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
+        * of this need.  The first reset prevents new master requests from
+        * being issued by our device.  We then must wait 1usec for any
+        * remaining completions from the PCIe bus to trickle in, and then reset
+        * again to clear out any effects they may have had on our device.
+        */
+       hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+
+out:
        return status;
 }
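The disable path now polls twice, first on the device's GIO status bit and then on the Transactions Pending bit of the standard PCIe Device Status register, before flagging the double reset. A hedged sketch of the underlying poll-with-timeout idiom the hunk repeats (the predicate, argument, and attempt count are placeholders, not driver API):

    /* Sketch: poll until cond() holds or the attempt budget is spent. */
    static int poll_with_timeout(int (*cond)(void *), void *arg, unsigned attempts)
    {
            unsigned i;

            for (i = 0; i < attempts; i++) {
                    if (cond(arg))
                            return 0;       /* condition met */
                    udelay(100);            /* same spacing the driver uses */
            }
            return -1;                      /* caller escalates, e.g. double reset */
    }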
@@ -2195,7 +2162,7 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
  * @hw: pointer to hardware structure
  * @mask: Mask to specify which semaphore to acquire
  *
- * Acquires the SWFW semaphore thought the GSSR register for the specified
+ * Acquires the SWFW semaphore through the GSSR register for the specified
  * function (CSR, PHY0, PHY1, EEPROM, Flash)
  **/
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2206,6 +2173,10 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
        s32 timeout = 200;
 
        while (timeout) {
+               /*
+                * SW EEPROM semaphore bit is used for access to all
+                * SW_FW_SYNC/GSSR bits (not just EEPROM)
+                */
                if (ixgbe_get_eeprom_semaphore(hw))
                        return IXGBE_ERR_SWFW_SYNC;
 
@@ -2223,7 +2194,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
        }
 
        if (!timeout) {
-               hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
+               hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
                return IXGBE_ERR_SWFW_SYNC;
        }
 
@@ -2239,7 +2210,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
  * @hw: pointer to hardware structure
  * @mask: Mask to specify which semaphore to release
  *
- * Releases the SWFW semaphore thought the GSSR register for the specified
+ * Releases the SWFW semaphore through the GSSR register for the specified
  * function (CSR, PHY0, PHY1, EEPROM, Flash)
  **/
 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2427,37 +2398,38 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
        u32 mpsar_lo, mpsar_hi;
        u32 rar_entries = hw->mac.num_rar_entries;
 
-       if (rar < rar_entries) {
-               mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-               mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+       /* Make sure we are using a valid rar index range */
+       if (rar >= rar_entries) {
+               hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
 
-               if (!mpsar_lo && !mpsar_hi)
-                       goto done;
+       mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+       mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
 
-               if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
-                       if (mpsar_lo) {
-                               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
-                               mpsar_lo = 0;
-                       }
-                       if (mpsar_hi) {
-                               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
-                               mpsar_hi = 0;
-                       }
-               } else if (vmdq < 32) {
-                       mpsar_lo &= ~(1 << vmdq);
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
-               } else {
-                       mpsar_hi &= ~(1 << (vmdq - 32));
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
-               }
+       if (!mpsar_lo && !mpsar_hi)
+               goto done;
 
-               /* was that the last pool using this rar? */
-               if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
-                       hw->mac.ops.clear_rar(hw, rar);
+       if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+               if (mpsar_lo) {
+                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+                       mpsar_lo = 0;
+               }
+               if (mpsar_hi) {
+                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+                       mpsar_hi = 0;
+               }
+       } else if (vmdq < 32) {
+               mpsar_lo &= ~(1 << vmdq);
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
        } else {
-               hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+               mpsar_hi &= ~(1 << (vmdq - 32));
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
        }
 
+       /* was that the last pool using this rar? */
+       if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+               hw->mac.ops.clear_rar(hw, rar);
 done:
        return 0;
 }
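The validated MPSAR update treats MPSAR_LO/MPSAR_HI as one 64-pool bitmap per RAR entry: pool n maps to bit n of the low word when n < 32, else to bit n - 32 of the high word. A tiny sketch of the index split (pure arithmetic; the pool number is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t vmdq = 37;   /* example pool number, 0..63 */
            int high = vmdq >= 32;
            uint32_t bit = high ? (vmdq - 32) : vmdq;

            printf("pool %u -> MPSAR_%s bit %u (mask 0x%08x)\n",
                   vmdq, high ? "HI" : "LO", bit, (uint32_t)1 << bit);
            return 0;   /* pool 37 -> MPSAR_HI bit 5 (mask 0x00000020) */
    }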
@@ -2473,18 +2445,20 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
        u32 mpsar;
        u32 rar_entries = hw->mac.num_rar_entries;
 
-       if (rar < rar_entries) {
-               if (vmdq < 32) {
-                       mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-                       mpsar |= 1 << vmdq;
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
-               } else {
-                       mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-                       mpsar |= 1 << (vmdq - 32);
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
-               }
-       } else {
+       /* Make sure we are using a valid rar index range */
+       if (rar >= rar_entries) {
                hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       if (vmdq < 32) {
+               mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+               mpsar |= 1 << vmdq;
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+       } else {
+               mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+               mpsar |= 1 << (vmdq - 32);
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
        }
        return 0;
 }
@@ -2497,7 +2471,6 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
 {
        int i;
 
-
        for (i = 0; i < 128; i++)
                IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
 
@@ -2726,12 +2699,21 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
  * Reads the links register to determine if link is up and the current speed
  **/
 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
-                                 bool *link_up, bool link_up_wait_to_complete)
+                                bool *link_up, bool link_up_wait_to_complete)
 {
-       u32 links_reg;
+       u32 links_reg, links_orig;
        u32 i;
 
+       /* clear the old state */
+       links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
        links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+       if (links_orig != links_reg) {
+               hw_dbg(hw, "LINKS changed from %08X to %08X\n",
+                      links_orig, links_reg);
+       }
+
        if (link_up_wait_to_complete) {
                for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
                        if (links_reg & IXGBE_LINKS_UP) {
@@ -2754,10 +2736,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
                 IXGBE_LINKS_SPEED_10G_82599)
                *speed = IXGBE_LINK_SPEED_10GB_FULL;
        else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
                 IXGBE_LINKS_SPEED_1G_82599)
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
-       else
+       else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+                IXGBE_LINKS_SPEED_100_82599)
                *speed = IXGBE_LINK_SPEED_100_FULL;
+       else
+               *speed = IXGBE_LINK_SPEED_UNKNOWN;
 
        /* if link is down, zero out the current_mode */
        if (*link_up == false) {
@@ -2814,6 +2799,28 @@ wwn_prefix_out:
 }
 
 /**
+ * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
+ * control
+ * @hw: pointer to hardware structure
+ *
+ * There are several phys that do not support autoneg flow control. This
+ * function check the device id to see if the associated phy supports
+ * autoneg flow control.
+ **/
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+{
+
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_X540T:
+               return 0;
+       case IXGBE_DEV_ID_82599_T3_LOM:
+               return 0;
+       default:
+               return IXGBE_ERR_FC_NOT_SUPPORTED;
+       }
+}
+
+/**
  * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
  * @hw: pointer to hardware structure
  * @enable: enable or disable switch for anti-spoofing