path: root/drivers/net/e1000e/lib.c
Diffstat (limited to 'drivers/net/e1000e/lib.c')
-rw-r--r--  drivers/net/e1000e/lib.c  |  535
1 file changed, 292 insertions(+), 243 deletions(-)
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 99ba2b8a2a05..a8b2c0de27c4 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -26,11 +26,6 @@
 
 *******************************************************************************/
 
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-
 #include "e1000.h"
 
 enum e1000_mng_mode {
@@ -56,10 +51,10 @@ enum e1000_mng_mode {
  **/
 s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
 {
+        struct e1000_mac_info *mac = &hw->mac;
         struct e1000_bus_info *bus = &hw->bus;
         struct e1000_adapter *adapter = hw->adapter;
-        u32 status;
-        u16 pcie_link_status, pci_header_type, cap_offset;
+        u16 pcie_link_status, cap_offset;
 
         cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
         if (!cap_offset) {
@@ -73,21 +68,64 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
                                               PCIE_LINK_WIDTH_SHIFT);
         }
 
-        pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
-                             &pci_header_type);
-        if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
-                status = er32(STATUS);
-                bus->func = (status & E1000_STATUS_FUNC_MASK)
-                            >> E1000_STATUS_FUNC_SHIFT;
-        } else {
-                bus->func = 0;
-        }
+        mac->ops.set_lan_id(hw);
 
         return 0;
 }
 
 /**
- * e1000e_write_vfta - Write value to VLAN filter table
+ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ *
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
+ **/
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+{
+        struct e1000_bus_info *bus = &hw->bus;
+        u32 reg;
+
+        /*
+         * The status register reports the correct function number
+         * for the device regardless of function swap state.
+         */
+        reg = er32(STATUS);
+        bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+}
+
+/**
+ * e1000_set_lan_id_single_port - Set LAN id for a single port device
+ * @hw: pointer to the HW structure
+ *
+ * Sets the LAN function id to zero for a single port device.
+ **/
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
+{
+        struct e1000_bus_info *bus = &hw->bus;
+
+        bus->func = 0;
+}
+
+/**
+ * e1000_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
+{
+        u32 offset;
+
+        for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+                E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+                e1e_flush();
+        }
+}
+
+/**
+ * e1000_write_vfta_generic - Write value to VLAN filter table
  * @hw: pointer to the HW structure
  * @offset: register offset in VLAN filter table
  * @value: register value written to VLAN filter table
@@ -95,7 +133,7 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
  * Writes value at the given offset in the register array which stores
  * the VLAN filter table.
  **/
-void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
 {
         E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
         e1e_flush();
@@ -113,20 +151,79 @@ void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
 void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
 {
         u32 i;
+        u8 mac_addr[ETH_ALEN] = {0};
 
         /* Setup the receive address */
-        hw_dbg(hw, "Programming MAC Address into RAR[0]\n");
+        e_dbg("Programming MAC Address into RAR[0]\n");
 
         e1000e_rar_set(hw, hw->mac.addr, 0);
 
         /* Zero out the other (rar_entry_count - 1) receive addresses */
-        hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1);
-        for (i = 1; i < rar_count; i++) {
-                E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
-                e1e_flush();
-                E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
-                e1e_flush();
-        }
+        e_dbg("Clearing RAR[1-%u]\n", rar_count-1);
+        for (i = 1; i < rar_count; i++)
+                e1000e_rar_set(hw, mac_addr, i);
+}
+
+/**
+ * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the nvm for an alternate MAC address. An alternate MAC address
+ * can be setup by pre-boot software and must be treated like a permanent
+ * address and must override the actual permanent MAC address. If an
+ * alternate MAC address is found it is programmed into RAR0, replacing
+ * the permanent address that was installed into RAR0 by the Si on reset.
+ * This function will return SUCCESS unless it encounters an error while
+ * reading the EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+        u32 i;
+        s32 ret_val = 0;
+        u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+        u8 alt_mac_addr[ETH_ALEN];
+
+        ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+                                 &nvm_alt_mac_addr_offset);
+        if (ret_val) {
+                e_dbg("NVM Read Error\n");
+                goto out;
+        }
+
+        if (nvm_alt_mac_addr_offset == 0xFFFF) {
+                /* There is no Alternate MAC Address */
+                goto out;
+        }
+
+        if (hw->bus.func == E1000_FUNC_1)
+                nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+        for (i = 0; i < ETH_ALEN; i += 2) {
+                offset = nvm_alt_mac_addr_offset + (i >> 1);
+                ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+                if (ret_val) {
+                        e_dbg("NVM Read Error\n");
+                        goto out;
+                }
+
+                alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+                alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+        }
+
+        /* if multicast bit is set, the alternate address will not be used */
+        if (alt_mac_addr[0] & 0x01) {
+                e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+                goto out;
+        }
+
+        /*
+         * We have a valid alternate MAC address, and we want to treat it the
+         * same as the normal permanent MAC address stored by the HW into the
+         * RAR. Do this by mapping this address into RAR0.
+         */
+        e1000e_rar_set(hw, alt_mac_addr, 0);
+
+out:
+        return ret_val;
 }
 
 /**
@@ -152,10 +249,19 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
 
         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
 
-        rar_high |= E1000_RAH_AV;
+        /* If MAC address zero, no need to set the AV bit */
+        if (rar_low || rar_high)
+                rar_high |= E1000_RAH_AV;
 
-        E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
-        E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
+        /*
+         * Some bridges will combine consecutive 32-bit writes into
+         * a single burst write, which will malfunction on some parts.
+         * The flushes avoid this.
+         */
+        ew32(RAL(index), rar_low);
+        e1e_flush();
+        ew32(RAH(index), rar_high);
+        e1e_flush();
 }
 
 /**
@@ -234,62 +340,34 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
  * @hw: pointer to the HW structure
  * @mc_addr_list: array of multicast addresses to program
  * @mc_addr_count: number of multicast addresses to program
- * @rar_used_count: the first RAR register free to program
- * @rar_count: total number of supported Receive Address Registers
  *
- * Updates the Receive Address Registers and Multicast Table Array.
+ * Updates entire Multicast Table Array.
  * The caller must have a packed mc_addr_list of multicast addresses.
- * The parameter rar_count will usually be hw->mac.rar_entry_count
- * unless there are workarounds that change this.
  **/
 void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
-                                        u8 *mc_addr_list, u32 mc_addr_count,
-                                        u32 rar_used_count, u32 rar_count)
+                                        u8 *mc_addr_list, u32 mc_addr_count)
 {
-        u32 i;
-        u32 *mcarray = kzalloc(hw->mac.mta_reg_count * sizeof(u32), GFP_ATOMIC);
+        u32 hash_value, hash_bit, hash_reg;
+        int i;
 
-        if (!mcarray) {
-                printk(KERN_ERR "multicast array memory allocation failed\n");
-                return;
-        }
+        /* clear mta_shadow */
+        memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
 
-        /*
-         * Load the first set of multicast addresses into the exact
-         * filters (RAR). If there are not enough to fill the RAR
-         * array, clear the filters.
-         */
-        for (i = rar_used_count; i < rar_count; i++) {
-                if (mc_addr_count) {
-                        e1000e_rar_set(hw, mc_addr_list, i);
-                        mc_addr_count--;
-                        mc_addr_list += ETH_ALEN;
-                } else {
-                        E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
-                        e1e_flush();
-                        E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
-                        e1e_flush();
-                }
-        }
-
-        /* Load any remaining multicast addresses into the hash table. */
-        for (; mc_addr_count > 0; mc_addr_count--) {
-                u32 hash_value, hash_reg, hash_bit, mta;
+        /* update mta_shadow from mc_addr_list */
+        for (i = 0; (u32) i < mc_addr_count; i++) {
                 hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
-                hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
+
                 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
                 hash_bit = hash_value & 0x1F;
-                mta = (1 << hash_bit);
-                mcarray[hash_reg] |= mta;
-                mc_addr_list += ETH_ALEN;
-        }
 
-        /* write the hash table completely */
-        for (i = 0; i < hw->mac.mta_reg_count; i++)
-                E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, mcarray[i]);
+                hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+                mc_addr_list += (ETH_ALEN);
+        }
 
+        /* replace the entire MTA table */
+        for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+                E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
         e1e_flush();
-        kfree(mcarray);
 }
 
 /**
@@ -300,45 +378,43 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
  **/
 void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
 {
-        u32 temp;
-
-        temp = er32(CRCERRS);
-        temp = er32(SYMERRS);
-        temp = er32(MPC);
-        temp = er32(SCC);
-        temp = er32(ECOL);
-        temp = er32(MCC);
-        temp = er32(LATECOL);
-        temp = er32(COLC);
-        temp = er32(DC);
-        temp = er32(SEC);
-        temp = er32(RLEC);
-        temp = er32(XONRXC);
-        temp = er32(XONTXC);
-        temp = er32(XOFFRXC);
-        temp = er32(XOFFTXC);
-        temp = er32(FCRUC);
-        temp = er32(GPRC);
-        temp = er32(BPRC);
-        temp = er32(MPRC);
-        temp = er32(GPTC);
-        temp = er32(GORCL);
-        temp = er32(GORCH);
-        temp = er32(GOTCL);
-        temp = er32(GOTCH);
-        temp = er32(RNBC);
-        temp = er32(RUC);
-        temp = er32(RFC);
-        temp = er32(ROC);
-        temp = er32(RJC);
-        temp = er32(TORL);
-        temp = er32(TORH);
-        temp = er32(TOTL);
-        temp = er32(TOTH);
-        temp = er32(TPR);
-        temp = er32(TPT);
-        temp = er32(MPTC);
-        temp = er32(BPTC);
+        er32(CRCERRS);
+        er32(SYMERRS);
+        er32(MPC);
+        er32(SCC);
+        er32(ECOL);
+        er32(MCC);
+        er32(LATECOL);
+        er32(COLC);
+        er32(DC);
+        er32(SEC);
+        er32(RLEC);
+        er32(XONRXC);
+        er32(XONTXC);
+        er32(XOFFRXC);
+        er32(XOFFTXC);
+        er32(FCRUC);
+        er32(GPRC);
+        er32(BPRC);
+        er32(MPRC);
+        er32(GPTC);
+        er32(GORCL);
+        er32(GORCH);
+        er32(GOTCL);
+        er32(GOTCH);
+        er32(RNBC);
+        er32(RUC);
+        er32(RFC);
+        er32(ROC);
+        er32(RJC);
+        er32(TORL);
+        er32(TORH);
+        er32(TOTL);
+        er32(TOTH);
+        er32(TPR);
+        er32(TPT);
+        er32(MPTC);
+        er32(BPTC);
 }
 
 /**
@@ -376,7 +452,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
         if (!link)
                 return ret_val; /* No link detected */
 
-        mac->get_link_status = 0;
+        mac->get_link_status = false;
 
         /*
          * Check if there was DownShift, must be checked
@@ -408,7 +484,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
          */
         ret_val = e1000e_config_fc_after_link_up(hw);
         if (ret_val) {
-                hw_dbg(hw, "Error configuring flow control\n");
+                e_dbg("Error configuring flow control\n");
         }
 
         return ret_val;
@@ -448,7 +524,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
                         mac->autoneg_failed = 1;
                         return 0;
                 }
-                hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
+                e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
 
                 /* Disable auto-negotiation in the TXCW register */
                 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -461,7 +537,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
                 /* Configure Flow Control after forcing link up. */
                 ret_val = e1000e_config_fc_after_link_up(hw);
                 if (ret_val) {
-                        hw_dbg(hw, "Error configuring flow control\n");
+                        e_dbg("Error configuring flow control\n");
                         return ret_val;
                 }
         } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
@@ -471,7 +547,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
                  * and disable forced link in the Device Control register
                  * in an attempt to auto-negotiate with our link partner.
                  */
-                hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
+                e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
                 ew32(TXCW, mac->txcw);
                 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 
@@ -513,7 +589,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
                         mac->autoneg_failed = 1;
                         return 0;
                 }
-                hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
+                e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
 
                 /* Disable auto-negotiation in the TXCW register */
                 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -526,7 +602,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
                 /* Configure Flow Control after forcing link up. */
                 ret_val = e1000e_config_fc_after_link_up(hw);
                 if (ret_val) {
-                        hw_dbg(hw, "Error configuring flow control\n");
+                        e_dbg("Error configuring flow control\n");
                         return ret_val;
                 }
         } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
@@ -536,7 +612,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
                  * and disable forced link in the Device Control register
                  * in an attempt to auto-negotiate with our link partner.
                  */
-                hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
+                e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
                 ew32(TXCW, mac->txcw);
                 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 
@@ -553,11 +629,11 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
                 if (rxcw & E1000_RXCW_SYNCH) {
                         if (!(rxcw & E1000_RXCW_IV)) {
                                 mac->serdes_has_link = true;
-                                hw_dbg(hw, "SERDES: Link up - forced.\n");
+                                e_dbg("SERDES: Link up - forced.\n");
                         }
                 } else {
                         mac->serdes_has_link = false;
-                        hw_dbg(hw, "SERDES: Link down - force failed.\n");
+                        e_dbg("SERDES: Link down - force failed.\n");
                 }
         }
 
@@ -570,20 +646,20 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
                 if (rxcw & E1000_RXCW_SYNCH) {
                         if (!(rxcw & E1000_RXCW_IV)) {
                                 mac->serdes_has_link = true;
-                                hw_dbg(hw, "SERDES: Link up - autoneg "
-                                       "completed sucessfully.\n");
+                                e_dbg("SERDES: Link up - autoneg "
+                                      "completed successfully.\n");
                         } else {
                                 mac->serdes_has_link = false;
-                                hw_dbg(hw, "SERDES: Link down - invalid"
+                                e_dbg("SERDES: Link down - invalid"
                                       "codewords detected in autoneg.\n");
                         }
                 } else {
                         mac->serdes_has_link = false;
-                        hw_dbg(hw, "SERDES: Link down - no sync.\n");
+                        e_dbg("SERDES: Link down - no sync.\n");
                 }
         } else {
                 mac->serdes_has_link = false;
-                hw_dbg(hw, "SERDES: Link down - autoneg failed\n");
+                e_dbg("SERDES: Link down - autoneg failed\n");
         }
 }
 
@@ -614,7 +690,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
         ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
 
         if (ret_val) {
-                hw_dbg(hw, "NVM Read Error\n");
+                e_dbg("NVM Read Error\n");
                 return ret_val;
         }
 
@@ -667,7 +743,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
          */
         hw->fc.current_mode = hw->fc.requested_mode;
 
-        hw_dbg(hw, "After fix-ups FlowControl is now = %x\n",
+        e_dbg("After fix-ups FlowControl is now = %x\n",
                 hw->fc.current_mode);
 
         /* Call the necessary media_type subroutine to configure the link. */
@@ -681,7 +757,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
          * control is disabled, because it does not hurt anything to
          * initialize these registers.
          */
-        hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n");
+        e_dbg("Initializing the Flow Control address, type and timer regs\n");
         ew32(FCT, FLOW_CONTROL_TYPE);
         ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
         ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
@@ -751,7 +827,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
                 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
                 break;
         default:
-                hw_dbg(hw, "Flow control param set incorrectly\n");
+                e_dbg("Flow control param set incorrectly\n");
                 return -E1000_ERR_CONFIG;
                 break;
         }
@@ -789,7 +865,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
                         break;
         }
         if (i == FIBER_LINK_UP_LIMIT) {
-                hw_dbg(hw, "Never got a valid link from auto-neg!!!\n");
+                e_dbg("Never got a valid link from auto-neg!!!\n");
                 mac->autoneg_failed = 1;
                 /*
                  * AutoNeg failed to achieve a link, so we'll call
@@ -799,13 +875,13 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
                  */
                 ret_val = mac->ops.check_for_link(hw);
                 if (ret_val) {
-                        hw_dbg(hw, "Error while checking for link\n");
+                        e_dbg("Error while checking for link\n");
                         return ret_val;
                 }
                 mac->autoneg_failed = 0;
         } else {
                 mac->autoneg_failed = 0;
-                hw_dbg(hw, "Valid Link Found\n");
+                e_dbg("Valid Link Found\n");
         }
 
         return 0;
@@ -841,7 +917,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
          * then the link-up status bit will be set and the flow control enable
          * bits (RFCE and TFCE) will be set according to their negotiated value.
          */
-        hw_dbg(hw, "Auto-negotiation enabled\n");
+        e_dbg("Auto-negotiation enabled\n");
 
         ew32(CTRL, ctrl);
         e1e_flush();
@@ -856,7 +932,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
             (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
                 ret_val = e1000_poll_fiber_serdes_link_generic(hw);
         } else {
-                hw_dbg(hw, "No signal detected\n");
+                e_dbg("No signal detected\n");
         }
 
         return 0;
@@ -952,7 +1028,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
          *      3:  Both Rx and Tx flow control (symmetric) is enabled.
          *  other:  No other values should be possible at this point.
          */
-        hw_dbg(hw, "hw->fc.current_mode = %u\n", hw->fc.current_mode);
+        e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
 
         switch (hw->fc.current_mode) {
         case e1000_fc_none:
@@ -970,7 +1046,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
                 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
                 break;
         default:
-                hw_dbg(hw, "Flow control param set incorrectly\n");
+                e_dbg("Flow control param set incorrectly\n");
                 return -E1000_ERR_CONFIG;
         }
 
@@ -1011,7 +1087,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
         }
 
         if (ret_val) {
-                hw_dbg(hw, "Error forcing flow control settings\n");
+                e_dbg("Error forcing flow control settings\n");
                 return ret_val;
         }
 
@@ -1035,7 +1111,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
                         return ret_val;
 
                 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
-                        hw_dbg(hw, "Copper PHY and Auto Neg "
+                        e_dbg("Copper PHY and Auto Neg "
                                  "has not completed.\n");
                         return ret_val;
                 }
@@ -1076,7 +1152,6 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
          *   1   |    1    |    0    |    0    | e1000_fc_none
          *   1   |    1    |    0    |    1    | e1000_fc_rx_pause
          *
-         *
          * Are both PAUSE bits set to 1?  If so, this implies
          * Symmetric Flow Control is enabled at both ends.  The
          * ASM_DIR bits are irrelevant per the spec.
@@ -1100,10 +1175,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
                  */
                 if (hw->fc.requested_mode == e1000_fc_full) {
                         hw->fc.current_mode = e1000_fc_full;
-                        hw_dbg(hw, "Flow Control = FULL.\r\n");
+                        e_dbg("Flow Control = FULL.\r\n");
                 } else {
                         hw->fc.current_mode = e1000_fc_rx_pause;
-                        hw_dbg(hw, "Flow Control = "
+                        e_dbg("Flow Control = "
                               "RX PAUSE frames only.\r\n");
                 }
         }
@@ -1114,14 +1189,13 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
          *   PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
          *-------|---------|-------|---------|--------------------
          *   0   |    1    |    1    |    1    | e1000_fc_tx_pause
-         *
          */
         else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
                  (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
                  (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
                  (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                 hw->fc.current_mode = e1000_fc_tx_pause;
-                hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n");
+                e_dbg("Flow Control = Tx PAUSE frames only.\r\n");
         }
         /*
          * For transmitting PAUSE frames ONLY.
@@ -1130,21 +1204,20 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
          *   PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
          *-------|---------|-------|---------|--------------------
          *   1   |    1    |    0    |    1    | e1000_fc_rx_pause
-         *
          */
         else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
                  (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
                  !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
                  (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                 hw->fc.current_mode = e1000_fc_rx_pause;
-                hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n");
+                e_dbg("Flow Control = Rx PAUSE frames only.\r\n");
         } else {
                 /*
                  * Per the IEEE spec, at this point flow control
                  * should be disabled.
                  */
                 hw->fc.current_mode = e1000_fc_none;
-                hw_dbg(hw, "Flow Control = NONE.\r\n");
+                e_dbg("Flow Control = NONE.\r\n");
         }
 
         /*
@@ -1154,7 +1227,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
          */
         ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
         if (ret_val) {
-                hw_dbg(hw, "Error getting link speed and duplex\n");
+                e_dbg("Error getting link speed and duplex\n");
                 return ret_val;
         }
 
@@ -1167,7 +1240,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
                  */
                 ret_val = e1000e_force_mac_fc(hw);
                 if (ret_val) {
-                        hw_dbg(hw, "Error forcing flow control settings\n");
+                        e_dbg("Error forcing flow control settings\n");
                         return ret_val;
                 }
         }
@@ -1191,21 +1264,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup
         status = er32(STATUS);
         if (status & E1000_STATUS_SPEED_1000) {
                 *speed = SPEED_1000;
-                hw_dbg(hw, "1000 Mbs, ");
+                e_dbg("1000 Mbs, ");
         } else if (status & E1000_STATUS_SPEED_100) {
                 *speed = SPEED_100;
-                hw_dbg(hw, "100 Mbs, ");
+                e_dbg("100 Mbs, ");
         } else {
                 *speed = SPEED_10;
-                hw_dbg(hw, "10 Mbs, ");
+                e_dbg("10 Mbs, ");
         }
 
         if (status & E1000_STATUS_FD) {
                 *duplex = FULL_DUPLEX;
-                hw_dbg(hw, "Full Duplex\n");
+                e_dbg("Full Duplex\n");
         } else {
                 *duplex = HALF_DUPLEX;
-                hw_dbg(hw, "Half Duplex\n");
+                e_dbg("Half Duplex\n");
         }
 
         return 0;
@@ -1251,7 +1324,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
         }
 
         if (i == timeout) {
-                hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
+                e_dbg("Driver can't access device - SMBI bit is set.\n");
                 return -E1000_ERR_NVM;
         }
 
@@ -1270,7 +1343,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
         if (i == timeout) {
                 /* Release semaphores */
                 e1000e_put_hw_semaphore(hw);
-                hw_dbg(hw, "Driver can't access the NVM\n");
+                e_dbg("Driver can't access the NVM\n");
                 return -E1000_ERR_NVM;
         }
 
@@ -1310,7 +1383,7 @@ s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
         }
 
         if (i == AUTO_READ_DONE_TIMEOUT) {
-                hw_dbg(hw, "Auto read by HW from NVM has not completed.\n");
+                e_dbg("Auto read by HW from NVM has not completed.\n");
                 return -E1000_ERR_RESET;
         }
 
@@ -1331,7 +1404,7 @@ s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
 
         ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
         if (ret_val) {
-                hw_dbg(hw, "NVM Read Error\n");
+                e_dbg("NVM Read Error\n");
                 return ret_val;
         }
 
@@ -1585,7 +1658,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
         }
 
         if (!timeout) {
-                hw_dbg(hw, "Master requests are pending.\n");
+                e_dbg("Master requests are pending.\n");
                 return -E1000_ERR_MASTER_REQUESTS_PENDING;
         }
 
@@ -1602,14 +1675,21 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
 {
         struct e1000_mac_info *mac = &hw->mac;
 
+        if (!mac->adaptive_ifs) {
+                e_dbg("Not in Adaptive IFS mode!\n");
+                goto out;
+        }
+
         mac->current_ifs_val = 0;
         mac->ifs_min_val = IFS_MIN;
         mac->ifs_max_val = IFS_MAX;
         mac->ifs_step_size = IFS_STEP;
         mac->ifs_ratio = IFS_RATIO;
 
-        mac->in_ifs_mode = 0;
+        mac->in_ifs_mode = false;
         ew32(AIT, 0);
+out:
+        return;
 }
 
 /**
@@ -1623,9 +1703,14 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
 {
         struct e1000_mac_info *mac = &hw->mac;
 
+        if (!mac->adaptive_ifs) {
+                e_dbg("Not in Adaptive IFS mode!\n");
+                goto out;
+        }
+
         if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
                 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
-                        mac->in_ifs_mode = 1;
+                        mac->in_ifs_mode = true;
                         if (mac->current_ifs_val < mac->ifs_max_val) {
                                 if (!mac->current_ifs_val)
                                         mac->current_ifs_val = mac->ifs_min_val;
@@ -1639,10 +1724,12 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
                 if (mac->in_ifs_mode &&
                     (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
                         mac->current_ifs_val = 0;
-                        mac->in_ifs_mode = 0;
+                        mac->in_ifs_mode = false;
                         ew32(AIT, 0);
                 }
         }
+out:
+        return;
 }
 
 /**
@@ -1809,7 +1896,7 @@ s32 e1000e_acquire_nvm(struct e1000_hw *hw)
         if (!timeout) {
                 eecd &= ~E1000_EECD_REQ;
                 ew32(EECD, eecd);
-                hw_dbg(hw, "Could not acquire NVM grant\n");
+                e_dbg("Could not acquire NVM grant\n");
                 return -E1000_ERR_NVM;
         }
 
@@ -1914,7 +2001,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
                 }
 
                 if (!timeout) {
-                        hw_dbg(hw, "SPI NVM Status error\n");
+                        e_dbg("SPI NVM Status error\n");
                         return -E1000_ERR_NVM;
                 }
         }
@@ -1943,7 +2030,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
          */
         if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
             (words == 0)) {
-                hw_dbg(hw, "nvm parameter(s) out of bounds\n");
+                e_dbg("nvm parameter(s) out of bounds\n");
                 return -E1000_ERR_NVM;
         }
 
@@ -1986,11 +2073,11 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
          */
         if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
             (words == 0)) {
-                hw_dbg(hw, "nvm parameter(s) out of bounds\n");
+                e_dbg("nvm parameter(s) out of bounds\n");
                 return -E1000_ERR_NVM;
         }
 
-        ret_val = nvm->ops.acquire_nvm(hw);
+        ret_val = nvm->ops.acquire(hw);
         if (ret_val)
                 return ret_val;
 
@@ -2001,7 +2088,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 
         ret_val = e1000_ready_nvm_eeprom(hw);
         if (ret_val) {
-                nvm->ops.release_nvm(hw);
+                nvm->ops.release(hw);
                 return ret_val;
         }
 
@@ -2040,72 +2127,32 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
         }
 
         msleep(10);
-        nvm->ops.release_nvm(hw);
+        nvm->ops.release(hw);
         return 0;
 }
 
 /**
- * e1000e_read_mac_addr - Read device MAC address
+ * e1000_read_mac_addr_generic - Read device MAC address
  * @hw: pointer to the HW structure
  *
  * Reads the device MAC address from the EEPROM and stores the value.
  * Since devices with two ports use the same EEPROM, we increment the
  * last bit in the MAC address for the second port.
 **/
-s32 e1000e_read_mac_addr(struct e1000_hw *hw)
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
 {
-        s32 ret_val;
-        u16 offset, nvm_data, i;
-        u16 mac_addr_offset = 0;
-
-        if (hw->mac.type == e1000_82571) {
-                /* Check for an alternate MAC address. An alternate MAC
-                 * address can be setup by pre-boot software and must be
-                 * treated like a permanent address and must override the
-                 * actual permanent MAC address.*/
-                ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
-                                         &mac_addr_offset);
-                if (ret_val) {
-                        hw_dbg(hw, "NVM Read Error\n");
-                        return ret_val;
-                }
-                if (mac_addr_offset == 0xFFFF)
-                        mac_addr_offset = 0;
-
-                if (mac_addr_offset) {
-                        if (hw->bus.func == E1000_FUNC_1)
-                                mac_addr_offset += ETH_ALEN/sizeof(u16);
-
-                        /* make sure we have a valid mac address here
-                         * before using it */
-                        ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
-                                                 &nvm_data);
-                        if (ret_val) {
-                                hw_dbg(hw, "NVM Read Error\n");
-                                return ret_val;
-                        }
-                        if (nvm_data & 0x0001)
-                                mac_addr_offset = 0;
-                }
+        u32 rar_high;
+        u32 rar_low;
+        u16 i;
 
-                if (mac_addr_offset)
-                        hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
-        }
+        rar_high = er32(RAH(0));
+        rar_low = er32(RAL(0));
 
-        for (i = 0; i < ETH_ALEN; i += 2) {
-                offset = mac_addr_offset + (i >> 1);
-                ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
-                if (ret_val) {
-                        hw_dbg(hw, "NVM Read Error\n");
-                        return ret_val;
-                }
-                hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
-                hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
-        }
+        for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+                hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
 
-        /* Flip last bit of mac address if we're on second port */
-        if (!mac_addr_offset && hw->bus.func == E1000_FUNC_1)
-                hw->mac.perm_addr[5] ^= 1;
+        for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+                hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
 
         for (i = 0; i < ETH_ALEN; i++)
                 hw->mac.addr[i] = hw->mac.perm_addr[i];
@@ -2129,14 +2176,14 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
         for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
                 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
                 if (ret_val) {
-                        hw_dbg(hw, "NVM Read Error\n");
+                        e_dbg("NVM Read Error\n");
                         return ret_val;
                 }
                 checksum += nvm_data;
         }
 
         if (checksum != (u16) NVM_SUM) {
-                hw_dbg(hw, "NVM Checksum Invalid\n");
+                e_dbg("NVM Checksum Invalid\n");
                 return -E1000_ERR_NVM;
         }
 
@@ -2160,7 +2207,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
         for (i = 0; i < NVM_CHECKSUM_REG; i++) {
                 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
                 if (ret_val) {
-                        hw_dbg(hw, "NVM Read Error while updating checksum.\n");
+                        e_dbg("NVM Read Error while updating checksum.\n");
                         return ret_val;
                 }
                 checksum += nvm_data;
@@ -2168,7 +2215,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
         checksum = (u16) NVM_SUM - checksum;
         ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
         if (ret_val)
-                hw_dbg(hw, "NVM Write Error while updating checksum.\n");
+                e_dbg("NVM Write Error while updating checksum.\n");
 
         return ret_val;
 }
@@ -2231,7 +2278,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
         /* Check that the host interface is enabled. */
         hicr = er32(HICR);
         if ((hicr & E1000_HICR_EN) == 0) {
-                hw_dbg(hw, "E1000_HOST_EN bit disabled.\n");
+                e_dbg("E1000_HOST_EN bit disabled.\n");
                 return -E1000_ERR_HOST_INTERFACE_COMMAND;
         }
         /* check the previous command is completed */
@@ -2243,7 +2290,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
         }
 
         if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
-                hw_dbg(hw, "Previous command timeout failed.\n");
+                e_dbg("Previous command timeout failed.\n");
                 return -E1000_ERR_HOST_INTERFACE_COMMAND;
         }
 
@@ -2280,10 +2327,12 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
         s32 ret_val, hdr_csum, csum;
         u8 i, len;
 
+        hw->mac.tx_pkt_filtering = true;
+
         /* No manageability, no filtering */
         if (!e1000e_check_mng_mode(hw)) {
-                hw->mac.tx_pkt_filtering = 0;
-                return 0;
+                hw->mac.tx_pkt_filtering = false;
+                goto out;
         }
 
         /*
@@ -2291,9 +2340,9 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
          * reason, disable filtering.
          */
         ret_val = e1000_mng_enable_host_if(hw);
-        if (ret_val != 0) {
-                hw->mac.tx_pkt_filtering = 0;
-                return ret_val;
+        if (ret_val) {
+                hw->mac.tx_pkt_filtering = false;
+                goto out;
         }
 
         /* Read in the header. Length and offset are in dwords. */
@@ -2311,18 +2360,18 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
          * take the safe route of assuming Tx filtering is enabled.
          */
         if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
-                hw->mac.tx_pkt_filtering = 1;
-                return 1;
+                hw->mac.tx_pkt_filtering = true;
+                goto out;
         }
 
         /* Cookie area is valid, make the final check for filtering. */
         if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
-                hw->mac.tx_pkt_filtering = 0;
-                return 0;
+                hw->mac.tx_pkt_filtering = false;
+                goto out;
         }
 
-        hw->mac.tx_pkt_filtering = 1;
-        return 1;
+out:
+        return hw->mac.tx_pkt_filtering;
 }
 
 /**
@@ -2353,7 +2402,7 @@ static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
 }
 
 /**
- * e1000_mng_host_if_write - Writes to the manageability host interface
+ * e1000_mng_host_if_write - Write to the manageability host interface
  * @hw: pointer to the HW structure
  * @buffer: pointer to the host interface buffer
  * @length: size of the buffer
@@ -2478,7 +2527,7 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
 {
         u32 manc;
         u32 fwsm, factps;
-        bool ret_val = 0;
+        bool ret_val = false;
 
         manc = er32(MANC);
 
@@ -2493,13 +2542,13 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
                 if (!(factps & E1000_FACTPS_MNGCG) &&
                     ((fwsm & E1000_FWSM_MODE_MASK) ==
                      (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
-                        ret_val = 1;
+                        ret_val = true;
                         return ret_val;
                 }
         } else {
                 if ((manc & E1000_MANC_SMBUS_EN) &&
                     !(manc & E1000_MANC_ASF_EN)) {
-                        ret_val = 1;
+                        ret_val = true;
                         return ret_val;
                 }
         }
@@ -2514,14 +2563,14 @@ s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
 
         ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
         if (ret_val) {
-                hw_dbg(hw, "NVM Read Error\n");
+                e_dbg("NVM Read Error\n");
                 return ret_val;
         }
         *pba_num = (u32)(nvm_data << 16);
 
         ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
         if (ret_val) {
-                hw_dbg(hw, "NVM Read Error\n");
+                e_dbg("NVM Read Error\n");
                 return ret_val;
         }
         *pba_num |= nvm_data;