author    Auke Kok <auke-jan.h.kok@intel.com>  2007-09-17 15:30:59 -0400
committer David S. Miller <davem@sunset.davemloft.net>  2007-10-10 19:50:40 -0400
commit    bc7f75fa97884d41efbfde1397b621fefb2550b4 (patch)
tree      037910bdb72ae1c1fc179f47beb1f9d00803dbf5 /drivers/net/e1000e/lib.c
parent    cbdb9e43d1fc50cfa509b1006e7252dc4ea53aa0 (diff)
[E1000E]: New pci-express e1000 driver (currently for ICH9 devices only)
This driver implements support for the ICH9 on-board LAN ethernet device. The device is similar to ICH8. The driver encompasses code to support 82571/2/3, es2lan and ICH8 devices as well, but those device IDs are disabled and will be "lifted" from the e1000 driver over one at a time once this driver receives some more live time.

Changes to the last snapshot posted are exclusively in the internal hardware API organization. Many thanks to Jeff Garzik for jumping in and getting this organized with a keen eye on the future layout.

[ Integrated napi_struct patch from Auke as well... -DaveM ]

Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/e1000e/lib.c')
-rw-r--r--  drivers/net/e1000e/lib.c  2487
1 files changed, 2487 insertions, 0 deletions
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
new file mode 100644
index 000000000000..3bbfe605e111
--- /dev/null
+++ b/drivers/net/e1000e/lib.c
@@ -0,0 +1,2487 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include <linux/netdevice.h>
30#include <linux/ethtool.h>
31#include <linux/delay.h>
32#include <linux/pci.h>
33
34#include "e1000.h"
35
36enum e1000_mng_mode {
37 e1000_mng_mode_none = 0,
38 e1000_mng_mode_asf,
39 e1000_mng_mode_pt,
40 e1000_mng_mode_ipmi,
41 e1000_mng_mode_host_if_only
42};
43
44#define E1000_FACTPS_MNGCG 0x20000000
45
46#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management
47 * Technology signature */
48
49/**
50 * e1000e_get_bus_info_pcie - Get PCIe bus information
51 * @hw: pointer to the HW structure
52 *
53 * Determines and stores the system bus information for a particular
54 * network interface. The following bus information is determined and stored:
55 * bus speed, bus width, type (PCIe), and PCIe function.
56 **/
57s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
58{
59 struct e1000_bus_info *bus = &hw->bus;
60 struct e1000_adapter *adapter = hw->adapter;
61 u32 status;
62 u16 pcie_link_status, pci_header_type, cap_offset;
63
64 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
65 if (!cap_offset) {
66 bus->width = e1000_bus_width_unknown;
67 } else {
68 pci_read_config_word(adapter->pdev,
69 cap_offset + PCIE_LINK_STATUS,
70 &pcie_link_status);
71 bus->width = (enum e1000_bus_width)((pcie_link_status &
72 PCIE_LINK_WIDTH_MASK) >>
73 PCIE_LINK_WIDTH_SHIFT);
74 }
75
76 pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
77 &pci_header_type);
78 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
79 status = er32(STATUS);
80 bus->func = (status & E1000_STATUS_FUNC_MASK)
81 >> E1000_STATUS_FUNC_SHIFT;
82 } else {
83 bus->func = 0;
84 }
85
86 return 0;
87}
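/* A worked example of the width decode above, assuming the standard PCIe
 * Link Status register layout (negotiated link width in bits 9:4, which is
 * what PCIE_LINK_WIDTH_MASK/PCIE_LINK_WIDTH_SHIFT extract): a hypothetical
 * pcie_link_status value of 0x1041 yields (0x1041 & 0x3f0) >> 4 = 4, so
 * bus->width records a x4 link.
 */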
88
89/**
90 * e1000e_write_vfta - Write value to VLAN filter table
91 * @hw: pointer to the HW structure
92 * @offset: register offset in VLAN filter table
93 * @value: register value written to VLAN filter table
94 *
95 * Writes value at the given offset in the register array which stores
96 * the VLAN filter table.
97 **/
98void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
99{
100 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
101 e1e_flush();
102}
103
104/**
105 * e1000e_init_rx_addrs - Initialize receive addresses
106 * @hw: pointer to the HW structure
107 * @rar_count: receive address registers
108 *
109 * Sets up the receive address registers by setting the base receive address
110 * register to the device's MAC address and clearing all the other receive
111 * address registers to 0.
112 **/
113void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
114{
115 u32 i;
116
117 /* Setup the receive address */
118 hw_dbg(hw, "Programming MAC Address into RAR[0]\n");
119
120 e1000e_rar_set(hw, hw->mac.addr, 0);
121
122 /* Zero out the other (rar_entry_count - 1) receive addresses */
123 hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1);
124 for (i = 1; i < rar_count; i++) {
125 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
126 e1e_flush();
127 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
128 e1e_flush();
129 }
130}
131
132/**
133 * e1000e_rar_set - Set receive address register
134 * @hw: pointer to the HW structure
135 * @addr: pointer to the receive address
136 * @index: receive address array register
137 *
138 * Sets the receive address array register at index to the address passed
139 * in by addr.
140 **/
141void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
142{
143 u32 rar_low, rar_high;
144
145 /* HW expects these in little endian so we reverse the byte order
146 * from network order (big endian) to little endian
147 */
148 rar_low = ((u32) addr[0] |
149 ((u32) addr[1] << 8) |
150 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
151
152 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
153
154 rar_high |= E1000_RAH_AV;
155
156 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
157 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
158}
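/* A worked example of the packing above: for the address 01:AA:00:12:34:56,
 * rar_low ends up as 0x1200AA01 (byte 0 in the least significant byte) and
 * rar_high as 0x5634 with E1000_RAH_AV OR'd in to mark the entry valid.
 */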
159
160/**
161 * e1000_mta_set - Set multicast filter table address
162 * @hw: pointer to the HW structure
163 * @hash_value: determines the MTA register and bit to set
164 *
165 * The multicast table address is a register array of 32-bit registers.
166 * The hash_value is used to determine what register the bit is in, the
167 * current value is read, the new bit is OR'd in and the new value is
168 * written back into the register.
169 **/
170static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
171{
172 u32 hash_bit, hash_reg, mta;
173
174 /* The MTA is a register array of 32-bit registers. It is
175 * treated like an array of (32*mta_reg_count) bits. We want to
176 * set bit BitArray[hash_value]. So we figure out what register
177 * the bit is in, read it, OR in the new bit, then write
178 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
179 * mask to bits 31:5 of the hash value which gives us the
180 * register we're modifying. The hash bit within that register
181 * is determined by the lower 5 bits of the hash value.
182 */
183 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
184 hash_bit = hash_value & 0x1F;
185
186 mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
187
188 mta |= (1 << hash_bit);
189
190 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
191 e1e_flush();
192}
193
194/**
195 * e1000_hash_mc_addr - Generate a multicast hash value
196 * @hw: pointer to the HW structure
197 * @mc_addr: pointer to a multicast address
198 *
199 * Generates a multicast address hash value which is used to determine
200 * the multicast filter table array address and new table value. See
201 * e1000_mta_set_generic()
202 **/
203static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
204{
205 u32 hash_value, hash_mask;
206 u8 bit_shift = 0;
207
208 /* Register count multiplied by bits per register */
209 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
210
211 /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
212 * where 0xFF would still fall within the hash mask. */
213 while (hash_mask >> bit_shift != 0xFF)
214 bit_shift++;
215
216 /* The portion of the address that is used for the hash table
217 * is determined by the mc_filter_type setting.
218 * The algorithm is such that there is a total of 8 bits of shifting.
219 * The bit_shift for a mc_filter_type of 0 represents the number of
220 * left-shifts where the MSB of mc_addr[5] would still fall within
221 * the hash_mask. Case 0 does this exactly. Since there are a total
222 * of 8 bits of shifting, then mc_addr[4] will shift right the
223 * remaining number of bits. Thus 8 - bit_shift. The rest of the
224 * cases are a variation of this algorithm...essentially raising the
225 * number of bits to shift mc_addr[5] left, while still keeping the
226 * 8-bit shifting total.
227 */
228 /* For example, given the following Destination MAC Address and an
229 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
230 * we can see that the bit_shift for case 0 is 4. These are the hash
231 * values resulting from each mc_filter_type...
232 * [0] [1] [2] [3] [4] [5]
233 * 01 AA 00 12 34 56
234 * LSB MSB
235 *
236 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
237 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
238 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
239 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
240 */
241 switch (hw->mac.mc_filter_type) {
242 default:
243 case 0:
244 break;
245 case 1:
246 bit_shift += 1;
247 break;
248 case 2:
249 bit_shift += 2;
250 break;
251 case 3:
252 bit_shift += 4;
253 break;
254 }
255
256 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
257 (((u16) mc_addr[5]) << bit_shift)));
258
259 return hash_value;
260}
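/* Continuing the case 0 example above (hash_value = 0x563 with 128 MTA
 * registers), e1000_mta_set() would use hash_reg = (0x563 >> 5) & 0x7F = 0x2B
 * and hash_bit = 0x563 & 0x1F = 0x03, i.e. bit 3 of MTA register 43 is set
 * for this multicast address.
 */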
261
262/**
263 * e1000e_mc_addr_list_update_generic - Update Multicast addresses
264 * @hw: pointer to the HW structure
265 * @mc_addr_list: array of multicast addresses to program
266 * @mc_addr_count: number of multicast addresses to program
267 * @rar_used_count: the first RAR register free to program
268 * @rar_count: total number of supported Receive Address Registers
269 *
270 * Updates the Receive Address Registers and Multicast Table Array.
271 * The caller must have a packed mc_addr_list of multicast addresses.
272 * The parameter rar_count will usually be hw->mac.rar_entry_count
273 * unless there are workarounds that change this.
274 **/
275void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw,
276 u8 *mc_addr_list, u32 mc_addr_count,
277 u32 rar_used_count, u32 rar_count)
278{
279 u32 hash_value;
280 u32 i;
281
282 /* Load the first set of multicast addresses into the exact
283 * filters (RAR). If there are not enough to fill the RAR
284 * array, clear the filters.
285 */
286 for (i = rar_used_count; i < rar_count; i++) {
287 if (mc_addr_count) {
288 e1000e_rar_set(hw, mc_addr_list, i);
289 mc_addr_count--;
290 mc_addr_list += ETH_ALEN;
291 } else {
292 E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
293 e1e_flush();
294 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
295 e1e_flush();
296 }
297 }
298
299 /* Clear the old settings from the MTA */
300 hw_dbg(hw, "Clearing MTA\n");
301 for (i = 0; i < hw->mac.mta_reg_count; i++) {
302 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
303 e1e_flush();
304 }
305
306 /* Load any remaining multicast addresses into the hash table. */
307 for (; mc_addr_count > 0; mc_addr_count--) {
308 hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
309 hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
310 e1000_mta_set(hw, hash_value);
311 mc_addr_list += ETH_ALEN;
312 }
313}
314
315/**
316 * e1000e_clear_hw_cntrs_base - Clear base hardware counters
317 * @hw: pointer to the HW structure
318 *
319 * Clears the base hardware counters by reading the counter registers.
320 **/
321void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
322{
323 u32 temp;
324
325 temp = er32(CRCERRS);
326 temp = er32(SYMERRS);
327 temp = er32(MPC);
328 temp = er32(SCC);
329 temp = er32(ECOL);
330 temp = er32(MCC);
331 temp = er32(LATECOL);
332 temp = er32(COLC);
333 temp = er32(DC);
334 temp = er32(SEC);
335 temp = er32(RLEC);
336 temp = er32(XONRXC);
337 temp = er32(XONTXC);
338 temp = er32(XOFFRXC);
339 temp = er32(XOFFTXC);
340 temp = er32(FCRUC);
341 temp = er32(GPRC);
342 temp = er32(BPRC);
343 temp = er32(MPRC);
344 temp = er32(GPTC);
345 temp = er32(GORCL);
346 temp = er32(GORCH);
347 temp = er32(GOTCL);
348 temp = er32(GOTCH);
349 temp = er32(RNBC);
350 temp = er32(RUC);
351 temp = er32(RFC);
352 temp = er32(ROC);
353 temp = er32(RJC);
354 temp = er32(TORL);
355 temp = er32(TORH);
356 temp = er32(TOTL);
357 temp = er32(TOTH);
358 temp = er32(TPR);
359 temp = er32(TPT);
360 temp = er32(MPTC);
361 temp = er32(BPTC);
362}
363
364/**
365 * e1000e_check_for_copper_link - Check for link (Copper)
366 * @hw: pointer to the HW structure
367 *
368 * Checks to see if the link status of the hardware has changed. If a
369 * change in link status has been detected, then we read the PHY registers
370 * to get the current speed/duplex if link exists.
371 **/
372s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
373{
374 struct e1000_mac_info *mac = &hw->mac;
375 s32 ret_val;
376 bool link;
377
378 /* We only want to go out to the PHY registers to see if Auto-Neg
379 * has completed and/or if our link status has changed. The
380 * get_link_status flag is set upon receiving a Link Status
381 * Change or Rx Sequence Error interrupt.
382 */
383 if (!mac->get_link_status)
384 return 0;
385
386 /* First we want to see if the MII Status Register reports
387 * link. If so, then we want to get the current speed/duplex
388 * of the PHY.
389 */
390 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
391 if (ret_val)
392 return ret_val;
393
394 if (!link)
395 return ret_val; /* No link detected */
396
397 mac->get_link_status = 0;
398
399 /* Check if there was DownShift, must be checked
400 * immediately after link-up */
401 e1000e_check_downshift(hw);
402
403 /* If we are forcing speed/duplex, then we simply return since
404 * we have already determined whether we have link or not.
405 */
406 if (!mac->autoneg) {
407 ret_val = -E1000_ERR_CONFIG;
408 return ret_val;
409 }
410
411 /* Auto-Neg is enabled. Auto Speed Detection takes care
412 * of MAC speed/duplex configuration. So we only need to
413 * configure Collision Distance in the MAC.
414 */
415 e1000e_config_collision_dist(hw);
416
417 /* Configure Flow Control now that Auto-Neg has completed.
418 * First, we need to restore the desired flow control
419 * settings because we may have had to re-autoneg with a
420 * different link partner.
421 */
422 ret_val = e1000e_config_fc_after_link_up(hw);
423 if (ret_val) {
424 hw_dbg(hw, "Error configuring flow control\n");
425 }
426
427 return ret_val;
428}
429
430/**
431 * e1000e_check_for_fiber_link - Check for link (Fiber)
432 * @hw: pointer to the HW structure
433 *
434 * Checks for link up on the hardware. If link is not up and we have
435 * a signal, then we need to force link up.
436 **/
437s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
438{
439 struct e1000_mac_info *mac = &hw->mac;
440 u32 rxcw;
441 u32 ctrl;
442 u32 status;
443 s32 ret_val;
444
445 ctrl = er32(CTRL);
446 status = er32(STATUS);
447 rxcw = er32(RXCW);
448
449 /* If we don't have link (auto-negotiation failed or link partner
450 * cannot auto-negotiate), the cable is plugged in (we have signal),
451 * and our link partner is not trying to auto-negotiate with us (we
452 * are receiving idles or data), we need to force link up. We also
453 * need to give auto-negotiation time to complete, in case the cable
454 * was just plugged in. The autoneg_failed flag does this.
455 */
456 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
457 if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
458 (!(rxcw & E1000_RXCW_C))) {
459 if (mac->autoneg_failed == 0) {
460 mac->autoneg_failed = 1;
461 return 0;
462 }
463 hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
464
465 /* Disable auto-negotiation in the TXCW register */
466 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
467
468 /* Force link-up and also force full-duplex. */
469 ctrl = er32(CTRL);
470 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
471 ew32(CTRL, ctrl);
472
473 /* Configure Flow Control after forcing link up. */
474 ret_val = e1000e_config_fc_after_link_up(hw);
475 if (ret_val) {
476 hw_dbg(hw, "Error configuring flow control\n");
477 return ret_val;
478 }
479 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
480 /* If we are forcing link and we are receiving /C/ ordered
481 * sets, re-enable auto-negotiation in the TXCW register
482 * and disable forced link in the Device Control register
483 * in an attempt to auto-negotiate with our link partner.
484 */
485 hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
486 ew32(TXCW, mac->txcw);
487 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
488
489 mac->serdes_has_link = 1;
490 }
491
492 return 0;
493}
494
495/**
496 * e1000e_check_for_serdes_link - Check for link (Serdes)
497 * @hw: pointer to the HW structure
498 *
499 * Checks for link up on the hardware. If link is not up and we have
500 * a signal, then we need to force link up.
501 **/
502s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
503{
504 struct e1000_mac_info *mac = &hw->mac;
505 u32 rxcw;
506 u32 ctrl;
507 u32 status;
508 s32 ret_val;
509
510 ctrl = er32(CTRL);
511 status = er32(STATUS);
512 rxcw = er32(RXCW);
513
514 /* If we don't have link (auto-negotiation failed or link partner
515 * cannot auto-negotiate), and our link partner is not trying to
516 * auto-negotiate with us (we are receiving idles or data),
517 * we need to force link up. We also need to give auto-negotiation
518 * time to complete.
519 */
520 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
521 if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
522 if (mac->autoneg_failed == 0) {
523 mac->autoneg_failed = 1;
524 return 0;
525 }
526 hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
527
528 /* Disable auto-negotiation in the TXCW register */
529 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
530
531 /* Force link-up and also force full-duplex. */
532 ctrl = er32(CTRL);
533 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
534 ew32(CTRL, ctrl);
535
536 /* Configure Flow Control after forcing link up. */
537 ret_val = e1000e_config_fc_after_link_up(hw);
538 if (ret_val) {
539 hw_dbg(hw, "Error configuring flow control\n");
540 return ret_val;
541 }
542 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
543 /* If we are forcing link and we are receiving /C/ ordered
544 * sets, re-enable auto-negotiation in the TXCW register
545 * and disable forced link in the Device Control register
546 * in an attempt to auto-negotiate with our link partner.
547 */
548 hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
549 ew32(TXCW, mac->txcw);
550 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
551
552 mac->serdes_has_link = 1;
553 } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
554 /* If we force link for non-auto-negotiation switch, check
555 * link status based on MAC synchronization for internal
556 * serdes media type.
557 */
558 /* SYNCH bit and IV bit are sticky. */
559 udelay(10);
560 if (E1000_RXCW_SYNCH & er32(RXCW)) {
561 if (!(rxcw & E1000_RXCW_IV)) {
562 mac->serdes_has_link = 1;
563 hw_dbg(hw, "SERDES: Link is up.\n");
564 }
565 } else {
566 mac->serdes_has_link = 0;
567 hw_dbg(hw, "SERDES: Link is down.\n");
568 }
569 }
570
571 if (E1000_TXCW_ANE & er32(TXCW)) {
572 status = er32(STATUS);
573 mac->serdes_has_link = (status & E1000_STATUS_LU);
574 }
575
576 return 0;
577}
578
579/**
580 * e1000_set_default_fc_generic - Set flow control default values
581 * @hw: pointer to the HW structure
582 *
583 * Read the EEPROM for the default values for flow control and store the
584 * values.
585 **/
586static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
587{
588 struct e1000_mac_info *mac = &hw->mac;
589 s32 ret_val;
590 u16 nvm_data;
591
592 if (mac->fc != e1000_fc_default)
593 return 0;
594
595 /* Read and store word 0x0F of the EEPROM. This word contains bits
596 * that determine the hardware's default PAUSE (flow control) mode,
597 * a bit that determines whether the HW defaults to enabling or
598 * disabling auto-negotiation, and the direction of the
599 * SW defined pins. If there is no SW over-ride of the flow
600 * control setting, then the variable hw->fc will
601 * be initialized based on a value in the EEPROM.
602 */
603 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
604
605 if (ret_val) {
606 hw_dbg(hw, "NVM Read Error\n");
607 return ret_val;
608 }
609
610 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
611 mac->fc = e1000_fc_none;
612 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
613 NVM_WORD0F_ASM_DIR)
614 mac->fc = e1000_fc_tx_pause;
615 else
616 mac->fc = e1000_fc_full;
617
618 return 0;
619}
620
621/**
622 * e1000e_setup_link - Setup flow control and link settings
623 * @hw: pointer to the HW structure
624 *
625 * Determines which flow control settings to use, then configures flow
626 * control. Calls the appropriate media-specific link configuration
627 * function. Assuming the adapter has a valid link partner, a valid link
628 * should be established. Assumes the hardware has previously been reset
629 * and the transmitter and receiver are not enabled.
630 **/
631s32 e1000e_setup_link(struct e1000_hw *hw)
632{
633 struct e1000_mac_info *mac = &hw->mac;
634 s32 ret_val;
635
636 /* In the case of the phy reset being blocked, we already have a link.
637 * We do not need to set it up again.
638 */
639 if (e1000_check_reset_block(hw))
640 return 0;
641
642 ret_val = e1000_set_default_fc_generic(hw);
643 if (ret_val)
644 return ret_val;
645
646 /* We want to save off the original Flow Control configuration just
647 * in case we get disconnected and then reconnected into a different
648 * hub or switch with different Flow Control capabilities.
649 */
650 mac->original_fc = mac->fc;
651
652 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc);
653
654 /* Call the necessary media_type subroutine to configure the link. */
655 ret_val = mac->ops.setup_physical_interface(hw);
656 if (ret_val)
657 return ret_val;
658
659 /* Initialize the flow control address, type, and PAUSE timer
660 * registers to their default values. This is done even if flow
661 * control is disabled, because it does not hurt anything to
662 * initialize these registers.
663 */
664 hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n");
665 ew32(FCT, FLOW_CONTROL_TYPE);
666 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
667 ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
668
669 ew32(FCTTV, mac->fc_pause_time);
670
671 return e1000e_set_fc_watermarks(hw);
672}
673
674/**
675 * e1000_commit_fc_settings_generic - Configure flow control
676 * @hw: pointer to the HW structure
677 *
678 * Write the flow control settings to the Transmit Config Word Register (TXCW)
679 * based on the flow control settings in e1000_mac_info.
680 **/
681static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
682{
683 struct e1000_mac_info *mac = &hw->mac;
684 u32 txcw;
685
686 /* Check for a software override of the flow control settings, and
687 * setup the device accordingly. If auto-negotiation is enabled, then
688 * software will have to set the "PAUSE" bits to the correct value in
689 * the Transmit Config Word Register (TXCW) and re-start auto-
690 * negotiation. However, if auto-negotiation is disabled, then
691 * software will have to manually configure the two flow control enable
692 * bits in the CTRL register.
693 *
694 * The possible values of the "fc" parameter are:
695 * 0: Flow control is completely disabled
696 * 1: Rx flow control is enabled (we can receive pause frames,
697 * but not send pause frames).
698 * 2: Tx flow control is enabled (we can send pause frames but we
699 * do not support receiving pause frames).
700 * 3: Both Rx and TX flow control (symmetric) are enabled.
701 */
702 switch (mac->fc) {
703 case e1000_fc_none:
704 /* Flow control completely disabled by a software over-ride. */
705 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
706 break;
707 case e1000_fc_rx_pause:
708 /* RX Flow control is enabled and TX Flow control is disabled
709 * by a software over-ride. Since there really isn't a way to
710 * advertise that we are capable of RX Pause ONLY, we will
711 * advertise that we support both symmetric and asymmetric RX
712 * PAUSE. Later, we will disable the adapter's ability to send
713 * PAUSE frames.
714 */
715 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
716 break;
717 case e1000_fc_tx_pause:
718 /* TX Flow control is enabled, and RX Flow control is disabled,
719 * by a software over-ride.
720 */
721 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
722 break;
723 case e1000_fc_full:
724 /* Flow control (both RX and TX) is enabled by a software
725 * over-ride.
726 */
727 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
728 break;
729 default:
730 hw_dbg(hw, "Flow control param set incorrectly\n");
731 return -E1000_ERR_CONFIG;
732 break;
733 }
734
735 ew32(TXCW, txcw);
736 mac->txcw = txcw;
737
738 return 0;
739}
740
741/**
742 * e1000_poll_fiber_serdes_link_generic - Poll for link up
743 * @hw: pointer to the HW structure
744 *
745 * Polls for link up by reading the status register, if link fails to come
746 * up with auto-negotiation, then the link is forced if a signal is detected.
747 **/
748static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
749{
750 struct e1000_mac_info *mac = &hw->mac;
751 u32 i, status;
752 s32 ret_val;
753
754 /* If we have a signal (the cable is plugged in, or assumed true for
755 * serdes media) then poll for a "Link-Up" indication in the Device
756 * Status Register. Time-out if a link isn't seen in 500 milliseconds
757 * (Auto-negotiation should complete in less than 500
758 * milliseconds even if the other end is doing it in SW).
759 */
760 for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
761 msleep(10);
762 status = er32(STATUS);
763 if (status & E1000_STATUS_LU)
764 break;
765 }
766 if (i == FIBER_LINK_UP_LIMIT) {
767 hw_dbg(hw, "Never got a valid link from auto-neg!!!\n");
768 mac->autoneg_failed = 1;
769 /* AutoNeg failed to achieve a link, so we'll call
770 * mac->check_for_link. This routine will force the
771 * link up if we detect a signal. This will allow us to
772 * communicate with non-autonegotiating link partners.
773 */
774 ret_val = mac->ops.check_for_link(hw);
775 if (ret_val) {
776 hw_dbg(hw, "Error while checking for link\n");
777 return ret_val;
778 }
779 mac->autoneg_failed = 0;
780 } else {
781 mac->autoneg_failed = 0;
782 hw_dbg(hw, "Valid Link Found\n");
783 }
784
785 return 0;
786}
787
788/**
789 * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
790 * @hw: pointer to the HW structure
791 *
792 * Configures collision distance and flow control for fiber and serdes
793 * links. Upon successful setup, poll for link.
794 **/
795s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
796{
797 u32 ctrl;
798 s32 ret_val;
799
800 ctrl = er32(CTRL);
801
802 /* Take the link out of reset */
803 ctrl &= ~E1000_CTRL_LRST;
804
805 e1000e_config_collision_dist(hw);
806
807 ret_val = e1000_commit_fc_settings_generic(hw);
808 if (ret_val)
809 return ret_val;
810
811 /* Since auto-negotiation is enabled, take the link out of reset (the
812 * link will be in reset, because we previously reset the chip). This
813 * will restart auto-negotiation. If auto-negotiation is successful
814 * then the link-up status bit will be set and the flow control enable
815 * bits (RFCE and TFCE) will be set according to their negotiated value.
816 */
817 hw_dbg(hw, "Auto-negotiation enabled\n");
818
819 ew32(CTRL, ctrl);
820 e1e_flush();
821 msleep(1);
822
823 * For these adapters, the SW definable pin 1 is set when the optics
824 * detect a signal. If we have a signal, then poll for a "Link-Up"
825 * indication.
826 */
827 if (hw->media_type == e1000_media_type_internal_serdes ||
828 (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
829 ret_val = e1000_poll_fiber_serdes_link_generic(hw);
830 } else {
831 hw_dbg(hw, "No signal detected\n");
832 }
833
834 return 0;
835}
836
837/**
838 * e1000e_config_collision_dist - Configure collision distance
839 * @hw: pointer to the HW structure
840 *
841 * Configures the collision distance to the default value and is used
842 * during link setup. Currently no func pointer exists and all
843 * implementations are handled in the generic version of this function.
844 **/
845void e1000e_config_collision_dist(struct e1000_hw *hw)
846{
847 u32 tctl;
848
849 tctl = er32(TCTL);
850
851 tctl &= ~E1000_TCTL_COLD;
852 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
853
854 ew32(TCTL, tctl);
855 e1e_flush();
856}
857
858/**
859 * e1000e_set_fc_watermarks - Set flow control high/low watermarks
860 * @hw: pointer to the HW structure
861 *
862 * Sets the flow control high/low threshold (watermark) registers. If
863 * flow control XON frame transmission is enabled, then set XON frame
864 * transmission as well.
865 **/
866s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
867{
868 struct e1000_mac_info *mac = &hw->mac;
869 u32 fcrtl = 0, fcrth = 0;
870
871 /* Set the flow control receive threshold registers. Normally,
872 * these registers will be set to a default threshold that may be
873 * adjusted later by the driver's runtime code. However, if the
874 * ability to transmit pause frames is not enabled, then these
875 * registers will be set to 0.
876 */
877 if (mac->fc & e1000_fc_tx_pause) {
878 /* We need to set up the Receive Threshold high and low water
879 * marks as well as (optionally) enabling the transmission of
880 * XON frames.
881 */
882 fcrtl = mac->fc_low_water;
883 fcrtl |= E1000_FCRTL_XONE;
884 fcrth = mac->fc_high_water;
885 }
886 ew32(FCRTL, fcrtl);
887 ew32(FCRTH, fcrth);
888
889 return 0;
890}
891
892/**
893 * e1000e_force_mac_fc - Force the MAC's flow control settings
894 * @hw: pointer to the HW structure
895 *
896 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
897 * device control register to reflect the adapter settings. TFCE and RFCE
898 * need to be explicitly set by software when a copper PHY is used because
899 * autonegotiation is managed by the PHY rather than the MAC. Software must
900 * also configure these bits when link is forced on a fiber connection.
901 **/
902s32 e1000e_force_mac_fc(struct e1000_hw *hw)
903{
904 struct e1000_mac_info *mac = &hw->mac;
905 u32 ctrl;
906
907 ctrl = er32(CTRL);
908
909 /* Because we didn't get link via the internal auto-negotiation
910 * mechanism (we either forced link or we got link via PHY
911 * auto-neg), we have to manually enable/disable transmit and
912 * receive flow control.
913 *
914 * The "Case" statement below enables/disable flow control
915 * according to the "mac->fc" parameter.
916 *
917 * The possible values of the "fc" parameter are:
918 * 0: Flow control is completely disabled
919 * 1: Rx flow control is enabled (we can receive pause
920 * frames but not send pause frames).
921 * 2: Tx flow control is enabled (we can send pause frames
922 * but we do not receive pause frames).
923 * 3: Both Rx and TX flow control (symmetric) is enabled.
924 * other: No other values should be possible at this point.
925 */
926 hw_dbg(hw, "mac->fc = %u\n", mac->fc);
927
928 switch (mac->fc) {
929 case e1000_fc_none:
930 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
931 break;
932 case e1000_fc_rx_pause:
933 ctrl &= (~E1000_CTRL_TFCE);
934 ctrl |= E1000_CTRL_RFCE;
935 break;
936 case e1000_fc_tx_pause:
937 ctrl &= (~E1000_CTRL_RFCE);
938 ctrl |= E1000_CTRL_TFCE;
939 break;
940 case e1000_fc_full:
941 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
942 break;
943 default:
944 hw_dbg(hw, "Flow control param set incorrectly\n");
945 return -E1000_ERR_CONFIG;
946 }
947
948 ew32(CTRL, ctrl);
949
950 return 0;
951}
952
953/**
954 * e1000e_config_fc_after_link_up - Configures flow control after link
955 * @hw: pointer to the HW structure
956 *
957 * Checks the status of auto-negotiation after link up to ensure that the
958 * speed and duplex were not forced. If the link needed to be forced, then
959 * flow control needs to be forced also. If auto-negotiation is enabled
960 * and did not fail, then we configure flow control based on our link
961 * partner.
962 **/
963s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
964{
965 struct e1000_mac_info *mac = &hw->mac;
966 s32 ret_val = 0;
967 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
968 u16 speed, duplex;
969
970 /* Check for the case where we have fiber media and auto-neg failed
971 * so we had to force link. In this case, we need to force the
972 * configuration of the MAC to match the "fc" parameter.
973 */
974 if (mac->autoneg_failed) {
975 if (hw->media_type == e1000_media_type_fiber ||
976 hw->media_type == e1000_media_type_internal_serdes)
977 ret_val = e1000e_force_mac_fc(hw);
978 } else {
979 if (hw->media_type == e1000_media_type_copper)
980 ret_val = e1000e_force_mac_fc(hw);
981 }
982
983 if (ret_val) {
984 hw_dbg(hw, "Error forcing flow control settings\n");
985 return ret_val;
986 }
987
988 /* Check for the case where we have copper media and auto-neg is
989 * enabled. In this case, we need to check and see if Auto-Neg
990 * has completed, and if so, how the PHY and link partner has
991 * flow control configured.
992 */
993 if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) {
994 /* Read the MII Status Register and check to see if AutoNeg
995 * has completed. We read this twice because this reg has
996 * some "sticky" (latched) bits.
997 */
998 ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
999 if (ret_val)
1000 return ret_val;
1001 ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
1002 if (ret_val)
1003 return ret_val;
1004
1005 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
1006 hw_dbg(hw, "Copper PHY and Auto Neg "
1007 "has not completed.\n");
1008 return ret_val;
1009 }
1010
1011 /* The AutoNeg process has completed, so we now need to
1012 * read both the Auto Negotiation Advertisement
1013 * Register (Address 4) and the Auto_Negotiation Base
1014 * Page Ability Register (Address 5) to determine how
1015 * flow control was negotiated.
1016 */
1017 ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
1018 if (ret_val)
1019 return ret_val;
1020 ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
1021 if (ret_val)
1022 return ret_val;
1023
1024 /* Two bits in the Auto Negotiation Advertisement Register
1025 * (Address 4) and two bits in the Auto Negotiation Base
1026 * Page Ability Register (Address 5) determine flow control
1027 * for both the PHY and the link partner. The following
1028 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1029 * 1999, describes these PAUSE resolution bits and how flow
1030 * control is determined based upon these settings.
1031 * NOTE: DC = Don't Care
1032 *
1033 * LOCAL DEVICE | LINK PARTNER
1034 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1035 *-------|---------|-------|---------|--------------------
1036 * 0 | 0 | DC | DC | e1000_fc_none
1037 * 0 | 1 | 0 | DC | e1000_fc_none
1038 * 0 | 1 | 1 | 0 | e1000_fc_none
1039 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1040 * 1 | 0 | 0 | DC | e1000_fc_none
1041 * 1 | DC | 1 | DC | e1000_fc_full
1042 * 1 | 1 | 0 | 0 | e1000_fc_none
1043 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1044 *
1045 */
1046 /* Are both PAUSE bits set to 1? If so, this implies
1047 * Symmetric Flow Control is enabled at both ends. The
1048 * ASM_DIR bits are irrelevant per the spec.
1049 *
1050 * For Symmetric Flow Control:
1051 *
1052 * LOCAL DEVICE | LINK PARTNER
1053 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1054 *-------|---------|-------|---------|--------------------
1055 * 1 | DC | 1 | DC | E1000_fc_full
1056 *
1057 */
1058 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1059 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
1060 /* Now we need to check if the user selected RX ONLY
1061 * of pause frames. In this case, we had to advertise
1062 * FULL flow control because we could not advertise RX
1063 * ONLY. Hence, we must now check to see if we need to
1064 * turn OFF the TRANSMISSION of PAUSE frames.
1065 */
1066 if (mac->original_fc == e1000_fc_full) {
1067 mac->fc = e1000_fc_full;
1068 hw_dbg(hw, "Flow Control = FULL.\r\n");
1069 } else {
1070 mac->fc = e1000_fc_rx_pause;
1071 hw_dbg(hw, "Flow Control = "
1072 "RX PAUSE frames only.\r\n");
1073 }
1074 }
1075 /* For receiving PAUSE frames ONLY.
1076 *
1077 * LOCAL DEVICE | LINK PARTNER
1078 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1079 *-------|---------|-------|---------|--------------------
1080 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1081 *
1082 */
1083 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1084 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1085 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1086 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1087 mac->fc = e1000_fc_tx_pause;
1088 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n");
1089 }
1090 /* For transmitting PAUSE frames ONLY.
1091 *
1092 * LOCAL DEVICE | LINK PARTNER
1093 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1094 *-------|---------|-------|---------|--------------------
1095 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1096 *
1097 */
1098 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1099 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1100 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1101 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1102 mac->fc = e1000_fc_rx_pause;
1103 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
1104 }
1105 /* Per the IEEE spec, at this point flow control should be
1106 * disabled. However, we want to consider that we could
1107 * be connected to a legacy switch that doesn't advertise
1108 * desired flow control, but can be forced on the link
1109 * partner. So if we advertised no flow control, that is
1110 * what we will resolve to. If we advertised some kind of
1111 * receive capability (Rx Pause Only or Full Flow Control)
1112 * and the link partner advertised none, we will configure
1113 * ourselves to enable Rx Flow Control only. We can do
1114 * this safely for two reasons: If the link partner really
1115 * didn't want flow control enabled, and we enable Rx, no
1116 * harm done since we won't be receiving any PAUSE frames
1117 * anyway. If the intent on the link partner was to have
1118 * flow control enabled, then by us enabling RX only, we
1119 * can at least receive pause frames and process them.
1120 * This is a good idea because in most cases, since we are
1121 * predominantly a server NIC, more often than not we will
1122 * be asked to delay transmission of packets rather than to ask
1123 * our link partner to pause transmission of frames.
1124 */
1125 else if ((mac->original_fc == e1000_fc_none) ||
1126 (mac->original_fc == e1000_fc_tx_pause)) {
1127 mac->fc = e1000_fc_none;
1128 hw_dbg(hw, "Flow Control = NONE.\r\n");
1129 } else {
1130 mac->fc = e1000_fc_rx_pause;
1131 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
1132 }
1133
1134 /* Now we need to do one last check... If we auto-
1135 * negotiated to HALF DUPLEX, flow control should not be
1136 * enabled per IEEE 802.3 spec.
1137 */
1138 ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
1139 if (ret_val) {
1140 hw_dbg(hw, "Error getting link speed and duplex\n");
1141 return ret_val;
1142 }
1143
1144 if (duplex == HALF_DUPLEX)
1145 mac->fc = e1000_fc_none;
1146
1147 /* Now we call a subroutine to actually force the MAC
1148 * controller to use the correct flow control settings.
1149 */
1150 ret_val = e1000e_force_mac_fc(hw);
1151 if (ret_val) {
1152 hw_dbg(hw, "Error forcing flow control settings\n");
1153 return ret_val;
1154 }
1155 }
1156
1157 return 0;
1158}
1159
1160/**
1161 * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
1162 * @hw: pointer to the HW structure
1163 * @speed: stores the current speed
1164 * @duplex: stores the current duplex
1165 *
1166 * Read the status register for the current speed/duplex and store the current
1167 * speed and duplex for copper connections.
1168 **/
1169s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex)
1170{
1171 u32 status;
1172
1173 status = er32(STATUS);
1174 if (status & E1000_STATUS_SPEED_1000) {
1175 *speed = SPEED_1000;
1176 hw_dbg(hw, "1000 Mbps, ");
1177 } else if (status & E1000_STATUS_SPEED_100) {
1178 *speed = SPEED_100;
1179 hw_dbg(hw, "100 Mbps, ");
1180 } else {
1181 *speed = SPEED_10;
1182 hw_dbg(hw, "10 Mbps, ");
1183 }
1184
1185 if (status & E1000_STATUS_FD) {
1186 *duplex = FULL_DUPLEX;
1187 hw_dbg(hw, "Full Duplex\n");
1188 } else {
1189 *duplex = HALF_DUPLEX;
1190 hw_dbg(hw, "Half Duplex\n");
1191 }
1192
1193 return 0;
1194}
1195
1196/**
1197 * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
1198 * @hw: pointer to the HW structure
1199 * @speed: stores the current speed
1200 * @duplex: stores the current duplex
1201 *
1202 * Sets the speed and duplex to gigabit full duplex (the only possible option)
1203 * for fiber/serdes links.
1204 **/
1205s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex)
1206{
1207 *speed = SPEED_1000;
1208 *duplex = FULL_DUPLEX;
1209
1210 return 0;
1211}
1212
1213/**
1214 * e1000e_get_hw_semaphore - Acquire hardware semaphore
1215 * @hw: pointer to the HW structure
1216 *
1217 * Acquire the HW semaphore to access the PHY or NVM
1218 **/
1219s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
1220{
1221 u32 swsm;
1222 s32 timeout = hw->nvm.word_size + 1;
1223 s32 i = 0;
1224
1225 /* Get the SW semaphore */
1226 while (i < timeout) {
1227 swsm = er32(SWSM);
1228 if (!(swsm & E1000_SWSM_SMBI))
1229 break;
1230
1231 udelay(50);
1232 i++;
1233 }
1234
1235 if (i == timeout) {
1236 hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
1237 return -E1000_ERR_NVM;
1238 }
1239
1240 /* Get the FW semaphore. */
1241 for (i = 0; i < timeout; i++) {
1242 swsm = er32(SWSM);
1243 ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
1244
1245 /* Semaphore acquired if bit latched */
1246 if (er32(SWSM) & E1000_SWSM_SWESMBI)
1247 break;
1248
1249 udelay(50);
1250 }
1251
1252 if (i == timeout) {
1253 /* Release semaphores */
1254 e1000e_put_hw_semaphore(hw);
1255 hw_dbg(hw, "Driver can't access the NVM\n");
1256 return -E1000_ERR_NVM;
1257 }
1258
1259 return 0;
1260}
1261
1262/**
1263 * e1000e_put_hw_semaphore - Release hardware semaphore
1264 * @hw: pointer to the HW structure
1265 *
1266 * Release hardware semaphore used to access the PHY or NVM
1267 **/
1268void e1000e_put_hw_semaphore(struct e1000_hw *hw)
1269{
1270 u32 swsm;
1271
1272 swsm = er32(SWSM);
1273 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1274 ew32(SWSM, swsm);
1275}
1276
1277/**
1278 * e1000e_get_auto_rd_done - Check for auto read completion
1279 * @hw: pointer to the HW structure
1280 *
1281 * Check EEPROM for Auto Read done bit.
1282 **/
1283s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
1284{
1285 s32 i = 0;
1286
1287 while (i < AUTO_READ_DONE_TIMEOUT) {
1288 if (er32(EECD) & E1000_EECD_AUTO_RD)
1289 break;
1290 msleep(1);
1291 i++;
1292 }
1293
1294 if (i == AUTO_READ_DONE_TIMEOUT) {
1295 hw_dbg(hw, "Auto read by HW from NVM has not completed.\n");
1296 return -E1000_ERR_RESET;
1297 }
1298
1299 return 0;
1300}
1301
1302/**
1303 * e1000e_valid_led_default - Verify a valid default LED config
1304 * @hw: pointer to the HW structure
1305 * @data: pointer to the NVM (EEPROM)
1306 *
1307 * Read the EEPROM for the current default LED configuration. If the
1308 * LED configuration is not valid, set to a valid LED configuration.
1309 **/
1310s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
1311{
1312 s32 ret_val;
1313
1314 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1315 if (ret_val) {
1316 hw_dbg(hw, "NVM Read Error\n");
1317 return ret_val;
1318 }
1319
1320 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1321 *data = ID_LED_DEFAULT;
1322
1323 return 0;
1324}
1325
1326/**
1327 * e1000e_id_led_init - Initialize ID LED settings
1328 * @hw: pointer to the HW structure
1329 *
1330 **/
1331s32 e1000e_id_led_init(struct e1000_hw *hw)
1332{
1333 struct e1000_mac_info *mac = &hw->mac;
1334 s32 ret_val;
1335 const u32 ledctl_mask = 0x000000FF;
1336 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1337 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1338 u16 data, i, temp;
1339 const u16 led_mask = 0x0F;
1340
1341 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
1342 if (ret_val)
1343 return ret_val;
1344
1345 mac->ledctl_default = er32(LEDCTL);
1346 mac->ledctl_mode1 = mac->ledctl_default;
1347 mac->ledctl_mode2 = mac->ledctl_default;
1348
1349 for (i = 0; i < 4; i++) {
1350 temp = (data >> (i << 2)) & led_mask;
1351 switch (temp) {
1352 case ID_LED_ON1_DEF2:
1353 case ID_LED_ON1_ON2:
1354 case ID_LED_ON1_OFF2:
1355 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1356 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1357 break;
1358 case ID_LED_OFF1_DEF2:
1359 case ID_LED_OFF1_ON2:
1360 case ID_LED_OFF1_OFF2:
1361 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1362 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1363 break;
1364 default:
1365 /* Do nothing */
1366 break;
1367 }
1368 switch (temp) {
1369 case ID_LED_DEF1_ON2:
1370 case ID_LED_ON1_ON2:
1371 case ID_LED_OFF1_ON2:
1372 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1373 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1374 break;
1375 case ID_LED_DEF1_OFF2:
1376 case ID_LED_ON1_OFF2:
1377 case ID_LED_OFF1_OFF2:
1378 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1379 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1380 break;
1381 default:
1382 /* Do nothing */
1383 break;
1384 }
1385 }
1386
1387 return 0;
1388}
1389
1390/**
1391 * e1000e_cleanup_led_generic - Set LED config to default operation
1392 * @hw: pointer to the HW structure
1393 *
1394 * Remove the current LED configuration and set the LED configuration
1395 * to the default value, saved from the EEPROM.
1396 **/
1397s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
1398{
1399 ew32(LEDCTL, hw->mac.ledctl_default);
1400 return 0;
1401}
1402
1403/**
1404 * e1000e_blink_led - Blink LED
1405 * @hw: pointer to the HW structure
1406 *
1407 * Blink the LEDs which are set to be on.
1408 **/
1409s32 e1000e_blink_led(struct e1000_hw *hw)
1410{
1411 u32 ledctl_blink = 0;
1412 u32 i;
1413
1414 if (hw->media_type == e1000_media_type_fiber) {
1415 /* always blink LED0 for PCI-E fiber */
1416 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1417 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1418 } else {
1419 /* set the blink bit for each LED that's "on" (0x0E)
1420 * in ledctl_mode2 */
1421 ledctl_blink = hw->mac.ledctl_mode2;
1422 for (i = 0; i < 4; i++)
1423 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1424 E1000_LEDCTL_MODE_LED_ON)
1425 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
1426 (i * 8));
1427 }
1428
1429 ew32(LEDCTL, ledctl_blink);
1430
1431 return 0;
1432}
1433
1434/**
1435 * e1000e_led_on_generic - Turn LED on
1436 * @hw: pointer to the HW structure
1437 *
1438 * Turn LED on.
1439 **/
1440s32 e1000e_led_on_generic(struct e1000_hw *hw)
1441{
1442 u32 ctrl;
1443
1444 switch (hw->media_type) {
1445 case e1000_media_type_fiber:
1446 ctrl = er32(CTRL);
1447 ctrl &= ~E1000_CTRL_SWDPIN0;
1448 ctrl |= E1000_CTRL_SWDPIO0;
1449 ew32(CTRL, ctrl);
1450 break;
1451 case e1000_media_type_copper:
1452 ew32(LEDCTL, hw->mac.ledctl_mode2);
1453 break;
1454 default:
1455 break;
1456 }
1457
1458 return 0;
1459}
1460
1461/**
1462 * e1000e_led_off_generic - Turn LED off
1463 * @hw: pointer to the HW structure
1464 *
1465 * Turn LED off.
1466 **/
1467s32 e1000e_led_off_generic(struct e1000_hw *hw)
1468{
1469 u32 ctrl;
1470
1471 switch (hw->media_type) {
1472 case e1000_media_type_fiber:
1473 ctrl = er32(CTRL);
1474 ctrl |= E1000_CTRL_SWDPIN0;
1475 ctrl |= E1000_CTRL_SWDPIO0;
1476 ew32(CTRL, ctrl);
1477 break;
1478 case e1000_media_type_copper:
1479 ew32(LEDCTL, hw->mac.ledctl_mode1);
1480 break;
1481 default:
1482 break;
1483 }
1484
1485 return 0;
1486}
1487
1488/**
1489 * e1000e_set_pcie_no_snoop - Set PCI-express capabilities
1490 * @hw: pointer to the HW structure
1491 * @no_snoop: bitmap of snoop events
1492 *
1493 * Set the PCI-express register to snoop for events enabled in 'no_snoop'.
1494 **/
1495void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
1496{
1497 u32 gcr;
1498
1499 if (no_snoop) {
1500 gcr = er32(GCR);
1501 gcr &= ~(PCIE_NO_SNOOP_ALL);
1502 gcr |= no_snoop;
1503 ew32(GCR, gcr);
1504 }
1505}
1506
1507/**
1508 * e1000e_disable_pcie_master - Disables PCI-express master access
1509 * @hw: pointer to the HW structure
1510 *
1511 * Returns 0 if successful, else returns -10
1512 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
1513 * the master requests to be disabled.
1514 *
1515 * Disables PCI-Express master access and verifies there are no pending
1516 * requests.
1517 **/
1518s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
1519{
1520 u32 ctrl;
1521 s32 timeout = MASTER_DISABLE_TIMEOUT;
1522
1523 ctrl = er32(CTRL);
1524 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1525 ew32(CTRL, ctrl);
1526
1527 while (timeout) {
1528 if (!(er32(STATUS) &
1529 E1000_STATUS_GIO_MASTER_ENABLE))
1530 break;
1531 udelay(100);
1532 timeout--;
1533 }
1534
1535 if (!timeout) {
1536 hw_dbg(hw, "Master requests are pending.\n");
1537 return -E1000_ERR_MASTER_REQUESTS_PENDING;
1538 }
1539
1540 return 0;
1541}
1542
1543/**
1544 * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
1545 * @hw: pointer to the HW structure
1546 *
1547 * Reset the Adaptive Interframe Spacing throttle to default values.
1548 **/
1549void e1000e_reset_adaptive(struct e1000_hw *hw)
1550{
1551 struct e1000_mac_info *mac = &hw->mac;
1552
1553 mac->current_ifs_val = 0;
1554 mac->ifs_min_val = IFS_MIN;
1555 mac->ifs_max_val = IFS_MAX;
1556 mac->ifs_step_size = IFS_STEP;
1557 mac->ifs_ratio = IFS_RATIO;
1558
1559 mac->in_ifs_mode = 0;
1560 ew32(AIT, 0);
1561}
1562
1563/**
1564 * e1000e_update_adaptive - Update Adaptive Interframe Spacing
1565 * @hw: pointer to the HW structure
1566 *
1567 * Update the Adaptive Interframe Spacing Throttle value based on the
1568 * time between transmitted packets and time between collisions.
1569 **/
1570void e1000e_update_adaptive(struct e1000_hw *hw)
1571{
1572 struct e1000_mac_info *mac = &hw->mac;
1573
1574 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
1575 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
1576 mac->in_ifs_mode = 1;
1577 if (mac->current_ifs_val < mac->ifs_max_val) {
1578 if (!mac->current_ifs_val)
1579 mac->current_ifs_val = mac->ifs_min_val;
1580 else
1581 mac->current_ifs_val +=
1582 mac->ifs_step_size;
1583 ew32(AIT,
1584 mac->current_ifs_val);
1585 }
1586 }
1587 } else {
1588 if (mac->in_ifs_mode &&
1589 (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
1590 mac->current_ifs_val = 0;
1591 mac->in_ifs_mode = 0;
1592 ew32(AIT, 0);
1593 }
1594 }
1595}
1596
1597/**
1598 * e1000_raise_eec_clk - Raise EEPROM clock
1599 * @hw: pointer to the HW structure
1600 * @eecd: pointer to the EEPROM
1601 *
1602 * Enable/Raise the EEPROM clock bit.
1603 **/
1604static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
1605{
1606 *eecd = *eecd | E1000_EECD_SK;
1607 ew32(EECD, *eecd);
1608 e1e_flush();
1609 udelay(hw->nvm.delay_usec);
1610}
1611
1612/**
1613 * e1000_lower_eec_clk - Lower EEPROM clock
1614 * @hw: pointer to the HW structure
1615 * @eecd: pointer to the EEPROM
1616 *
1617 * Clear/Lower the EEPROM clock bit.
1618 **/
1619static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
1620{
1621 *eecd = *eecd & ~E1000_EECD_SK;
1622 ew32(EECD, *eecd);
1623 e1e_flush();
1624 udelay(hw->nvm.delay_usec);
1625}
1626
1627/**
1628 * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
1629 * @hw: pointer to the HW structure
1630 * @data: data to send to the EEPROM
1631 * @count: number of bits to shift out
1632 *
1633 * We need to shift 'count' bits out to the EEPROM. So, the value in the
1634 * "data" parameter will be shifted out to the EEPROM one bit at a time.
1635 * In order to do this, "data" must be broken down into bits.
1636 **/
1637static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
1638{
1639 struct e1000_nvm_info *nvm = &hw->nvm;
1640 u32 eecd = er32(EECD);
1641 u32 mask;
1642
1643 mask = 0x01 << (count - 1);
1644 if (nvm->type == e1000_nvm_eeprom_spi)
1645 eecd |= E1000_EECD_DO;
1646
1647 do {
1648 eecd &= ~E1000_EECD_DI;
1649
1650 if (data & mask)
1651 eecd |= E1000_EECD_DI;
1652
1653 ew32(EECD, eecd);
1654 e1e_flush();
1655
1656 udelay(nvm->delay_usec);
1657
1658 e1000_raise_eec_clk(hw, &eecd);
1659 e1000_lower_eec_clk(hw, &eecd);
1660
1661 mask >>= 1;
1662 } while (mask);
1663
1664 eecd &= ~E1000_EECD_DI;
1665 ew32(EECD, eecd);
1666}
1667
1668/**
1669 * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
1670 * @hw: pointer to the HW structure
1671 * @count: number of bits to shift in
1672 *
1673 * In order to read a register from the EEPROM, we need to shift 'count' bits
1674 * in from the EEPROM. Bits are "shifted in" by raising the clock input to
1675 * the EEPROM (setting the SK bit), and then reading the value of the data out
1676 * "DO" bit. During this "shifting in" process the data in "DI" bit should
1677 * always be clear.
1678 **/
1679static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
1680{
1681 u32 eecd;
1682 u32 i;
1683 u16 data;
1684
1685 eecd = er32(EECD);
1686
1687 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
1688 data = 0;
1689
1690 for (i = 0; i < count; i++) {
1691 data <<= 1;
1692 e1000_raise_eec_clk(hw, &eecd);
1693
1694 eecd = er32(EECD);
1695
1696 eecd &= ~E1000_EECD_DI;
1697 if (eecd & E1000_EECD_DO)
1698 data |= 1;
1699
1700 e1000_lower_eec_clk(hw, &eecd);
1701 }
1702
1703 return data;
1704}
1705
1706/**
1707 * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
1708 * @hw: pointer to the HW structure
1709 * @ee_reg: EEPROM flag for polling
1710 *
1711 * Polls the EEPROM status bit for either read or write completion based
1712 * upon the value of 'ee_reg'.
1713 **/
1714s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
1715{
1716 u32 attempts = 100000;
1717 u32 i, reg = 0;
1718
1719 for (i = 0; i < attempts; i++) {
1720 if (ee_reg == E1000_NVM_POLL_READ)
1721 reg = er32(EERD);
1722 else
1723 reg = er32(EEWR);
1724
1725 if (reg & E1000_NVM_RW_REG_DONE)
1726 return 0;
1727
1728 udelay(5);
1729 }
1730
1731 return -E1000_ERR_NVM;
1732}
1733
1734/**
1735 * e1000e_acquire_nvm - Generic request for access to EEPROM
1736 * @hw: pointer to the HW structure
1737 *
1738 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
1739 * Return successful if access grant bit set, else clear the request for
1740 * EEPROM access and return -E1000_ERR_NVM (-1).
1741 **/
1742s32 e1000e_acquire_nvm(struct e1000_hw *hw)
1743{
1744 u32 eecd = er32(EECD);
1745 s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
1746
1747 ew32(EECD, eecd | E1000_EECD_REQ);
1748 eecd = er32(EECD);
1749
1750 while (timeout) {
1751 if (eecd & E1000_EECD_GNT)
1752 break;
1753 udelay(5);
1754 eecd = er32(EECD);
1755 timeout--;
1756 }
1757
1758 if (!timeout) {
1759 eecd &= ~E1000_EECD_REQ;
1760 ew32(EECD, eecd);
1761 hw_dbg(hw, "Could not acquire NVM grant\n");
1762 return -E1000_ERR_NVM;
1763 }
1764
1765 return 0;
1766}
1767
1768/**
1769 * e1000_standby_nvm - Return EEPROM to standby state
1770 * @hw: pointer to the HW structure
1771 *
1772 * Return the EEPROM to a standby state.
1773 **/
1774static void e1000_standby_nvm(struct e1000_hw *hw)
1775{
1776 struct e1000_nvm_info *nvm = &hw->nvm;
1777 u32 eecd = er32(EECD);
1778
1779 if (nvm->type == e1000_nvm_eeprom_spi) {
1780 /* Toggle CS to flush commands */
1781 eecd |= E1000_EECD_CS;
1782 ew32(EECD, eecd);
1783 e1e_flush();
1784 udelay(nvm->delay_usec);
1785 eecd &= ~E1000_EECD_CS;
1786 ew32(EECD, eecd);
1787 e1e_flush();
1788 udelay(nvm->delay_usec);
1789 }
1790}
1791
1792/**
1793 * e1000_stop_nvm - Terminate EEPROM command
1794 * @hw: pointer to the HW structure
1795 *
1796 * Terminates the current command by inverting the EEPROM's chip select pin.
1797 **/
1798static void e1000_stop_nvm(struct e1000_hw *hw)
1799{
1800 u32 eecd;
1801
1802 eecd = er32(EECD);
1803 if (hw->nvm.type == e1000_nvm_eeprom_spi) {
1804 /* Pull CS high */
1805 eecd |= E1000_EECD_CS;
1806 e1000_lower_eec_clk(hw, &eecd);
1807 }
1808}
1809
1810/**
1811 * e1000e_release_nvm - Release exclusive access to EEPROM
1812 * @hw: pointer to the HW structure
1813 *
1814 * Stop any current commands to the EEPROM and clear the EEPROM request bit.
1815 **/
1816void e1000e_release_nvm(struct e1000_hw *hw)
1817{
1818 u32 eecd;
1819
1820 e1000_stop_nvm(hw);
1821
1822 eecd = er32(EECD);
1823 eecd &= ~E1000_EECD_REQ;
1824 ew32(EECD, eecd);
1825}
1826
1827/**
1828 * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
1829 * @hw: pointer to the HW structure
1830 *
1831 * Sets up the EEPROM for reading and writing.
1832 **/
1833static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1834{
1835 struct e1000_nvm_info *nvm = &hw->nvm;
1836 u32 eecd = er32(EECD);
1837 u16 timeout = 0;
1838 u8 spi_stat_reg;
1839
1840 if (nvm->type == e1000_nvm_eeprom_spi) {
1841 /* Clear SK and CS */
1842 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
1843 ew32(EECD, eecd);
1844 udelay(1);
1845 timeout = NVM_MAX_RETRY_SPI;
1846
1847 /* Read "Status Register" repeatedly until the LSB is cleared.
1848 * The EEPROM will signal that the command has been completed
1849 * by clearing bit 0 of the internal status register. If it's
1850 * not cleared within 'timeout', then error out. */
1851 while (timeout) {
1852 e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
1853 hw->nvm.opcode_bits);
1854 spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
1855 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
1856 break;
1857
1858 udelay(5);
1859 e1000_standby_nvm(hw);
1860 timeout--;
1861 }
1862
1863 if (!timeout) {
1864 hw_dbg(hw, "SPI NVM Status error\n");
1865 return -E1000_ERR_NVM;
1866 }
1867 }
1868
1869 return 0;
1870}
1871
1872/**
1873 * e1000e_read_nvm_spi - Read EEPROM word(s) using SPI
1874 * @hw: pointer to the HW structure
1875 * @offset: offset of word in the EEPROM to read
1876 * @words: number of words to read
1877 * @data: word read from the EEPROM
1878 *
1879 * Reads a 16 bit word from the EEPROM.
1880 **/
1881s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1882{
1883 struct e1000_nvm_info *nvm = &hw->nvm;
1884 u32 i = 0;
1885 s32 ret_val;
1886 u16 word_in;
1887 u8 read_opcode = NVM_READ_OPCODE_SPI;
1888
1889 /* A check for invalid values: offset too large, too many words,
1890 * and not enough words. */
1891 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1892 (words == 0)) {
1893 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1894 return -E1000_ERR_NVM;
1895 }
1896
1897 ret_val = nvm->ops.acquire_nvm(hw);
1898 if (ret_val)
1899 return ret_val;
1900
1901 ret_val = e1000_ready_nvm_eeprom(hw);
1902 if (ret_val) {
1903 nvm->ops.release_nvm(hw);
1904 return ret_val;
1905 }
1906
1907 e1000_standby_nvm(hw);
1908
1909 if ((nvm->address_bits == 8) && (offset >= 128))
1910 read_opcode |= NVM_A8_OPCODE_SPI;
1911
1912 /* Send the READ command (opcode + addr) */
1913 e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
1914 e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
1915
1916 /* Read the data. SPI NVMs increment the address with each byte
1917 * read and will roll over if reading beyond the end. This allows
1918 * us to read the whole NVM from any offset */
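	/* The byte at the lower NVM address is clocked in first and lands in
	 * the upper half of word_in; words are stored low byte first, so the
	 * swap below puts the bytes back in the right order. */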
1919 for (i = 0; i < words; i++) {
1920 word_in = e1000_shift_in_eec_bits(hw, 16);
1921 data[i] = (word_in >> 8) | (word_in << 8);
1922 }
1923
1924 nvm->ops.release_nvm(hw);
1925 return 0;
1926}
1927
1928/**
1929 * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
1930 * @hw: pointer to the HW structure
1931 * @offset: offset of word in the EEPROM to read
1932 * @words: number of words to read
1933 * @data: word read from the EEPROM
1934 *
1935 * Reads a 16 bit word from the EEPROM using the EERD register.
1936 **/
1937s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1938{
1939 struct e1000_nvm_info *nvm = &hw->nvm;
1940 u32 i, eerd = 0;
1941 s32 ret_val = 0;
1942
1943 /* A check for invalid values: offset too large, too many words,
1944 * and not enough words. */
1945 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1946 (words == 0)) {
1947 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1948 return -E1000_ERR_NVM;
1949 }
1950
1951 for (i = 0; i < words; i++) {
1952 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
1953 E1000_NVM_RW_REG_START;
1954
1955 ew32(EERD, eerd);
1956 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
1957 if (ret_val)
1958 break;
1959
1960 data[i] = (er32(EERD) >>
1961 E1000_NVM_RW_REG_DATA);
1962 }
1963
1964 return ret_val;
1965}
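
/*
 * For reference, the EERD usage above: the word address is placed at
 * E1000_NVM_RW_ADDR_SHIFT, E1000_NVM_RW_REG_START kicks off the read,
 * e1000e_poll_eerd_eewr_done() waits for E1000_NVM_RW_REG_DONE, and the
 * word itself is taken from the bits above E1000_NVM_RW_REG_DATA.
 */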
1966
1967/**
1968 * e1000e_write_nvm_spi - Write to EEPROM using SPI
1969 * @hw: pointer to the HW structure
1970 * @offset: offset within the EEPROM to be written to
1971 * @words: number of words to write
1972 * @data: 16 bit word(s) to be written to the EEPROM
1973 *
1974 * Writes data to EEPROM at offset using SPI interface.
1975 *
1976 * If e1000e_update_nvm_checksum is not called after this function, the
1977 * EEPROM will most likely contain an invalid checksum.
1978 **/
1979s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1980{
1981 struct e1000_nvm_info *nvm = &hw->nvm;
1982 s32 ret_val;
1983 u16 widx = 0;
1984
1985 /* A check for invalid values: offset too large, too many words,
1986 * and not enough words. */
1987 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1988 (words == 0)) {
1989 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1990 return -E1000_ERR_NVM;
1991 }
1992
1993 ret_val = nvm->ops.acquire_nvm(hw);
1994 if (ret_val)
1995 return ret_val;
1996
1997 msleep(10);
1998
1999 while (widx < words) {
2000 u8 write_opcode = NVM_WRITE_OPCODE_SPI;
2001
2002 ret_val = e1000_ready_nvm_eeprom(hw);
2003 if (ret_val) {
2004 nvm->ops.release_nvm(hw);
2005 return ret_val;
2006 }
2007
2008 e1000_standby_nvm(hw);
2009
2010 /* Send the WRITE ENABLE command (8 bit opcode) */
2011 e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
2012 nvm->opcode_bits);
2013
2014 e1000_standby_nvm(hw);
2015
2016 /* Some SPI eeproms use the 8th address bit embedded in the
2017 * opcode */
2018 if ((nvm->address_bits == 8) && (offset >= 128))
2019 write_opcode |= NVM_A8_OPCODE_SPI;
2020
2021 /* Send the Write command (8-bit opcode + addr) */
2022 e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
2023 e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
2024 nvm->address_bits);
2025
2026 /* Loop to allow for up to whole page write of eeprom */
2027 while (widx < words) {
2028 u16 word_out = data[widx];
2029 word_out = (word_out >> 8) | (word_out << 8);
2030 e1000_shift_out_eec_bits(hw, word_out, 16);
2031 widx++;
2032
2033 if ((((offset + widx) * 2) % nvm->page_size) == 0) {
2034 e1000_standby_nvm(hw);
2035 break;
2036 }
2037 }
2038 }
2039
2040 msleep(10);
2041 return 0;
2042}
2043
2044/**
2045 * e1000e_read_mac_addr - Read device MAC address
2046 * @hw: pointer to the HW structure
2047 *
2048 * Reads the device MAC address from the EEPROM and stores the value.
2049 * Since devices with two ports use the same EEPROM, we flip the
2050 * last bit in the MAC address for the second port.
2051 **/
2052s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2053{
2054 s32 ret_val;
2055 u16 offset, nvm_data, i;
2056
2057 for (i = 0; i < ETH_ALEN; i += 2) {
2058 offset = i >> 1;
2059 ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
2060 if (ret_val) {
2061 hw_dbg(hw, "NVM Read Error\n");
2062 return ret_val;
2063 }
2064 hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
2065 hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
2066 }
2067
2068 /* Flip last bit of mac address if we're on second port */
2069 if (hw->bus.func == E1000_FUNC_1)
2070 hw->mac.perm_addr[5] ^= 1;
2071
2072 for (i = 0; i < ETH_ALEN; i++)
2073 hw->mac.addr[i] = hw->mac.perm_addr[i];
2074
2075 return 0;
2076}
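
/*
 * Layout note for the loop above: NVM word 0 supplies MAC bytes 0 (low byte)
 * and 1 (high byte), word 1 supplies bytes 2-3, and word 2 supplies bytes 4-5.
 */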
2077
2078/**
2079 * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
2080 * @hw: pointer to the HW structure
2081 *
2082 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2083 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
2084 **/
2085s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
2086{
2087 s32 ret_val;
2088 u16 checksum = 0;
2089 u16 i, nvm_data;
2090
2091 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
2092 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2093 if (ret_val) {
2094 hw_dbg(hw, "NVM Read Error\n");
2095 return ret_val;
2096 }
2097 checksum += nvm_data;
2098 }
2099
2100 if (checksum != (u16) NVM_SUM) {
2101 hw_dbg(hw, "NVM Checksum Invalid\n");
2102 return -E1000_ERR_NVM;
2103 }
2104
2105 return 0;
2106}
2107
2108/**
2109 * e1000e_update_nvm_checksum_generic - Update EEPROM checksum
2110 * @hw: pointer to the HW structure
2111 *
2112 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
2113 * up to the checksum. Then calculates the EEPROM checksum and writes the
2114 * value to the EEPROM.
2115 **/
2116s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
2117{
2118 s32 ret_val;
2119 u16 checksum = 0;
2120 u16 i, nvm_data;
2121
2122 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
2123 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2124 if (ret_val) {
2125 hw_dbg(hw, "NVM Read Error while updating checksum.\n");
2126 return ret_val;
2127 }
2128 checksum += nvm_data;
2129 }
2130 checksum = (u16) NVM_SUM - checksum;
2131 ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
2132 if (ret_val)
2133 hw_dbg(hw, "NVM Write Error while updating checksum.\n");
2134
2135 return ret_val;
2136}
2137
2138/**
2139 * e1000e_reload_nvm - Reloads EEPROM
2140 * @hw: pointer to the HW structure
2141 *
2142 * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
2143 * extended control register.
2144 **/
2145void e1000e_reload_nvm(struct e1000_hw *hw)
2146{
2147 u32 ctrl_ext;
2148
2149 udelay(10);
2150 ctrl_ext = er32(CTRL_EXT);
2151 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
2152 ew32(CTRL_EXT, ctrl_ext);
2153 e1e_flush();
2154}
2155
2156/**
2157 * e1000_calculate_checksum - Calculate checksum for buffer
2158 * @buffer: pointer to the buffer to checksum
2159 * @length: length of the buffer, in bytes
2160 *
2161 * Calculates the 8-bit checksum of the buffer over the given length and
2162 * returns it (the two's complement of the byte sum).
2163 **/
2164static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
2165{
2166 u32 i;
2167 u8 sum = 0;
2168
2169 if (!buffer)
2170 return 0;
2171
2172 for (i = 0; i < length; i++)
2173 sum += buffer[i];
2174
2175 return (u8) (0 - sum);
2176}
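
/*
 * A minimal usage sketch (illustrative only; buf and len are placeholders):
 * because the value returned above is the two's complement of the byte sum,
 * appending it to the buffer makes the whole region sum to zero:
 *
 *	u8 csum = e1000_calculate_checksum(buf, len);
 *	buf[len] = csum;
 *	e1000_calculate_checksum(buf, len + 1) == 0
 */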
2177
2178/**
2179 * e1000_mng_enable_host_if - Checks host interface is enabled
2180 * @hw: pointer to the HW structure
2181 *
2182 * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND
2183 *
2184 * This function checks whether the HOST IF is enabled for command operation
2185 * and also checks whether the previous command is completed. It busy waits
2186 * until the previous command completes or the timeout expires.
2187 **/
2188static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
2189{
2190 u32 hicr;
2191 u8 i;
2192
2193 /* Check that the host interface is enabled. */
2194 hicr = er32(HICR);
2195 if ((hicr & E1000_HICR_EN) == 0) {
2196 hw_dbg(hw, "E1000_HOST_EN bit disabled.\n");
2197 return -E1000_ERR_HOST_INTERFACE_COMMAND;
2198 }
2199 /* check the previous command is completed */
2200 for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
2201 hicr = er32(HICR);
2202 if (!(hicr & E1000_HICR_C))
2203 break;
2204 mdelay(1);
2205 }
2206
2207 if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
2208 hw_dbg(hw, "Previous command timeout failed.\n");
2209 return -E1000_ERR_HOST_INTERFACE_COMMAND;
2210 }
2211
2212 return 0;
2213}
2214
2215/**
2216 * e1000e_check_mng_mode - check management mode
2217 * @hw: pointer to the HW structure
2218 *
2219 * Reads the firmware semaphore register and returns true (>0) if
2220 * manageability is enabled, else false (0).
2221 **/
2222bool e1000e_check_mng_mode(struct e1000_hw *hw)
2223{
2224 u32 fwsm = er32(FWSM);
2225
2226 return (fwsm & E1000_FWSM_MODE_MASK) == hw->mac.ops.mng_mode_enab;
2227}
2228
2229/**
2230 * e1000e_enable_tx_pkt_filtering - Enable packet filtering on TX
2231 * @hw: pointer to the HW structure
2232 *
2233 * Enables packet filtering on transmit packets if manageability is enabled
2234 * and host interface is enabled.
2235 **/
2236bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2237{
2238 struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
2239 u32 *buffer = (u32 *)&hw->mng_cookie;
2240 u32 offset;
2241 s32 ret_val, hdr_csum, csum;
2242 u8 i, len;
2243
2244 /* No manageability, no filtering */
2245 if (!e1000e_check_mng_mode(hw)) {
2246 hw->mac.tx_pkt_filtering = 0;
2247 return 0;
2248 }
2249
2250 /* If we can't read from the host interface for whatever
2251 * reason, disable filtering.
2252 */
2253 ret_val = e1000_mng_enable_host_if(hw);
2254 if (ret_val != 0) {
2255 hw->mac.tx_pkt_filtering = 0;
2256 return ret_val;
2257 }
2258
2259 /* Read in the header. Length and offset are in dwords. */
2260 len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
2261 offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
2262 for (i = 0; i < len; i++)
2263 *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
2264 hdr_csum = hdr->checksum;
2265 hdr->checksum = 0;
2266 csum = e1000_calculate_checksum((u8 *)hdr,
2267 E1000_MNG_DHCP_COOKIE_LENGTH);
2268 /* If either the checksums or signature don't match, then
2269 * the cookie area isn't considered valid, in which case we
2270 * take the safe route of assuming Tx filtering is enabled.
2271 */
2272 if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
2273 hw->mac.tx_pkt_filtering = 1;
2274 return 1;
2275 }
2276
2277 /* Cookie area is valid, make the final check for filtering. */
2278 if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
2279 hw->mac.tx_pkt_filtering = 0;
2280 return 0;
2281 }
2282
2283 hw->mac.tx_pkt_filtering = 1;
2284 return 1;
2285}
2286
2287/**
2288 * e1000_mng_write_cmd_header - Writes manageability command header
2289 * @hw: pointer to the HW structure
2290 * @hdr: pointer to the host interface command header
2291 *
2292 * Writes the command header after computing its checksum.
2293 **/
2294static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
2295 struct e1000_host_mng_command_header *hdr)
2296{
2297 u16 i, length = sizeof(struct e1000_host_mng_command_header);
2298
2299 /* Write the whole command header structure with new checksum. */
2300
2301 hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
2302
2303 length >>= 2;
2304 /* Write the relevant command block into the ram area. */
2305 for (i = 0; i < length; i++) {
2306 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
2307 *((u32 *) hdr + i));
2308 e1e_flush();
2309 }
2310
2311 return 0;
2312}
2313
2314/**
2315 * e1000_mng_host_if_write - Writes to the manageability host interface
2316 * @hw: pointer to the HW structure
2317 * @buffer: pointer to the host interface buffer
2318 * @length: size of the buffer
2319 * @offset: location in the buffer to write to
2320 * @sum: sum of the data (not checksum)
2321 *
2322 * This function writes the buffer contents at the given offset on the host
2323 * interface. It also handles alignment so that the writes are done in the
2324 * most efficient way. The running sum of the buffer is returned in *sum.
2325 **/
2326static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
2327 u16 length, u16 offset, u8 *sum)
2328{
2329 u8 *tmp;
2330 u8 *bufptr = buffer;
2331 u32 data = 0;
2332 u16 remaining, i, j, prev_bytes;
2333
2334 /* sum is only the sum of the data; it is not a checksum */
2335
2336 if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
2337 return -E1000_ERR_PARAM;
2338
2339 tmp = (u8 *)&data;
2340 prev_bytes = offset & 0x3;
2341 offset >>= 2;
2342
2343 if (prev_bytes) {
2344 data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
2345 for (j = prev_bytes; j < sizeof(u32); j++) {
2346 *(tmp + j) = *bufptr++;
2347 *sum += *(tmp + j);
2348 }
2349 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
2350 length -= j - prev_bytes;
2351 offset++;
2352 }
2353
2354 remaining = length & 0x3;
2355 length -= remaining;
2356
2357 /* Calculate length in DWORDs */
2358 length >>= 2;
2359
2360 /* The device driver writes the relevant command block into the
2361 * ram area. */
2362 for (i = 0; i < length; i++) {
2363 for (j = 0; j < sizeof(u32); j++) {
2364 *(tmp + j) = *bufptr++;
2365 *sum += *(tmp + j);
2366 }
2367
2368 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
2369 }
2370 if (remaining) {
2371 for (j = 0; j < sizeof(u32); j++) {
2372 if (j < remaining)
2373 *(tmp + j) = *bufptr++;
2374 else
2375 *(tmp + j) = 0;
2376
2377 *sum += *(tmp + j);
2378 }
2379 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
2380 }
2381
2382 return 0;
2383}
2384
2385/**
2386 * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
2387 * @hw: pointer to the HW structure
2388 * @buffer: pointer to the host interface
2389 * @length: size of the buffer
2390 *
2391 * Writes the DHCP information to the host interface.
2392 **/
2393s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
2394{
2395 struct e1000_host_mng_command_header hdr;
2396 s32 ret_val;
2397 u32 hicr;
2398
2399 hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
2400 hdr.command_length = length;
2401 hdr.reserved1 = 0;
2402 hdr.reserved2 = 0;
2403 hdr.checksum = 0;
2404
2405 /* Enable the host interface */
2406 ret_val = e1000_mng_enable_host_if(hw);
2407 if (ret_val)
2408 return ret_val;
2409
2410 /* Populate the host interface with the contents of "buffer". */
2411 ret_val = e1000_mng_host_if_write(hw, buffer, length,
2412 sizeof(hdr), &(hdr.checksum));
2413 if (ret_val)
2414 return ret_val;
2415
2416 /* Write the manageability command header */
2417 ret_val = e1000_mng_write_cmd_header(hw, &hdr);
2418 if (ret_val)
2419 return ret_val;
2420
2421 /* Tell the ARC a new command is pending. */
2422 hicr = er32(HICR);
2423 ew32(HICR, hicr | E1000_HICR_C);
2424
2425 return 0;
2426}
2427
2428/**
2429 * e1000e_enable_mng_pass_thru - Enable processing of ARPs
2430 * @hw: pointer to the HW structure
2431 *
2432 * Verifies whether the hardware needs to allow ARPs to be processed by the host.
2433 **/
2434bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2435{
2436 u32 manc;
2437 u32 fwsm, factps;
2438 bool ret_val = 0;
2439
2440 manc = er32(MANC);
2441
2442 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
2443 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
2444 return ret_val;
2445
2446 if (hw->mac.arc_subsystem_valid) {
2447 fwsm = er32(FWSM);
2448 factps = er32(FACTPS);
2449
2450 if (!(factps & E1000_FACTPS_MNGCG) &&
2451 ((fwsm & E1000_FWSM_MODE_MASK) ==
2452 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
2453 ret_val = 1;
2454 return ret_val;
2455 }
2456 } else {
2457 if ((manc & E1000_MANC_SMBUS_EN) &&
2458 !(manc & E1000_MANC_ASF_EN)) {
2459 ret_val = 1;
2460 return ret_val;
2461 }
2462 }
2463
2464 return ret_val;
2465}
2466
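/**
 * e1000e_read_part_num - Read device part number
 * @hw: pointer to the HW structure
 * @part_num: pointer to the device part number
 *
 * Reads the product board assembly (PBA) number from the EEPROM and stores
 * the value in part_num.
 **/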
2467s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num)
2468{
2469 s32 ret_val;
2470 u16 nvm_data;
2471
2472 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
2473 if (ret_val) {
2474 hw_dbg(hw, "NVM Read Error\n");
2475 return ret_val;
2476 }
2477 *part_num = (u32)(nvm_data << 16);
2478
2479 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
2480 if (ret_val) {
2481 hw_dbg(hw, "NVM Read Error\n");
2482 return ret_val;
2483 }
2484 *part_num |= nvm_data;
2485
2486 return 0;
2487}
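
#if 0
/*
 * Illustrative sketch only (not compiled into the driver): one way a caller
 * might use the generic NVM helpers above after modifying EEPROM contents --
 * rewrite the checksum word, then confirm the image sums to NVM_SUM (0xBABA)
 * again. Assumes 'hw' has already been set up by the MAC-specific init code.
 */
static s32 example_refresh_nvm_checksum(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = e1000e_update_nvm_checksum_generic(hw);
	if (ret_val)
		return ret_val;

	return e1000e_validate_nvm_checksum_generic(hw);
}
#endif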