path: root/drivers/net/igb/e1000_mac.c
author	Auke Kok <auke-jan.h.kok@intel.com>	2008-01-24 05:22:38 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 18:10:33 -0500
commit	9d5c824399dea881779d78a6c147288bf2dccb6b (patch)
tree	8c76b20c3cf1d81a63973e97578cea6a8a82a354 /drivers/net/igb/e1000_mac.c
parent	b491edd5817f1618f4e06d67638739591a714bdb (diff)
igb: PCI-Express 82575 Gigabit Ethernet driver
We are pleased to announce a new Gigabit Ethernet product and its driver to the Linux community. This product is the Intel(R) 82575 Gigabit Ethernet adapter family. Physical adapters will be available to the public soon. These adapters currently come in 2- and 4-port versions (copper PHY). Other variants will be available later.

The 82575 chipset supports significantly different features that warrant a new driver. The descriptor format is different (just like the ixgbe driver). The device can use multiple MSI-X vectors and multiple queues for both send and receive. This allows us to optimize some of the driver code specifically, compared to the e1000-supported devices.

This version of the igb driver no longer uses fake netdevices and incorporates napi_struct members for each ring to do the multi-queue polling. Multi-queue is enabled by default and the driver supports NAPI mode only. All the namespace collisions should be gone in this version too. The register macros have been condensed to improve readability.

Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
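For readers unfamiliar with the per-ring NAPI pattern referred to above, a driver built this way gives every ring its own napi_struct and poll handler, so each MSI-X vector can schedule its own ring for polling independently. The sketch below is only an illustration under that assumption; the names (example_ring, example_poll, example_setup_ring_napi) and the weight value of 64 are hypothetical and are not taken from this patch:

#include <linux/netdevice.h>

/* Hypothetical per-ring structure: each ring owns a NAPI context. */
struct example_ring {
	struct napi_struct napi;	/* one NAPI instance per Tx/Rx ring */
	/* descriptor ring state would live here */
};

/* Poll handler the NAPI core calls for this ring only. */
static int example_poll(struct napi_struct *napi, int budget)
{
	/*
	 * Recover the owning ring, e.g.
	 *   struct example_ring *ring =
	 *           container_of(napi, struct example_ring, napi);
	 * clean up to 'budget' packets on that ring and return the number
	 * actually processed; when that is below 'budget', complete NAPI
	 * for this ring and re-enable its (MSI-X) interrupt.
	 */
	return 0;
}

/* Register one poll handler per ring so rings can be polled
 * independently of one another.
 */
static void example_setup_ring_napi(struct net_device *netdev,
				    struct example_ring *ring)
{
	netif_napi_add(netdev, &ring->napi, example_poll, 64);
}

Each vector's interrupt handler then schedules NAPI on its own ring, which is the basis for the multi-queue polling described in the message above.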
Diffstat (limited to 'drivers/net/igb/e1000_mac.c')
-rw-r--r--  drivers/net/igb/e1000_mac.c  1505
1 file changed, 1505 insertions, 0 deletions
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
new file mode 100644
index 000000000000..3e84a3f0c1d8
--- /dev/null
+++ b/drivers/net/igb/e1000_mac.c
@@ -0,0 +1,1505 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/if_ether.h>
29#include <linux/delay.h>
30#include <linux/pci.h>
31#include <linux/netdevice.h>
32
33#include "e1000_mac.h"
34
35#include "igb.h"
36
37static s32 igb_set_default_fc(struct e1000_hw *hw);
38static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
39static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
40
41/**
42 * e1000_remove_device - Free device specific structure
43 * @hw: pointer to the HW structure
44 *
45 * If a device specific structure was allocated, this function will
46 * free it.
47 **/
48void igb_remove_device(struct e1000_hw *hw)
49{
50 /* Freeing the dev_spec member of e1000_hw structure */
51 kfree(hw->dev_spec);
52}
53
54static void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
55{
56 struct igb_adapter *adapter = hw->back;
57
58 pci_read_config_word(adapter->pdev, reg, value);
59}
60
61static s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
62{
63 struct igb_adapter *adapter = hw->back;
64 u16 cap_offset;
65
66 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
67 if (!cap_offset)
68 return -E1000_ERR_CONFIG;
69
70 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
71
72 return 0;
73}
74
75/**
76 * e1000_get_bus_info_pcie - Get PCIe bus information
77 * @hw: pointer to the HW structure
78 *
79 * Determines and stores the system bus information for a particular
80 * network interface. The following bus information is determined and stored:
81 * bus speed, bus width, type (PCIe), and PCIe function.
82 **/
83s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
84{
85 struct e1000_bus_info *bus = &hw->bus;
86 s32 ret_val;
87 u32 status;
88 u16 pcie_link_status, pci_header_type;
89
90 bus->type = e1000_bus_type_pci_express;
91 bus->speed = e1000_bus_speed_2500;
92
93 ret_val = igb_read_pcie_cap_reg(hw,
94 PCIE_LINK_STATUS,
95 &pcie_link_status);
96 if (ret_val)
97 bus->width = e1000_bus_width_unknown;
98 else
99 bus->width = (enum e1000_bus_width)((pcie_link_status &
100 PCIE_LINK_WIDTH_MASK) >>
101 PCIE_LINK_WIDTH_SHIFT);
102
103 igb_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
104 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
105 status = rd32(E1000_STATUS);
106 bus->func = (status & E1000_STATUS_FUNC_MASK)
107 >> E1000_STATUS_FUNC_SHIFT;
108 } else {
109 bus->func = 0;
110 }
111
112 return 0;
113}
114
115/**
116 * e1000_clear_vfta - Clear VLAN filter table
117 * @hw: pointer to the HW structure
118 *
119 * Clears the register array which contains the VLAN filter table by
120 * setting all the values to 0.
121 **/
122void igb_clear_vfta(struct e1000_hw *hw)
123{
124 u32 offset;
125
126 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
127 array_wr32(E1000_VFTA, offset, 0);
128 wrfl();
129 }
130}
131
132/**
133 * e1000_write_vfta - Write value to VLAN filter table
134 * @hw: pointer to the HW structure
135 * @offset: register offset in VLAN filter table
136 * @value: register value written to VLAN filter table
137 *
138 * Writes value at the given offset in the register array which stores
139 * the VLAN filter table.
140 **/
141void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
142{
143 array_wr32(E1000_VFTA, offset, value);
144 wrfl();
145}
146
147/**
148 * e1000_init_rx_addrs - Initialize receive addresses
149 * @hw: pointer to the HW structure
150 * @rar_count: receive address registers
151 *
152 * Sets up the receive address registers by setting the base receive address
153 * register to the device's MAC address and clearing all the other receive
154 * address registers to 0.
155 **/
156void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
157{
158 u32 i;
159
160 /* Setup the receive address */
161 hw_dbg(hw, "Programming MAC Address into RAR[0]\n");
162
163 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
164
165 /* Zero out the other (rar_entry_count - 1) receive addresses */
166 hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1);
167 for (i = 1; i < rar_count; i++) {
168 array_wr32(E1000_RA, (i << 1), 0);
169 wrfl();
170 array_wr32(E1000_RA, ((i << 1) + 1), 0);
171 wrfl();
172 }
173}
174
175/**
176 * e1000_check_alt_mac_addr - Check for alternate MAC addr
177 * @hw: pointer to the HW structure
178 *
179 * Checks the NVM for an alternate MAC address. An alternate MAC address
180 * can be set up by pre-boot software and must be treated like a permanent
181 * address that overrides the actual permanent MAC address. If an
182 * alternate MAC address is found, it is saved in the hw struct and
183 * programmed into RAR0 and the function returns success; otherwise the
184 * function returns an error.
185 **/
186s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
187{
188 u32 i;
189 s32 ret_val = 0;
190 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
191 u8 alt_mac_addr[ETH_ALEN];
192
193 ret_val = hw->nvm.ops.read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
194 &nvm_alt_mac_addr_offset);
195 if (ret_val) {
196 hw_dbg(hw, "NVM Read Error\n");
197 goto out;
198 }
199
200 if (nvm_alt_mac_addr_offset == 0xFFFF) {
201 ret_val = -(E1000_NOT_IMPLEMENTED);
202 goto out;
203 }
204
205 if (hw->bus.func == E1000_FUNC_1)
206 nvm_alt_mac_addr_offset += ETH_ALEN/sizeof(u16);
207
208 for (i = 0; i < ETH_ALEN; i += 2) {
209 offset = nvm_alt_mac_addr_offset + (i >> 1);
210 ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data);
211 if (ret_val) {
212 hw_dbg(hw, "NVM Read Error\n");
213 goto out;
214 }
215
216 alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
217 alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
218 }
219
220 /* if multicast bit is set, the alternate address will not be used */
221 if (alt_mac_addr[0] & 0x01) {
222 ret_val = -(E1000_NOT_IMPLEMENTED);
223 goto out;
224 }
225
226 for (i = 0; i < ETH_ALEN; i++)
227 hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i];
228
229 hw->mac.ops.rar_set(hw, hw->mac.perm_addr, 0);
230
231out:
232 return ret_val;
233}
234
235/**
236 * e1000_rar_set - Set receive address register
237 * @hw: pointer to the HW structure
238 * @addr: pointer to the receive address
239 * @index: receive address array register
240 *
241 * Sets the receive address array register at index to the address passed
242 * in by addr.
243 **/
244void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
245{
246 u32 rar_low, rar_high;
247
248 /*
249 * HW expects these in little endian so we reverse the byte order
250 * from network order (big endian) to little endian
251 */
252 rar_low = ((u32) addr[0] |
253 ((u32) addr[1] << 8) |
254 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
255
256 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
257
258 if (!hw->mac.disable_av)
259 rar_high |= E1000_RAH_AV;
260
261 array_wr32(E1000_RA, (index << 1), rar_low);
262 array_wr32(E1000_RA, ((index << 1) + 1), rar_high);
263}
264
265/**
266 * e1000_mta_set - Set multicast filter table address
267 * @hw: pointer to the HW structure
268 * @hash_value: determines the MTA register and bit to set
269 *
270 * The multicast table address is a register array of 32-bit registers.
271 * The hash_value is used to determine what register the bit is in, the
272 * current value is read, the new bit is OR'd in and the new value is
273 * written back into the register.
274 **/
275static void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
276{
277 u32 hash_bit, hash_reg, mta;
278
279 /*
280 * The MTA is a register array of 32-bit registers. It is
281 * treated like an array of (32*mta_reg_count) bits. We want to
282 * set bit BitArray[hash_value]. So we figure out what register
283 * the bit is in, read it, OR in the new bit, then write
284 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
285 * mask to bits 31:5 of the hash value which gives us the
286 * register we're modifying. The hash bit within that register
287 * is determined by the lower 5 bits of the hash value.
288 */
289 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
290 hash_bit = hash_value & 0x1F;
291
292 mta = array_rd32(E1000_MTA, hash_reg);
293
294 mta |= (1 << hash_bit);
295
296 array_wr32(E1000_MTA, hash_reg, mta);
297 wrfl();
298}
299
300/**
301 * e1000_update_mc_addr_list - Update Multicast addresses
302 * @hw: pointer to the HW structure
303 * @mc_addr_list: array of multicast addresses to program
304 * @mc_addr_count: number of multicast addresses to program
305 * @rar_used_count: the first RAR register free to program
306 * @rar_count: total number of supported Receive Address Registers
307 *
308 * Updates the Receive Address Registers and Multicast Table Array.
309 * The caller must have a packed mc_addr_list of multicast addresses.
310 * The parameter rar_count will usually be hw->mac.rar_entry_count
311 * unless there are workarounds that change this.
312 **/
313void igb_update_mc_addr_list(struct e1000_hw *hw,
314 u8 *mc_addr_list, u32 mc_addr_count,
315 u32 rar_used_count, u32 rar_count)
316{
317 u32 hash_value;
318 u32 i;
319
320 /*
321 * Load the first set of multicast addresses into the exact
322 * filters (RAR). If there are not enough to fill the RAR
323 * array, clear the filters.
324 */
325 for (i = rar_used_count; i < rar_count; i++) {
326 if (mc_addr_count) {
327 hw->mac.ops.rar_set(hw, mc_addr_list, i);
328 mc_addr_count--;
329 mc_addr_list += ETH_ALEN;
330 } else {
331 array_wr32(E1000_RA, i << 1, 0);
332 wrfl();
333 array_wr32(E1000_RA, (i << 1) + 1, 0);
334 wrfl();
335 }
336 }
337
338 /* Clear the old settings from the MTA */
339 hw_dbg(hw, "Clearing MTA\n");
340 for (i = 0; i < hw->mac.mta_reg_count; i++) {
341 array_wr32(E1000_MTA, i, 0);
342 wrfl();
343 }
344
345 /* Load any remaining multicast addresses into the hash table. */
346 for (; mc_addr_count > 0; mc_addr_count--) {
347 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
348 hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
349 igb_mta_set(hw, hash_value);
350 mc_addr_list += ETH_ALEN;
351 }
352}
353
354/**
355 * e1000_hash_mc_addr - Generate a multicast hash value
356 * @hw: pointer to the HW structure
357 * @mc_addr: pointer to a multicast address
358 *
359 * Generates a multicast address hash value which is used to determine
360 * the multicast filter table array address and new table value. See
361 * igb_mta_set()
362 **/
363static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
364{
365 u32 hash_value, hash_mask;
366 u8 bit_shift = 0;
367
368 /* Register count multiplied by bits per register */
369 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
370
371 /*
372 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
373 * where 0xFF would still fall within the hash mask.
374 */
375 while (hash_mask >> bit_shift != 0xFF)
376 bit_shift++;
377
378 /*
379 * The portion of the address that is used for the hash table
380 * is determined by the mc_filter_type setting.
381 * The algorithm is such that there is a total of 8 bits of shifting.
382 * The bit_shift for a mc_filter_type of 0 represents the number of
383 * left-shifts where the MSB of mc_addr[5] would still fall within
384 * the hash_mask. Case 0 does this exactly. Since there are a total
385 * of 8 bits of shifting, then mc_addr[4] will shift right the
386 * remaining number of bits. Thus 8 - bit_shift. The rest of the
387 * cases are a variation of this algorithm...essentially raising the
388 * number of bits to shift mc_addr[5] left, while still keeping the
389 * 8-bit shifting total.
390 *
391 * For example, given the following Destination MAC Address and an
392 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
393 * we can see that the bit_shift for case 0 is 4. These are the hash
394 * values resulting from each mc_filter_type...
395 * [0] [1] [2] [3] [4] [5]
396 * 01 AA 00 12 34 56
397 * LSB MSB
398 *
399 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
400 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
401 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x58D
402 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
403 */
404 switch (hw->mac.mc_filter_type) {
405 default:
406 case 0:
407 break;
408 case 1:
409 bit_shift += 1;
410 break;
411 case 2:
412 bit_shift += 2;
413 break;
414 case 3:
415 bit_shift += 4;
416 break;
417 }
418
419 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
420 (((u16) mc_addr[5]) << bit_shift)));
421
422 return hash_value;
423}
424
425/**
426 * e1000_clear_hw_cntrs_base - Clear base hardware counters
427 * @hw: pointer to the HW structure
428 *
429 * Clears the base hardware counters by reading the counter registers.
430 **/
431void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
432{
433 u32 temp;
434
435 temp = rd32(E1000_CRCERRS);
436 temp = rd32(E1000_SYMERRS);
437 temp = rd32(E1000_MPC);
438 temp = rd32(E1000_SCC);
439 temp = rd32(E1000_ECOL);
440 temp = rd32(E1000_MCC);
441 temp = rd32(E1000_LATECOL);
442 temp = rd32(E1000_COLC);
443 temp = rd32(E1000_DC);
444 temp = rd32(E1000_SEC);
445 temp = rd32(E1000_RLEC);
446 temp = rd32(E1000_XONRXC);
447 temp = rd32(E1000_XONTXC);
448 temp = rd32(E1000_XOFFRXC);
449 temp = rd32(E1000_XOFFTXC);
450 temp = rd32(E1000_FCRUC);
451 temp = rd32(E1000_GPRC);
452 temp = rd32(E1000_BPRC);
453 temp = rd32(E1000_MPRC);
454 temp = rd32(E1000_GPTC);
455 temp = rd32(E1000_GORCL);
456 temp = rd32(E1000_GORCH);
457 temp = rd32(E1000_GOTCL);
458 temp = rd32(E1000_GOTCH);
459 temp = rd32(E1000_RNBC);
460 temp = rd32(E1000_RUC);
461 temp = rd32(E1000_RFC);
462 temp = rd32(E1000_ROC);
463 temp = rd32(E1000_RJC);
464 temp = rd32(E1000_TORL);
465 temp = rd32(E1000_TORH);
466 temp = rd32(E1000_TOTL);
467 temp = rd32(E1000_TOTH);
468 temp = rd32(E1000_TPR);
469 temp = rd32(E1000_TPT);
470 temp = rd32(E1000_MPTC);
471 temp = rd32(E1000_BPTC);
472}
473
474/**
475 * e1000_check_for_copper_link - Check for link (Copper)
476 * @hw: pointer to the HW structure
477 *
478 * Checks to see if the link status of the hardware has changed. If a
479 * change in link status has been detected, then we read the PHY registers
480 * to get the current speed/duplex if link exists.
481 **/
482s32 igb_check_for_copper_link(struct e1000_hw *hw)
483{
484 struct e1000_mac_info *mac = &hw->mac;
485 s32 ret_val;
486 bool link;
487
488 /*
489 * We only want to go out to the PHY registers to see if Auto-Neg
490 * has completed and/or if our link status has changed. The
491 * get_link_status flag is set upon receiving a Link Status
492 * Change or Rx Sequence Error interrupt.
493 */
494 if (!mac->get_link_status) {
495 ret_val = 0;
496 goto out;
497 }
498
499 /*
500 * First we want to see if the MII Status Register reports
501 * link. If so, then we want to get the current speed/duplex
502 * of the PHY.
503 */
504 ret_val = igb_phy_has_link(hw, 1, 0, &link);
505 if (ret_val)
506 goto out;
507
508 if (!link)
509 goto out; /* No link detected */
510
511 mac->get_link_status = false;
512
513 /*
514 * Check if there was DownShift, must be checked
515 * immediately after link-up
516 */
517 igb_check_downshift(hw);
518
519 /*
520 * If we are forcing speed/duplex, then we simply return since
521 * we have already determined whether we have link or not.
522 */
523 if (!mac->autoneg) {
524 ret_val = -E1000_ERR_CONFIG;
525 goto out;
526 }
527
528 /*
529 * Auto-Neg is enabled. Auto Speed Detection takes care
530 * of MAC speed/duplex configuration. So we only need to
531 * configure Collision Distance in the MAC.
532 */
533 igb_config_collision_dist(hw);
534
535 /*
536 * Configure Flow Control now that Auto-Neg has completed.
537 * First, we need to restore the desired flow control
538 * settings because we may have had to re-autoneg with a
539 * different link partner.
540 */
541 ret_val = igb_config_fc_after_link_up(hw);
542 if (ret_val)
543 hw_dbg(hw, "Error configuring flow control\n");
544
545out:
546 return ret_val;
547}
548
549/**
550 * e1000_setup_link - Setup flow control and link settings
551 * @hw: pointer to the HW structure
552 *
553 * Determines which flow control settings to use, then configures flow
554 * control. Calls the appropriate media-specific link configuration
555 * function. Assuming the adapter has a valid link partner, a valid link
556 * should be established. Assumes the hardware has previously been reset
557 * and the transmitter and receiver are not enabled.
558 **/
559s32 igb_setup_link(struct e1000_hw *hw)
560{
561 s32 ret_val = 0;
562
563 /*
564 * In the case of the phy reset being blocked, we already have a link.
565 * We do not need to set it up again.
566 */
567 if (igb_check_reset_block(hw))
568 goto out;
569
570 ret_val = igb_set_default_fc(hw);
571 if (ret_val)
572 goto out;
573
574 /*
575 * We want to save off the original Flow Control configuration just
576 * in case we get disconnected and then reconnected into a different
577 * hub or switch with different Flow Control capabilities.
578 */
579 hw->fc.original_type = hw->fc.type;
580
581 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type);
582
583 /* Call the necessary media_type subroutine to configure the link. */
584 ret_val = hw->mac.ops.setup_physical_interface(hw);
585 if (ret_val)
586 goto out;
587
588 /*
589 * Initialize the flow control address, type, and PAUSE timer
590 * registers to their default values. This is done even if flow
591 * control is disabled, because it does not hurt anything to
592 * initialize these registers.
593 */
594 hw_dbg(hw,
595 "Initializing the Flow Control address, type and timer regs\n");
596 wr32(E1000_FCT, FLOW_CONTROL_TYPE);
597 wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
598 wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
599
600 wr32(E1000_FCTTV, hw->fc.pause_time);
601
602 ret_val = igb_set_fc_watermarks(hw);
603
604out:
605 return ret_val;
606}
607
608/**
609 * e1000_config_collision_dist - Configure collision distance
610 * @hw: pointer to the HW structure
611 *
612 * Configures the collision distance to the default value and is used
613 * during link setup. Currently no func pointer exists and all
614 * implementations are handled in the generic version of this function.
615 **/
616void igb_config_collision_dist(struct e1000_hw *hw)
617{
618 u32 tctl;
619
620 tctl = rd32(E1000_TCTL);
621
622 tctl &= ~E1000_TCTL_COLD;
623 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
624
625 wr32(E1000_TCTL, tctl);
626 wrfl();
627}
628
629/**
630 * e1000_set_fc_watermarks - Set flow control high/low watermarks
631 * @hw: pointer to the HW structure
632 *
633 * Sets the flow control high/low threshold (watermark) registers. If
634 * flow control XON frame transmission is enabled, then set XON frame
635 * transmission as well.
636 **/
637static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
638{
639 s32 ret_val = 0;
640 u32 fcrtl = 0, fcrth = 0;
641
642 /*
643 * Set the flow control receive threshold registers. Normally,
644 * these registers will be set to a default threshold that may be
645 * adjusted later by the driver's runtime code. However, if the
646 * ability to transmit pause frames is not enabled, then these
647 * registers will be set to 0.
648 */
649 if (hw->fc.type & e1000_fc_tx_pause) {
650 /*
651 * We need to set up the Receive Threshold high and low water
652 * marks as well as (optionally) enabling the transmission of
653 * XON frames.
654 */
655 fcrtl = hw->fc.low_water;
656 if (hw->fc.send_xon)
657 fcrtl |= E1000_FCRTL_XONE;
658
659 fcrth = hw->fc.high_water;
660 }
661 wr32(E1000_FCRTL, fcrtl);
662 wr32(E1000_FCRTH, fcrth);
663
664 return ret_val;
665}
666
667/**
668 * e1000_set_default_fc - Set flow control default values
669 * @hw: pointer to the HW structure
670 *
671 * Read the EEPROM for the default values for flow control and store the
672 * values.
673 **/
674static s32 igb_set_default_fc(struct e1000_hw *hw)
675{
676 s32 ret_val = 0;
677 u16 nvm_data;
678
679 /*
680 * Read and store word 0x0F of the EEPROM. This word contains bits
681 * that determine the hardware's default PAUSE (flow control) mode,
682 * a bit that determines whether the HW defaults to enabling or
683 * disabling auto-negotiation, and the direction of the
684 * SW defined pins. If there is no SW over-ride of the flow
685 * control setting, then the variable hw->fc will
686 * be initialized based on a value in the EEPROM.
687 */
688 ret_val = hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL2_REG, 1,
689 &nvm_data);
690
691 if (ret_val) {
692 hw_dbg(hw, "NVM Read Error\n");
693 goto out;
694 }
695
696 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
697 hw->fc.type = e1000_fc_none;
698 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
699 NVM_WORD0F_ASM_DIR)
700 hw->fc.type = e1000_fc_tx_pause;
701 else
702 hw->fc.type = e1000_fc_full;
703
704out:
705 return ret_val;
706}
707
708/**
709 * e1000_force_mac_fc - Force the MAC's flow control settings
710 * @hw: pointer to the HW structure
711 *
712 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
713 * device control register to reflect the adapter settings. TFCE and RFCE
714 * need to be explicitly set by software when a copper PHY is used because
715 * autonegotiation is managed by the PHY rather than the MAC. Software must
716 * also configure these bits when link is forced on a fiber connection.
717 **/
718s32 igb_force_mac_fc(struct e1000_hw *hw)
719{
720 u32 ctrl;
721 s32 ret_val = 0;
722
723 ctrl = rd32(E1000_CTRL);
724
725 /*
726 * Because we didn't get link via the internal auto-negotiation
727 * mechanism (we either forced link or we got link via PHY
728 * auto-neg), we have to manually enable/disable transmit and
729 * receive flow control.
730 *
731 * The "Case" statement below enables/disable flow control
732 * according to the "hw->fc.type" parameter.
733 *
734 * The possible values of the "fc" parameter are:
735 * 0: Flow control is completely disabled
736 * 1: Rx flow control is enabled (we can receive pause
737 * frames but not send pause frames).
738 * 2: Tx flow control is enabled (we can send pause frames
739 * but we do not receive pause frames).
740 * 3: Both Rx and Tx flow control (symmetric) are enabled.
741 * other: No other values should be possible at this point.
742 */
743 hw_dbg(hw, "hw->fc.type = %u\n", hw->fc.type);
744
745 switch (hw->fc.type) {
746 case e1000_fc_none:
747 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
748 break;
749 case e1000_fc_rx_pause:
750 ctrl &= (~E1000_CTRL_TFCE);
751 ctrl |= E1000_CTRL_RFCE;
752 break;
753 case e1000_fc_tx_pause:
754 ctrl &= (~E1000_CTRL_RFCE);
755 ctrl |= E1000_CTRL_TFCE;
756 break;
757 case e1000_fc_full:
758 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
759 break;
760 default:
761 hw_dbg(hw, "Flow control param set incorrectly\n");
762 ret_val = -E1000_ERR_CONFIG;
763 goto out;
764 }
765
766 wr32(E1000_CTRL, ctrl);
767
768out:
769 return ret_val;
770}
771
772/**
773 * e1000_config_fc_after_link_up - Configures flow control after link
774 * @hw: pointer to the HW structure
775 *
776 * Checks the status of auto-negotiation after link up to ensure that the
777 * speed and duplex were not forced. If the link needed to be forced, then
778 * flow control needs to be forced also. If auto-negotiation is enabled
779 * and did not fail, then we configure flow control based on our link
780 * partner.
781 **/
782s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
783{
784 struct e1000_mac_info *mac = &hw->mac;
785 s32 ret_val = 0;
786 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
787 u16 speed, duplex;
788
789 /*
790 * Check for the case where we have fiber media and auto-neg failed
791 * so we had to force link. In this case, we need to force the
792 * configuration of the MAC to match the "fc" parameter.
793 */
794 if (mac->autoneg_failed) {
795 if (hw->phy.media_type == e1000_media_type_fiber ||
796 hw->phy.media_type == e1000_media_type_internal_serdes)
797 ret_val = igb_force_mac_fc(hw);
798 } else {
799 if (hw->phy.media_type == e1000_media_type_copper)
800 ret_val = igb_force_mac_fc(hw);
801 }
802
803 if (ret_val) {
804 hw_dbg(hw, "Error forcing flow control settings\n");
805 goto out;
806 }
807
808 /*
809 * Check for the case where we have copper media and auto-neg is
810 * enabled. In this case, we need to check and see if Auto-Neg
811 * has completed, and if so, how the PHY and link partner have
812 * flow control configured.
813 */
814 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
815 /*
816 * Read the MII Status Register and check to see if AutoNeg
817 * has completed. We read this twice because this reg has
818 * some "sticky" (latched) bits.
819 */
820 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS,
821 &mii_status_reg);
822 if (ret_val)
823 goto out;
824 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS,
825 &mii_status_reg);
826 if (ret_val)
827 goto out;
828
829 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
830 hw_dbg(hw, "Copper PHY and Auto Neg "
831 "has not completed.\n");
832 goto out;
833 }
834
835 /*
836 * The AutoNeg process has completed, so we now need to
837 * read both the Auto Negotiation Advertisement
838 * Register (Address 4) and the Auto_Negotiation Base
839 * Page Ability Register (Address 5) to determine how
840 * flow control was negotiated.
841 */
842 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_AUTONEG_ADV,
843 &mii_nway_adv_reg);
844 if (ret_val)
845 goto out;
846 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_LP_ABILITY,
847 &mii_nway_lp_ability_reg);
848 if (ret_val)
849 goto out;
850
851 /*
852 * Two bits in the Auto Negotiation Advertisement Register
853 * (Address 4) and two bits in the Auto Negotiation Base
854 * Page Ability Register (Address 5) determine flow control
855 * for both the PHY and the link partner. The following
856 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
857 * 1999, describes these PAUSE resolution bits and how flow
858 * control is determined based upon these settings.
859 * NOTE: DC = Don't Care
860 *
861 * LOCAL DEVICE | LINK PARTNER
862 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
863 *-------|---------|-------|---------|--------------------
864 * 0 | 0 | DC | DC | e1000_fc_none
865 * 0 | 1 | 0 | DC | e1000_fc_none
866 * 0 | 1 | 1 | 0 | e1000_fc_none
867 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
868 * 1 | 0 | 0 | DC | e1000_fc_none
869 * 1 | DC | 1 | DC | e1000_fc_full
870 * 1 | 1 | 0 | 0 | e1000_fc_none
871 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
872 *
873 * Are both PAUSE bits set to 1? If so, this implies
874 * Symmetric Flow Control is enabled at both ends. The
875 * ASM_DIR bits are irrelevant per the spec.
876 *
877 * For Symmetric Flow Control:
878 *
879 * LOCAL DEVICE | LINK PARTNER
880 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
881 *-------|---------|-------|---------|--------------------
882 * 1 | DC | 1 | DC | E1000_fc_full
883 *
884 */
885 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
886 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
887 /*
888 * Now we need to check if the user selected receiving
889 * pause frames ONLY. In this case, we had to advertise
890 * FULL flow control because we could not advertise RX
891 * ONLY. Hence, we must now check to see if we need to
892 * turn OFF the TRANSMISSION of PAUSE frames.
893 */
894 if (hw->fc.original_type == e1000_fc_full) {
895 hw->fc.type = e1000_fc_full;
896 hw_dbg(hw, "Flow Control = FULL.\r\n");
897 } else {
898 hw->fc.type = e1000_fc_rx_pause;
899 hw_dbg(hw, "Flow Control = "
900 "RX PAUSE frames only.\r\n");
901 }
902 }
903 /*
904 * For receiving PAUSE frames ONLY.
905 *
906 * LOCAL DEVICE | LINK PARTNER
907 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
908 *-------|---------|-------|---------|--------------------
909 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
910 */
911 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
912 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
913 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
914 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
915 hw->fc.type = e1000_fc_tx_pause;
916 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n");
917 }
918 /*
919 * For transmitting PAUSE frames ONLY.
920 *
921 * LOCAL DEVICE | LINK PARTNER
922 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
923 *-------|---------|-------|---------|--------------------
924 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
925 */
926 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
927 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
928 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
929 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
930 hw->fc.type = e1000_fc_rx_pause;
931 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
932 }
933 /*
934 * Per the IEEE spec, at this point flow control should be
935 * disabled. However, we want to consider that we could
936 * be connected to a legacy switch that doesn't advertise
937 * desired flow control, but can be forced on the link
938 * partner. So if we advertised no flow control, that is
939 * what we will resolve to. If we advertised some kind of
940 * receive capability (Rx Pause Only or Full Flow Control)
941 * and the link partner advertised none, we will configure
942 * ourselves to enable Rx Flow Control only. We can do
943 * this safely for two reasons: If the link partner really
944 * didn't want flow control enabled, and we enable Rx, no
945 * harm done since we won't be receiving any PAUSE frames
946 * anyway. If the intent on the link partner was to have
947 * flow control enabled, then by us enabling RX only, we
948 * can at least receive pause frames and process them.
949 * This is a good idea because in most cases, since we are
950 * predominantly a server NIC, more times than not we will
951 * be asked to delay transmission of packets than asking
952 * our link partner to pause transmission of frames.
953 */
954 else if ((hw->fc.original_type == e1000_fc_none ||
955 hw->fc.original_type == e1000_fc_tx_pause) ||
956 hw->fc.strict_ieee) {
957 hw->fc.type = e1000_fc_none;
958 hw_dbg(hw, "Flow Control = NONE.\r\n");
959 } else {
960 hw->fc.type = e1000_fc_rx_pause;
961 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
962 }
963
964 /*
965 * Now we need to do one last check... If we auto-
966 * negotiated to HALF DUPLEX, flow control should not be
967 * enabled per IEEE 802.3 spec.
968 */
969 ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
970 if (ret_val) {
971 hw_dbg(hw, "Error getting link speed and duplex\n");
972 goto out;
973 }
974
975 if (duplex == HALF_DUPLEX)
976 hw->fc.type = e1000_fc_none;
977
978 /*
979 * Now we call a subroutine to actually force the MAC
980 * controller to use the correct flow control settings.
981 */
982 ret_val = igb_force_mac_fc(hw);
983 if (ret_val) {
984 hw_dbg(hw, "Error forcing flow control settings\n");
985 goto out;
986 }
987 }
988
989out:
990 return ret_val;
991}
992
993/**
994 * e1000_get_speed_and_duplex_copper - Retrieve current speed/duplex
995 * @hw: pointer to the HW structure
996 * @speed: stores the current speed
997 * @duplex: stores the current duplex
998 *
999 * Read the status register for the current speed/duplex and store the current
1000 * speed and duplex for copper connections.
1001 **/
1002s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1003 u16 *duplex)
1004{
1005 u32 status;
1006
1007 status = rd32(E1000_STATUS);
1008 if (status & E1000_STATUS_SPEED_1000) {
1009 *speed = SPEED_1000;
1010 hw_dbg(hw, "1000 Mbs, ");
1011 } else if (status & E1000_STATUS_SPEED_100) {
1012 *speed = SPEED_100;
1013 hw_dbg(hw, "100 Mbs, ");
1014 } else {
1015 *speed = SPEED_10;
1016 hw_dbg(hw, "10 Mbs, ");
1017 }
1018
1019 if (status & E1000_STATUS_FD) {
1020 *duplex = FULL_DUPLEX;
1021 hw_dbg(hw, "Full Duplex\n");
1022 } else {
1023 *duplex = HALF_DUPLEX;
1024 hw_dbg(hw, "Half Duplex\n");
1025 }
1026
1027 return 0;
1028}
1029
1030/**
1031 * e1000_get_hw_semaphore - Acquire hardware semaphore
1032 * @hw: pointer to the HW structure
1033 *
1034 * Acquire the HW semaphore to access the PHY or NVM
1035 **/
1036s32 igb_get_hw_semaphore(struct e1000_hw *hw)
1037{
1038 u32 swsm;
1039 s32 ret_val = 0;
1040 s32 timeout = hw->nvm.word_size + 1;
1041 s32 i = 0;
1042
1043 /* Get the SW semaphore */
1044 while (i < timeout) {
1045 swsm = rd32(E1000_SWSM);
1046 if (!(swsm & E1000_SWSM_SMBI))
1047 break;
1048
1049 udelay(50);
1050 i++;
1051 }
1052
1053 if (i == timeout) {
1054 hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
1055 ret_val = -E1000_ERR_NVM;
1056 goto out;
1057 }
1058
1059 /* Get the FW semaphore. */
1060 for (i = 0; i < timeout; i++) {
1061 swsm = rd32(E1000_SWSM);
1062 wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
1063
1064 /* Semaphore acquired if bit latched */
1065 if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
1066 break;
1067
1068 udelay(50);
1069 }
1070
1071 if (i == timeout) {
1072 /* Release semaphores */
1073 igb_put_hw_semaphore(hw);
1074 hw_dbg(hw, "Driver can't access the NVM\n");
1075 ret_val = -E1000_ERR_NVM;
1076 goto out;
1077 }
1078
1079out:
1080 return ret_val;
1081}
1082
1083/**
1084 * e1000_put_hw_semaphore - Release hardware semaphore
1085 * @hw: pointer to the HW structure
1086 *
1087 * Release hardware semaphore used to access the PHY or NVM
1088 **/
1089void igb_put_hw_semaphore(struct e1000_hw *hw)
1090{
1091 u32 swsm;
1092
1093 swsm = rd32(E1000_SWSM);
1094
1095 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1096
1097 wr32(E1000_SWSM, swsm);
1098}
1099
1100/**
1101 * e1000_get_auto_rd_done - Check for auto read completion
1102 * @hw: pointer to the HW structure
1103 *
1104 * Check EEPROM for Auto Read done bit.
1105 **/
1106s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1107{
1108 s32 i = 0;
1109 s32 ret_val = 0;
1110
1111
1112 while (i < AUTO_READ_DONE_TIMEOUT) {
1113 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1114 break;
1115 msleep(1);
1116 i++;
1117 }
1118
1119 if (i == AUTO_READ_DONE_TIMEOUT) {
1120 hw_dbg(hw, "Auto read by HW from NVM has not completed.\n");
1121 ret_val = -E1000_ERR_RESET;
1122 goto out;
1123 }
1124
1125out:
1126 return ret_val;
1127}
1128
1129/**
1130 * e1000_valid_led_default - Verify a valid default LED config
1131 * @hw: pointer to the HW structure
1132 * @data: pointer to the NVM (EEPROM)
1133 *
1134 * Read the EEPROM for the current default LED configuration. If the
1135 * LED configuration is not valid, set to a valid LED configuration.
1136 **/
1137static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1138{
1139 s32 ret_val;
1140
1141 ret_val = hw->nvm.ops.read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1142 if (ret_val) {
1143 hw_dbg(hw, "NVM Read Error\n");
1144 goto out;
1145 }
1146
1147 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1148 *data = ID_LED_DEFAULT;
1149
1150out:
1151 return ret_val;
1152}
1153
1154/**
1155 * e1000_id_led_init - Initialize ID LED settings from the NVM defaults
1156 * @hw: pointer to the HW structure
1157 *
1158 **/
1159s32 igb_id_led_init(struct e1000_hw *hw)
1160{
1161 struct e1000_mac_info *mac = &hw->mac;
1162 s32 ret_val;
1163 const u32 ledctl_mask = 0x000000FF;
1164 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1165 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1166 u16 data, i, temp;
1167 const u16 led_mask = 0x0F;
1168
1169 ret_val = igb_valid_led_default(hw, &data);
1170 if (ret_val)
1171 goto out;
1172
1173 mac->ledctl_default = rd32(E1000_LEDCTL);
1174 mac->ledctl_mode1 = mac->ledctl_default;
1175 mac->ledctl_mode2 = mac->ledctl_default;
1176
1177 for (i = 0; i < 4; i++) {
1178 temp = (data >> (i << 2)) & led_mask;
1179 switch (temp) {
1180 case ID_LED_ON1_DEF2:
1181 case ID_LED_ON1_ON2:
1182 case ID_LED_ON1_OFF2:
1183 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1184 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1185 break;
1186 case ID_LED_OFF1_DEF2:
1187 case ID_LED_OFF1_ON2:
1188 case ID_LED_OFF1_OFF2:
1189 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1190 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1191 break;
1192 default:
1193 /* Do nothing */
1194 break;
1195 }
1196 switch (temp) {
1197 case ID_LED_DEF1_ON2:
1198 case ID_LED_ON1_ON2:
1199 case ID_LED_OFF1_ON2:
1200 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1201 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1202 break;
1203 case ID_LED_DEF1_OFF2:
1204 case ID_LED_ON1_OFF2:
1205 case ID_LED_OFF1_OFF2:
1206 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1207 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1208 break;
1209 default:
1210 /* Do nothing */
1211 break;
1212 }
1213 }
1214
1215out:
1216 return ret_val;
1217}
1218
1219/**
1220 * e1000_cleanup_led - Set LED config to default operation
1221 * @hw: pointer to the HW structure
1222 *
1223 * Remove the current LED configuration and set the LED configuration
1224 * to the default value, saved from the EEPROM.
1225 **/
1226s32 igb_cleanup_led(struct e1000_hw *hw)
1227{
1228 wr32(E1000_LEDCTL, hw->mac.ledctl_default);
1229 return 0;
1230}
1231
1232/**
1233 * e1000_blink_led - Blink LED
1234 * @hw: pointer to the HW structure
1235 *
1236 * Blink the LEDs which are set to be on.
1237 **/
1238s32 igb_blink_led(struct e1000_hw *hw)
1239{
1240 u32 ledctl_blink = 0;
1241 u32 i;
1242
1243 if (hw->phy.media_type == e1000_media_type_fiber) {
1244 /* always blink LED0 for PCI-E fiber */
1245 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1246 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1247 } else {
1248 /*
1249 * set the blink bit for each LED that's "on" (0x0E)
1250 * in ledctl_mode2
1251 */
1252 ledctl_blink = hw->mac.ledctl_mode2;
1253 for (i = 0; i < 4; i++)
1254 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1255 E1000_LEDCTL_MODE_LED_ON)
1256 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
1257 (i * 8));
1258 }
1259
1260 wr32(E1000_LEDCTL, ledctl_blink);
1261
1262 return 0;
1263}
1264
1265/**
1266 * e1000_led_off - Turn LED off
1267 * @hw: pointer to the HW structure
1268 *
1269 * Turn LED off.
1270 **/
1271s32 igb_led_off(struct e1000_hw *hw)
1272{
1273 u32 ctrl;
1274
1275 switch (hw->phy.media_type) {
1276 case e1000_media_type_fiber:
1277 ctrl = rd32(E1000_CTRL);
1278 ctrl |= E1000_CTRL_SWDPIN0;
1279 ctrl |= E1000_CTRL_SWDPIO0;
1280 wr32(E1000_CTRL, ctrl);
1281 break;
1282 case e1000_media_type_copper:
1283 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1284 break;
1285 default:
1286 break;
1287 }
1288
1289 return 0;
1290}
1291
1292/**
1293 * e1000_disable_pcie_master - Disables PCI-express master access
1294 * @hw: pointer to the HW structure
1295 *
1296 * Returns 0 if successful, else returns -10
1297 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not caused
1298 * the master requests to be disabled.
1299 *
1300 * Disables PCI-Express master access and verifies there are no pending
1301 * requests.
1302 **/
1303s32 igb_disable_pcie_master(struct e1000_hw *hw)
1304{
1305 u32 ctrl;
1306 s32 timeout = MASTER_DISABLE_TIMEOUT;
1307 s32 ret_val = 0;
1308
1309 if (hw->bus.type != e1000_bus_type_pci_express)
1310 goto out;
1311
1312 ctrl = rd32(E1000_CTRL);
1313 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1314 wr32(E1000_CTRL, ctrl);
1315
1316 while (timeout) {
1317 if (!(rd32(E1000_STATUS) &
1318 E1000_STATUS_GIO_MASTER_ENABLE))
1319 break;
1320 udelay(100);
1321 timeout--;
1322 }
1323
1324 if (!timeout) {
1325 hw_dbg(hw, "Master requests are pending.\n");
1326 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1327 goto out;
1328 }
1329
1330out:
1331 return ret_val;
1332}
1333
1334/**
1335 * e1000_reset_adaptive - Reset Adaptive Interframe Spacing
1336 * @hw: pointer to the HW structure
1337 *
1338 * Reset the Adaptive Interframe Spacing throttle to default values.
1339 **/
1340void igb_reset_adaptive(struct e1000_hw *hw)
1341{
1342 struct e1000_mac_info *mac = &hw->mac;
1343
1344 if (!mac->adaptive_ifs) {
1345 hw_dbg(hw, "Not in Adaptive IFS mode!\n");
1346 goto out;
1347 }
1348
1349 if (!mac->ifs_params_forced) {
1350 mac->current_ifs_val = 0;
1351 mac->ifs_min_val = IFS_MIN;
1352 mac->ifs_max_val = IFS_MAX;
1353 mac->ifs_step_size = IFS_STEP;
1354 mac->ifs_ratio = IFS_RATIO;
1355 }
1356
1357 mac->in_ifs_mode = false;
1358 wr32(E1000_AIT, 0);
1359out:
1360 return;
1361}
1362
1363/**
1364 * e1000_update_adaptive - Update Adaptive Interframe Spacing
1365 * @hw: pointer to the HW structure
1366 *
1367 * Update the Adaptive Interframe Spacing Throttle value based on the
1368 * time between transmitted packets and time between collisions.
1369 **/
1370void igb_update_adaptive(struct e1000_hw *hw)
1371{
1372 struct e1000_mac_info *mac = &hw->mac;
1373
1374 if (!mac->adaptive_ifs) {
1375 hw_dbg(hw, "Not in Adaptive IFS mode!\n");
1376 goto out;
1377 }
1378
1379 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
1380 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
1381 mac->in_ifs_mode = true;
1382 if (mac->current_ifs_val < mac->ifs_max_val) {
1383 if (!mac->current_ifs_val)
1384 mac->current_ifs_val = mac->ifs_min_val;
1385 else
1386 mac->current_ifs_val +=
1387 mac->ifs_step_size;
1388 wr32(E1000_AIT,
1389 mac->current_ifs_val);
1390 }
1391 }
1392 } else {
1393 if (mac->in_ifs_mode &&
1394 (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
1395 mac->current_ifs_val = 0;
1396 mac->in_ifs_mode = false;
1397 wr32(E1000_AIT, 0);
1398 }
1399 }
1400out:
1401 return;
1402}
1403
1404/**
1405 * e1000_validate_mdi_setting - Verify MDI/MDIx settings
1406 * @hw: pointer to the HW structure
1407 *
1408 * Verify that when not using auto-negotiation, MDI/MDIx is correctly
1409 * set, which is forced to MDI mode only.
1410 **/
1411s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1412{
1413 s32 ret_val = 0;
1414
1415 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1416 hw_dbg(hw, "Invalid MDI setting detected\n");
1417 hw->phy.mdix = 1;
1418 ret_val = -E1000_ERR_CONFIG;
1419 goto out;
1420 }
1421
1422out:
1423 return ret_val;
1424}
1425
1426/**
1427 * e1000_write_8bit_ctrl_reg - Write an 8-bit CTRL register
1428 * @hw: pointer to the HW structure
1429 * @reg: 32bit register offset such as E1000_SCTL
1430 * @offset: register offset to write to
1431 * @data: data to write at register offset
1432 *
1433 * Writes an address/data control type register. There are several of these
1434 * and they all have the format address << 8 | data and bit 31 is polled for
1435 * completion.
1436 **/
1437s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
1438 u32 offset, u8 data)
1439{
1440 u32 i, regvalue = 0;
1441 s32 ret_val = 0;
1442
1443 /* Set up the address and data */
1444 regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
1445 wr32(reg, regvalue);
1446
1447 /* Poll the ready bit to see if the MDI read completed */
1448 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
1449 udelay(5);
1450 regvalue = rd32(reg);
1451 if (regvalue & E1000_GEN_CTL_READY)
1452 break;
1453 }
1454 if (!(regvalue & E1000_GEN_CTL_READY)) {
1455 hw_dbg(hw, "Reg %08x did not indicate ready\n", reg);
1456 ret_val = -E1000_ERR_PHY;
1457 goto out;
1458 }
1459
1460out:
1461 return ret_val;
1462}
1463
1464/**
1465 * e1000_enable_mng_pass_thru - Enable processing of ARPs
1466 * @hw: pointer to the HW structure
1467 *
1468 * Verifies the hardware needs to allow ARPs to be processed by the host.
1469 **/
1470bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1471{
1472 u32 manc;
1473 u32 fwsm, factps;
1474 bool ret_val = false;
1475
1476 if (!hw->mac.asf_firmware_present)
1477 goto out;
1478
1479 manc = rd32(E1000_MANC);
1480
1481 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
1482 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
1483 goto out;
1484
1485 if (hw->mac.arc_subsystem_valid) {
1486 fwsm = rd32(E1000_FWSM);
1487 factps = rd32(E1000_FACTPS);
1488
1489 if (!(factps & E1000_FACTPS_MNGCG) &&
1490 ((fwsm & E1000_FWSM_MODE_MASK) ==
1491 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
1492 ret_val = true;
1493 goto out;
1494 }
1495 } else {
1496 if ((manc & E1000_MANC_SMBUS_EN) &&
1497 !(manc & E1000_MANC_ASF_EN)) {
1498 ret_val = true;
1499 goto out;
1500 }
1501 }
1502
1503out:
1504 return ret_val;
1505}