aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/Kconfig23
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/e1000e/82571.c1351
-rw-r--r--drivers/net/e1000e/Makefile37
-rw-r--r--drivers/net/e1000e/defines.h739
-rw-r--r--drivers/net/e1000e/e1000.h514
-rw-r--r--drivers/net/e1000e/es2lan.c1232
-rw-r--r--drivers/net/e1000e/ethtool.c1774
-rw-r--r--drivers/net/e1000e/hw.h864
-rw-r--r--drivers/net/e1000e/ich8lan.c2225
-rw-r--r--drivers/net/e1000e/lib.c2487
-rw-r--r--drivers/net/e1000e/netdev.c4441
-rw-r--r--drivers/net/e1000e/param.c382
-rw-r--r--drivers/net/e1000e/phy.c1773
14 files changed, 17843 insertions, 0 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 734f8403c806..502dd0eb8809 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2055,6 +2055,29 @@ config E1000_DISABLE_PACKET_SPLIT
2055 2055
2056 If in doubt, say N. 2056 If in doubt, say N.
2057 2057
2058config E1000E
2059 tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
2060 depends on PCI
2061 ---help---
2062 This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
2063 ethernet family of adapters. For PCI or PCI-X e1000 adapters,
 2064 use the regular e1000 driver. For more information on how to
2065 identify your adapter, go to the Adapter & Driver ID Guide at:
2066
2067 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
2068
2069 For general information and support, go to the Intel support
2070 website at:
2071
2072 <http://support.intel.com>
2073
2074 More specific information on configuring the driver is in
2075 <file:Documentation/networking/e1000e.txt>.
2076
2077 To compile this driver as a module, choose M here and read
2078 <file:Documentation/networking/net-modules.txt>. The module
2079 will be called e1000e.
2080
2058source "drivers/net/ixp2000/Kconfig" 2081source "drivers/net/ixp2000/Kconfig"
2059 2082
2060config MYRI_SBUS 2083config MYRI_SBUS
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index d6f7302ab72d..0d2b4bee587c 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -3,6 +3,7 @@
3# 3#
4 4
5obj-$(CONFIG_E1000) += e1000/ 5obj-$(CONFIG_E1000) += e1000/
6obj-$(CONFIG_E1000E) += e1000e/
6obj-$(CONFIG_IBM_EMAC) += ibm_emac/ 7obj-$(CONFIG_IBM_EMAC) += ibm_emac/
7obj-$(CONFIG_IXGB) += ixgb/ 8obj-$(CONFIG_IXGB) += ixgb/
8obj-$(CONFIG_CHELSIO_T1) += chelsio/ 9obj-$(CONFIG_CHELSIO_T1) += chelsio/
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
new file mode 100644
index 000000000000..cf70522fc851
--- /dev/null
+++ b/drivers/net/e1000e/82571.c
@@ -0,0 +1,1351 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/*
30 * 82571EB Gigabit Ethernet Controller
31 * 82571EB Gigabit Ethernet Controller (Fiber)
32 * 82572EI Gigabit Ethernet Controller (Copper)
33 * 82572EI Gigabit Ethernet Controller (Fiber)
34 * 82572EI Gigabit Ethernet Controller
35 * 82573V Gigabit Ethernet Controller (Copper)
36 * 82573E Gigabit Ethernet Controller (Copper)
37 * 82573L Gigabit Ethernet Controller
38 */
39
40#include <linux/netdevice.h>
41#include <linux/delay.h>
42#include <linux/pci.h>
43
44#include "e1000.h"
45
46#define ID_LED_RESERVED_F746 0xF746
47#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
48 (ID_LED_OFF1_ON2 << 8) | \
49 (ID_LED_DEF1_DEF2 << 4) | \
50 (ID_LED_DEF1_DEF2))
51
52#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
53
54static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
55static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
56static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
57static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
58 u16 words, u16 *data);
59static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
60static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
61static s32 e1000_setup_link_82571(struct e1000_hw *hw);
62static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
63
64/**
65 * e1000_init_phy_params_82571 - Init PHY func ptrs.
66 * @hw: pointer to the HW structure
67 *
68 * This is a function pointer entry point called by the api module.
69 **/
70static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
71{
72 struct e1000_phy_info *phy = &hw->phy;
73 s32 ret_val;
74
75 if (hw->media_type != e1000_media_type_copper) {
76 phy->type = e1000_phy_none;
77 return 0;
78 }
79
80 phy->addr = 1;
81 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
82 phy->reset_delay_us = 100;
83
84 switch (hw->mac.type) {
85 case e1000_82571:
86 case e1000_82572:
87 phy->type = e1000_phy_igp_2;
88 break;
89 case e1000_82573:
90 phy->type = e1000_phy_m88;
91 break;
92 default:
93 return -E1000_ERR_PHY;
94 break;
95 }
96
97 /* This can only be done after all function pointers are setup. */
98 ret_val = e1000_get_phy_id_82571(hw);
99
100 /* Verify phy id */
101 switch (hw->mac.type) {
102 case e1000_82571:
103 case e1000_82572:
104 if (phy->id != IGP01E1000_I_PHY_ID)
105 return -E1000_ERR_PHY;
106 break;
107 case e1000_82573:
108 if (phy->id != M88E1111_I_PHY_ID)
109 return -E1000_ERR_PHY;
110 break;
111 default:
112 return -E1000_ERR_PHY;
113 break;
114 }
115
116 return 0;
117}
118
/**
 * e1000_init_nvm_params_82571 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 *
 * This is a function pointer entry point called by the api module.
 * Determines the SPI page size / address width (from an override or the
 * EECD strap) and whether the NVM is a SPI EEPROM or, on 82573, the
 * on-die flash, then sizes it accordingly.
 **/
static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u16 size;

	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;
	/* Page size and address width: honor a user override first,
	 * otherwise derive both from the EECD address-bits strap. */
	switch (nvm->override) {
	case e1000_nvm_override_spi_large:
		nvm->page_size = 32;
		nvm->address_bits = 16;
		break;
	case e1000_nvm_override_spi_small:
		nvm->page_size = 8;
		nvm->address_bits = 8;
		break;
	default:
		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
		break;
	}

	switch (hw->mac.type) {
	case e1000_82573:
		/* EECD bits 16:15 both set indicate flash-based NVM. */
		if (((eecd >> 15) & 0x3) == 0x3) {
			nvm->type = e1000_nvm_flash_hw;
			nvm->word_size = 2048;
			/* Autonomous Flash update bit must be cleared due
			 * to Flash update issue.
			 */
			eecd &= ~E1000_EECD_AUPDEN;
			ew32(EECD, eecd);
			break;
		}
		/* Fall Through */
	default:
		nvm->type = e1000_nvm_eeprom_spi;
		size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
			     E1000_EECD_SIZE_EX_SHIFT);
		/* Added to a constant, "size" becomes the left-shift value
		 * for setting word_size.
		 */
		size += NVM_WORD_SIZE_BASE_SHIFT;
		nvm->word_size = 1 << size;
		break;
	}

	return 0;
}
175
/**
 * e1000_init_mac_params_82571 - Init MAC func ptrs.
 * @adapter: board private structure containing the HW structure
 *
 * This is a function pointer entry point called by the api module.
 * Determines the media type from the PCI device ID, sets the MTA/RAR
 * table sizes, records whether manageability (ARC) is enabled, and
 * installs the media-specific link-management callbacks.
 **/
static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_mac_operations *func = &mac->ops;

	/* Set media type */
	switch (adapter->pdev->device) {
	case E1000_DEV_ID_82571EB_FIBER:
	case E1000_DEV_ID_82572EI_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
		hw->media_type = e1000_media_type_fiber;
		break;
	case E1000_DEV_ID_82571EB_SERDES:
	case E1000_DEV_ID_82572EI_SERDES:
		hw->media_type = e1000_media_type_internal_serdes;
		break;
	default:
		hw->media_type = e1000_media_type_copper;
		break;
	}

	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_RAR_ENTRIES;
	/* Set if manageability features are enabled. */
	mac->arc_subsystem_valid =
		(er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;

	/* Install link setup/check/query callbacks per media type. */
	switch (hw->media_type) {
	case e1000_media_type_copper:
		func->setup_physical_interface = e1000_setup_copper_link_82571;
		func->check_for_link = e1000e_check_for_copper_link;
		func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
		break;
	case e1000_media_type_fiber:
		func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571;
		func->check_for_link = e1000e_check_for_fiber_link;
		func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes;
		break;
	case e1000_media_type_internal_serdes:
		func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571;
		func->check_for_link = e1000e_check_for_serdes_link;
		func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes;
		break;
	default:
		return -E1000_ERR_CONFIG;
		break;
	}

	return 0;
}
236
237static s32 e1000_get_invariants_82571(struct e1000_adapter *adapter)
238{
239 struct e1000_hw *hw = &adapter->hw;
240 static int global_quad_port_a; /* global port a indication */
241 struct pci_dev *pdev = adapter->pdev;
242 u16 eeprom_data = 0;
243 int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
244 s32 rc;
245
246 rc = e1000_init_mac_params_82571(adapter);
247 if (rc)
248 return rc;
249
250 rc = e1000_init_nvm_params_82571(hw);
251 if (rc)
252 return rc;
253
254 rc = e1000_init_phy_params_82571(hw);
255 if (rc)
256 return rc;
257
258 /* tag quad port adapters first, it's used below */
259 switch (pdev->device) {
260 case E1000_DEV_ID_82571EB_QUAD_COPPER:
261 case E1000_DEV_ID_82571EB_QUAD_FIBER:
262 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
263 adapter->flags |= FLAG_IS_QUAD_PORT;
264 /* mark the first port */
265 if (global_quad_port_a == 0)
266 adapter->flags |= FLAG_IS_QUAD_PORT_A;
267 /* Reset for multiple quad port adapters */
268 global_quad_port_a++;
269 if (global_quad_port_a == 4)
270 global_quad_port_a = 0;
271 break;
272 default:
273 break;
274 }
275
276 switch (adapter->hw.mac.type) {
277 case e1000_82571:
278 /* these dual ports don't have WoL on port B at all */
279 if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) ||
280 (pdev->device == E1000_DEV_ID_82571EB_SERDES) ||
281 (pdev->device == E1000_DEV_ID_82571EB_COPPER)) &&
282 (is_port_b))
283 adapter->flags &= ~FLAG_HAS_WOL;
284 /* quad ports only support WoL on port A */
285 if (adapter->flags & FLAG_IS_QUAD_PORT &&
286 (!adapter->flags & FLAG_IS_QUAD_PORT_A))
287 adapter->flags &= ~FLAG_HAS_WOL;
288 break;
289
290 case e1000_82573:
291 if (pdev->device == E1000_DEV_ID_82573L) {
292 e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1,
293 &eeprom_data);
294 if (eeprom_data & NVM_WORD1A_ASPM_MASK)
295 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
296 }
297 break;
298 default:
299 break;
300 }
301
302 return 0;
303}
304
305/**
306 * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision
307 * @hw: pointer to the HW structure
308 *
309 * Reads the PHY registers and stores the PHY ID and possibly the PHY
310 * revision in the hardware structure.
311 **/
312static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
313{
314 struct e1000_phy_info *phy = &hw->phy;
315
316 switch (hw->mac.type) {
317 case e1000_82571:
318 case e1000_82572:
319 /* The 82571 firmware may still be configuring the PHY.
320 * In this case, we cannot access the PHY until the
321 * configuration is done. So we explicitly set the
322 * PHY ID. */
323 phy->id = IGP01E1000_I_PHY_ID;
324 break;
325 case e1000_82573:
326 return e1000e_get_phy_id(hw);
327 break;
328 default:
329 return -E1000_ERR_PHY;
330 break;
331 }
332
333 return 0;
334}
335
/**
 * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM by latching the
 * SWESMBI bit in SWSM.  Returns 0 on success or -E1000_ERR_NVM if the
 * bit never latches within the timeout (which scales with the NVM
 * word size).
 **/
static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (er32(SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000e_put_hw_semaphore(hw);
		hw_dbg(hw, "Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}
369
370/**
371 * e1000_put_hw_semaphore_82571 - Release hardware semaphore
372 * @hw: pointer to the HW structure
373 *
374 * Release hardware semaphore used to access the PHY or NVM
375 **/
376static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
377{
378 u32 swsm;
379
380 swsm = er32(SWSM);
381
382 swsm &= ~E1000_SWSM_SWESMBI;
383
384 ew32(SWSM, swsm);
385}
386
387/**
388 * e1000_acquire_nvm_82571 - Request for access to the EEPROM
389 * @hw: pointer to the HW structure
390 *
391 * To gain access to the EEPROM, first we must obtain a hardware semaphore.
392 * Then for non-82573 hardware, set the EEPROM access request bit and wait
393 * for EEPROM access grant bit. If the access grant bit is not set, release
394 * hardware semaphore.
395 **/
396static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
397{
398 s32 ret_val;
399
400 ret_val = e1000_get_hw_semaphore_82571(hw);
401 if (ret_val)
402 return ret_val;
403
404 if (hw->mac.type != e1000_82573)
405 ret_val = e1000e_acquire_nvm(hw);
406
407 if (ret_val)
408 e1000_put_hw_semaphore_82571(hw);
409
410 return ret_val;
411}
412
/**
 * e1000_release_nvm_82571 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM, clear the EEPROM request bit,
 * and drop the hardware semaphore taken by e1000_acquire_nvm_82571().
 **/
static void e1000_release_nvm_82571(struct e1000_hw *hw)
{
	e1000e_release_nvm(hw);
	e1000_put_hw_semaphore_82571(hw);
}
424
425/**
426 * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface
427 * @hw: pointer to the HW structure
428 * @offset: offset within the EEPROM to be written to
429 * @words: number of words to write
430 * @data: 16 bit word(s) to be written to the EEPROM
431 *
432 * For non-82573 silicon, write data to EEPROM at offset using SPI interface.
433 *
434 * If e1000e_update_nvm_checksum is not called after this function, the
435 * EEPROM will most likley contain an invalid checksum.
436 **/
437static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
438 u16 *data)
439{
440 s32 ret_val;
441
442 switch (hw->mac.type) {
443 case e1000_82573:
444 ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
445 break;
446 case e1000_82571:
447 case e1000_82572:
448 ret_val = e1000e_write_nvm_spi(hw, offset, words, data);
449 break;
450 default:
451 ret_val = -E1000_ERR_NVM;
452 break;
453 }
454
455 return ret_val;
456}
457
/**
 * e1000_update_nvm_checksum_82571 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum.  Then calculates the EEPROM checksum and writes the
 * value to the EEPROM.  For flash-based NVM the checksum must additionally
 * be committed to flash, which this function sequences (wait for pending
 * updates, optionally reset STM firmware, trigger FLUPD, wait for done).
 **/
static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
{
	u32 eecd;
	s32 ret_val;
	u16 i;

	ret_val = e1000e_update_nvm_checksum_generic(hw);
	if (ret_val)
		return ret_val;

	/* If our nvm is an EEPROM, then we're done
	 * otherwise, commit the checksum to the flash NVM. */
	if (hw->nvm.type != e1000_nvm_flash_hw)
		return ret_val;

	/* Check for pending operations. */
	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
		msleep(1);
		if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
			break;
	}

	if (i == E1000_FLASH_UPDATES)
		return -E1000_ERR_NVM;

	/* Reset the firmware if using STM opcode. */
	if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
		/* The enabling of and the actual reset must be done
		 * in two write cycles.
		 */
		ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
		e1e_flush();
		ew32(HICR, E1000_HICR_FW_RESET);
	}

	/* Commit the write to flash */
	eecd = er32(EECD) | E1000_EECD_FLUPD;
	ew32(EECD, eecd);

	/* Poll until the hardware clears FLUPD, signalling completion. */
	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
		msleep(1);
		if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
			break;
	}

	if (i == E1000_FLASH_UPDATES)
		return -E1000_ERR_NVM;

	return 0;
}
516
/**
 * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 * Flash-based NVM is given a chance to repair its checksum first.
 **/
static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
{
	if (hw->nvm.type == e1000_nvm_flash_hw)
		e1000_fix_nvm_checksum_82571(hw);

	return e1000e_validate_nvm_checksum_generic(hw);
}
531
/**
 * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon
 * @hw: pointer to the HW structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * After checking for invalid values, poll the EEPROM to ensure the previous
 * command has completed before trying to write the next word.  After write
 * poll for completion.
 *
 * If e1000e_update_nvm_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
				      u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i;
	u32 eewr = 0;
	s32 ret_val = 0;

	/* A check for invalid values: offset too large, too many words,
	 * and not enough words. */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	for (i = 0; i < words; i++) {
		/* Pack data word, target address, and the start bit into
		 * a single EEWR register write. */
		eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
		       ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
		       E1000_NVM_RW_REG_START;

		/* Wait for any previous write to finish... */
		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
		if (ret_val)
			break;

		ew32(EEWR, eewr);

		/* ...and for this one to complete. */
		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
		if (ret_val)
			break;
	}

	return ret_val;
}
580
581/**
582 * e1000_get_cfg_done_82571 - Poll for configuration done
583 * @hw: pointer to the HW structure
584 *
585 * Reads the management control register for the config done bit to be set.
586 **/
587static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
588{
589 s32 timeout = PHY_CFG_TIMEOUT;
590
591 while (timeout) {
592 if (er32(EEMNGCTL) &
593 E1000_NVM_CFG_DONE_PORT_0)
594 break;
595 msleep(1);
596 timeout--;
597 }
598 if (!timeout) {
599 hw_dbg(hw, "MNG configuration cycle has not completed.\n");
600 return -E1000_ERR_RESET;
601 }
602
603 return 0;
604}
605
606/**
607 * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
608 * @hw: pointer to the HW structure
609 * @active: TRUE to enable LPLU, FALSE to disable
610 *
611 * Sets the LPLU D0 state according to the active flag. When activating LPLU
612 * this function also disables smart speed and vice versa. LPLU will not be
613 * activated unless the device autonegotiation advertisement meets standards
614 * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function
615 * pointer entry point only called by PHY setup routines.
616 **/
617static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
618{
619 struct e1000_phy_info *phy = &hw->phy;
620 s32 ret_val;
621 u16 data;
622
623 ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
624 if (ret_val)
625 return ret_val;
626
627 if (active) {
628 data |= IGP02E1000_PM_D0_LPLU;
629 ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
630 if (ret_val)
631 return ret_val;
632
633 /* When LPLU is enabled, we should disable SmartSpeed */
634 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
635 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
636 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
637 if (ret_val)
638 return ret_val;
639 } else {
640 data &= ~IGP02E1000_PM_D0_LPLU;
641 ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
642 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
643 * during Dx states where the power conservation is most
644 * important. During driver activity we should enable
645 * SmartSpeed, so performance is maintained. */
646 if (phy->smart_speed == e1000_smart_speed_on) {
647 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
648 &data);
649 if (ret_val)
650 return ret_val;
651
652 data |= IGP01E1000_PSCFR_SMART_SPEED;
653 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
654 data);
655 if (ret_val)
656 return ret_val;
657 } else if (phy->smart_speed == e1000_smart_speed_off) {
658 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
659 &data);
660 if (ret_val)
661 return ret_val;
662
663 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
664 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
665 data);
666 if (ret_val)
667 return ret_val;
668 }
669 }
670
671 return 0;
672}
673
/**
 * e1000_reset_hw_82571 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state.  This is a
 * function pointer entry point called by the api module.
 * The sequence (quiesce DMA, mask IRQs, stop RX/TX, take MDIO ownership
 * on 82573, global reset, wait for auto-read/PHY config) is order-critical.
 **/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{
	u32 ctrl;
	u32 extcnf_ctrl;
	u32 ctrl_ext;
	u32 icr;
	s32 ret_val;
	u16 i = 0;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000e_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");

	hw_dbg(hw, "Masking off all interrupts\n");
	ew32(IMC, 0xffffffff);

	/* Stop receive and transmit before resetting. */
	ew32(RCTL, 0);
	ew32(TCTL, E1000_TCTL_PSP);
	e1e_flush();

	msleep(10);

	/* Must acquire the MDIO ownership before MAC reset.
	 * Ownership defaults to firmware after a reset. */
	if (hw->mac.type == e1000_82573) {
		extcnf_ctrl = er32(EXTCNF_CTRL);
		extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;

		do {
			ew32(EXTCNF_CTRL, extcnf_ctrl);
			extcnf_ctrl = er32(EXTCNF_CTRL);

			if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
				break;

			extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;

			msleep(2);
			i++;
		} while (i < MDIO_OWNERSHIP_TIMEOUT);
	}

	ctrl = er32(CTRL);

	hw_dbg(hw, "Issuing a global reset to MAC\n");
	ew32(CTRL, ctrl | E1000_CTRL_RST);

	/* Flash-based NVM additionally needs an EEPROM reset strobe. */
	if (hw->nvm.type == e1000_nvm_flash_hw) {
		udelay(10);
		ctrl_ext = er32(CTRL_EXT);
		ctrl_ext |= E1000_CTRL_EXT_EE_RST;
		ew32(CTRL_EXT, ctrl_ext);
		e1e_flush();
	}

	ret_val = e1000e_get_auto_rd_done(hw);
	if (ret_val)
		/* We don't want to continue accessing MAC registers. */
		return ret_val;

	/* Phy configuration from NVM just starts after EECD_AUTO_RD is set.
	 * Need to wait for Phy configuration completion before accessing
	 * NVM and Phy.
	 */
	if (hw->mac.type == e1000_82573)
		msleep(25);

	/* Clear any pending interrupt events. */
	ew32(IMC, 0xffffffff);
	icr = er32(ICR);

	return 0;
}
757
/**
 * e1000_init_hw_82571 - Initialize hardware
 * @hw: pointer to the HW structure
 *
 * This inits the hardware readying it for operation: LED init, VLAN
 * filter clear, receive-address setup, MTA zeroing, link/flow-control
 * setup, TX descriptor write-back policy, and statistics clearing.
 **/
static s32 e1000_init_hw_82571(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 reg_data;
	s32 ret_val;
	u16 i;
	u16 rar_count = mac->rar_entry_count;

	e1000_initialize_hw_bits_82571(hw);

	/* Initialize identification LED */
	ret_val = e1000e_id_led_init(hw);
	if (ret_val) {
		hw_dbg(hw, "Error initializing identification LED\n");
		return ret_val;
	}

	/* Disabling VLAN filtering */
	hw_dbg(hw, "Initializing the IEEE VLAN\n");
	e1000e_clear_vfta(hw);

	/* Setup the receive address. */
	/* If, however, a locally administered address was assigned to the
	 * 82571, we must reserve a RAR for it to work around an issue where
	 * resetting one port will reload the MAC on the other port.
	 */
	if (e1000e_get_laa_state_82571(hw))
		rar_count--;
	e1000e_init_rx_addrs(hw, rar_count);

	/* Zero out the Multicast HASH table */
	hw_dbg(hw, "Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* Setup link and flow control */
	ret_val = e1000_setup_link_82571(hw);

	/* Set the transmit descriptor write-back policy */
	reg_data = er32(TXDCTL);
	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
		   E1000_TXDCTL_FULL_TX_DESC_WB |
		   E1000_TXDCTL_COUNT_DESC;
	ew32(TXDCTL, reg_data);

	/* ...for both queues. */
	if (mac->type != e1000_82573) {
		reg_data = er32(TXDCTL1);
		reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
			   E1000_TXDCTL_FULL_TX_DESC_WB |
			   E1000_TXDCTL_COUNT_DESC;
		ew32(TXDCTL1, reg_data);
	} else {
		/* 82573: enable management packet filtering and allow L1
		 * ASPM entry without L0s on the receive side. */
		e1000e_enable_tx_pkt_filtering(hw);
		reg_data = er32(GCR);
		reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
		ew32(GCR, reg_data);
	}

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_82571(hw);

	return ret_val;
}
832
/**
 * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits
 * @hw: pointer to the HW structure
 *
 * Initializes required hardware-dependent bits needed for normal operation.
 * NOTE(review): the individual bit positions are silicon-errata settings;
 * their meanings are not documented here — consult the 82571/82572/82573
 * specification updates before changing any of them.
 **/
static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
{
	u32 reg;

	/* Transmit Descriptor Control 0 */
	reg = er32(TXDCTL);
	reg |= (1 << 22);
	ew32(TXDCTL, reg);

	/* Transmit Descriptor Control 1 */
	reg = er32(TXDCTL1);
	reg |= (1 << 22);
	ew32(TXDCTL1, reg);

	/* Transmit Arbitration Control 0 */
	reg = er32(TARC0);
	reg &= ~(0xF << 27); /* 30:27 */
	switch (hw->mac.type) {
	case e1000_82571:
	case e1000_82572:
		reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
		break;
	default:
		break;
	}
	ew32(TARC0, reg);

	/* Transmit Arbitration Control 1 */
	reg = er32(TARC1);
	switch (hw->mac.type) {
	case e1000_82571:
	case e1000_82572:
		reg &= ~((1 << 29) | (1 << 30));
		reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
		/* Bit 28 depends on whether multiple-request is enabled. */
		if (er32(TCTL) & E1000_TCTL_MULR)
			reg &= ~(1 << 28);
		else
			reg |= (1 << 28);
		ew32(TARC1, reg);
		break;
	default:
		break;
	}

	/* Device Control */
	if (hw->mac.type == e1000_82573) {
		reg = er32(CTRL);
		reg &= ~(1 << 29);
		ew32(CTRL, reg);
	}

	/* Extended Device Control */
	if (hw->mac.type == e1000_82573) {
		reg = er32(CTRL_EXT);
		reg &= ~(1 << 23);
		reg |= (1 << 22);
		ew32(CTRL_EXT, reg);
	}
}
898
/**
 * e1000e_clear_vfta - Clear VLAN filter table
 * @hw: pointer to the HW structure
 *
 * Clears the register array which contains the VLAN filter table by
 * setting all the values to 0.  On 82573 with an active manageability
 * VLAN cookie, the bit for that VLAN ID is preserved so management
 * traffic keeps flowing.
 **/
void e1000e_clear_vfta(struct e1000_hw *hw)
{
	u32 offset;
	u32 vfta_value = 0;
	u32 vfta_offset = 0;
	u32 vfta_bit_in_reg = 0;

	if (hw->mac.type == e1000_82573) {
		if (hw->mng_cookie.vlan_id != 0) {
			/* The VFTA is a 4096b bit-field, each identifying
			 * a single VLAN ID.  The following operations
			 * determine which 32b entry (i.e. offset) into the
			 * array we want to set the VLAN ID (i.e. bit) of
			 * the manageability unit.
			 */
			vfta_offset = (hw->mng_cookie.vlan_id >>
				       E1000_VFTA_ENTRY_SHIFT) &
				      E1000_VFTA_ENTRY_MASK;
			vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
				       E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
		}
	}
	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
		/* If the offset we want to clear is the same offset of the
		 * manageability VLAN ID, then clear all bits except that of
		 * the manageability unit.
		 */
		vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
		e1e_flush();
	}
}
938
939/**
940 * e1000_mc_addr_list_update_82571 - Update Multicast addresses
941 * @hw: pointer to the HW structure
942 * @mc_addr_list: array of multicast addresses to program
943 * @mc_addr_count: number of multicast addresses to program
944 * @rar_used_count: the first RAR register free to program
945 * @rar_count: total number of supported Receive Address Registers
946 *
947 * Updates the Receive Address Registers and Multicast Table Array.
948 * The caller must have a packed mc_addr_list of multicast addresses.
949 * The parameter rar_count will usually be hw->mac.rar_entry_count
950 * unless there are workarounds that change this.
951 **/
952static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw,
953 u8 *mc_addr_list,
954 u32 mc_addr_count,
955 u32 rar_used_count,
956 u32 rar_count)
957{
958 if (e1000e_get_laa_state_82571(hw))
959 rar_count--;
960
961 e1000e_mc_addr_list_update_generic(hw, mc_addr_list, mc_addr_count,
962 rar_used_count, rar_count);
963}
964
/**
 * e1000_setup_link_82571 - Setup flow control and link settings
 * @hw: pointer to the HW structure
 *
 * Determines which flow control settings to use, then configures flow
 * control.  Calls the appropriate media-specific link configuration
 * function.  Assuming the adapter has a valid link partner, a valid link
 * should be established.  Assumes the hardware has previously been reset
 * and the transmitter and receiver are not enabled.
 **/
static s32 e1000_setup_link_82571(struct e1000_hw *hw)
{
	/* 82573 does not have a word in the NVM to determine
	 * the default flow control setting, so we explicitly
	 * set it to full.
	 */
	if (hw->mac.type == e1000_82573)
		hw->mac.fc = e1000_fc_full;

	return e1000e_setup_link(hw);
}
986
/**
 * e1000_setup_copper_link_82571 - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-neg or forced speed and duplex.  Then we
 * check for link, once link is established calls to configure collision
 * distance and flow control are called.  Dispatches PHY-specific setup
 * for M88 (82573) and IGP2 (82571/82572, which also gets its activity
 * LED programmed here).
 **/
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
{
	u32 ctrl;
	u32 led_ctrl;
	s32 ret_val;

	/* Set link up and let the PHY (not forced MAC settings) control
	 * speed/duplex. */
	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	ew32(CTRL, ctrl);

	switch (hw->phy.type) {
	case e1000_phy_m88:
		ret_val = e1000e_copper_link_setup_m88(hw);
		break;
	case e1000_phy_igp_2:
		ret_val = e1000e_copper_link_setup_igp(hw);
		/* Setup activity LED */
		led_ctrl = er32(LEDCTL);
		led_ctrl &= IGP_ACTIVITY_LED_MASK;
		led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
		ew32(LEDCTL, led_ctrl);
		break;
	default:
		return -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		return ret_val;

	ret_val = e1000e_setup_copper_link(hw);

	return ret_val;
}
1030
1031/**
1032 * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes
1033 * @hw: pointer to the HW structure
1034 *
1035 * Configures collision distance and flow control for fiber and serdes links.
1036 * Upon successful setup, poll for link.
1037 **/
1038static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
1039{
1040 switch (hw->mac.type) {
1041 case e1000_82571:
1042 case e1000_82572:
1043 /* If SerDes loopback mode is entered, there is no form
1044 * of reset to take the adapter out of that mode. So we
1045 * have to explicitly take the adapter out of loopback
1046 * mode. This prevents drivers from twidling their thumbs
1047 * if another tool failed to take it out of loopback mode.
1048 */
1049 ew32(SCTL,
1050 E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1051 break;
1052 default:
1053 break;
1054 }
1055
1056 return e1000e_setup_fiber_serdes_link(hw);
1057}
1058
1059/**
1060 * e1000_valid_led_default_82571 - Verify a valid default LED config
1061 * @hw: pointer to the HW structure
1062 * @data: pointer to the NVM (EEPROM)
1063 *
1064 * Read the EEPROM for the current default LED configuration. If the
1065 * LED configuration is not valid, set to a valid LED configuration.
1066 **/
1067static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
1068{
1069 s32 ret_val;
1070
1071 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1072 if (ret_val) {
1073 hw_dbg(hw, "NVM Read Error\n");
1074 return ret_val;
1075 }
1076
1077 if (hw->mac.type == e1000_82573 &&
1078 *data == ID_LED_RESERVED_F746)
1079 *data = ID_LED_DEFAULT_82573;
1080 else if (*data == ID_LED_RESERVED_0000 ||
1081 *data == ID_LED_RESERVED_FFFF)
1082 *data = ID_LED_DEFAULT;
1083
1084 return 0;
1085}
1086
1087/**
1088 * e1000e_get_laa_state_82571 - Get locally administered address state
1089 * @hw: pointer to the HW structure
1090 *
1091 * Retrieve and return the current locally administed address state.
1092 **/
1093bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
1094{
1095 if (hw->mac.type != e1000_82571)
1096 return 0;
1097
1098 return hw->dev_spec.e82571.laa_is_present;
1099}
1100
1101/**
1102 * e1000e_set_laa_state_82571 - Set locally administered address state
1103 * @hw: pointer to the HW structure
1104 * @state: enable/disable locally administered address
1105 *
1106 * Enable/Disable the current locally administed address state.
1107 **/
1108void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
1109{
1110 if (hw->mac.type != e1000_82571)
1111 return;
1112
1113 hw->dev_spec.e82571.laa_is_present = state;
1114
1115 /* If workaround is activated... */
1116 if (state)
1117 /* Hold a copy of the LAA in RAR[14] This is done so that
1118 * between the time RAR[0] gets clobbered and the time it
1119 * gets fixed, the actual LAA is in one of the RARs and no
1120 * incoming packets directed to this port are dropped.
1121 * Eventually the LAA will be in RAR[0] and RAR[14].
1122 */
1123 e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1);
1124}
1125
/**
 * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Verifies that the EEPROM has completed the update.  After updating the
 * EEPROM, we need to check bit 15 in word 0x23 for the checksum fix.  If
 * the checksum fix is not implemented, we need to set the bit and update
 * the checksum.  Otherwise, if bit 15 is set and the checksum is incorrect,
 * we need to return bad checksum.
 **/
static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	s32 ret_val;
	u16 data;

	/* The workaround applies only to flash-based hardware NVM. */
	if (nvm->type != e1000_nvm_flash_hw)
		return 0;

	/* Check bit 4 of word 10h.  If it is 0, firmware is done updating
	 * 10h-12h.  Checksum may need to be fixed.
	 */
	ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
	if (ret_val)
		return ret_val;

	if (!(data & 0x10)) {
		/* Read 0x23 and check bit 15.  This bit is a 1
		 * when the checksum has already been fixed.  If
		 * the checksum is still wrong and this bit is a
		 * 1, we need to return bad checksum.  Otherwise,
		 * we need to set this bit to a 1 and update the
		 * checksum.
		 */
		ret_val = e1000_read_nvm(hw, 0x23, 1, &data);
		if (ret_val)
			return ret_val;

		if (!(data & 0x8000)) {
			/* Mark the fix as applied, then recompute the
			 * NVM checksum over the updated contents.
			 */
			data |= 0x8000;
			ret_val = e1000_write_nvm(hw, 0x23, 1, &data);
			if (ret_val)
				return ret_val;
			ret_val = e1000e_update_nvm_checksum(hw);
		}
	}

	return 0;
}
1175
/**
 * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the hardware counters by reading the counter registers.
 * These statistics registers are read-to-clear, so each er32() below is
 * performed purely for its side effect; the value read into @temp is
 * intentionally discarded.
 **/
static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
{
	u32 temp;

	/* Clear the counters common to all parts first. */
	e1000e_clear_hw_cntrs_base(hw);

	/* Packet-size-bucketed receive/transmit counters. */
	temp = er32(PRC64);
	temp = er32(PRC127);
	temp = er32(PRC255);
	temp = er32(PRC511);
	temp = er32(PRC1023);
	temp = er32(PRC1522);
	temp = er32(PTC64);
	temp = er32(PTC127);
	temp = er32(PTC255);
	temp = er32(PTC511);
	temp = er32(PTC1023);
	temp = er32(PTC1522);

	/* Error and transmit-status counters. */
	temp = er32(ALGNERRC);
	temp = er32(RXERRC);
	temp = er32(TNCRS);
	temp = er32(CEXTERR);
	temp = er32(TSCTC);
	temp = er32(TSCTFC);

	/* Management-packet counters. */
	temp = er32(MGTPRC);
	temp = er32(MGTPDC);
	temp = er32(MGTPTC);

	temp = er32(IAC);
	temp = er32(ICRXOC);

	/* Interrupt-cause counters. */
	temp = er32(ICRXPTC);
	temp = er32(ICRXATC);
	temp = er32(ICTXPTC);
	temp = er32(ICTXATC);
	temp = er32(ICTXQEC);
	temp = er32(ICTXQMTC);
	temp = er32(ICRXDMTC);
}
1223
/* MAC operations table shared by the 82571/82572/82573 parts.  Hooks
 * marked "media type dependent" are left NULL here and assigned
 * elsewhere based on the detected media type.
 */
static struct e1000_mac_operations e82571_mac_ops = {
	.mng_mode_enab		= E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT,
	/* .check_for_link: media type dependent */
	.cleanup_led		= e1000e_cleanup_led_generic,
	.clear_hw_cntrs		= e1000_clear_hw_cntrs_82571,
	.get_bus_info		= e1000e_get_bus_info_pcie,
	/* .get_link_up_info: media type dependent */
	.led_on			= e1000e_led_on_generic,
	.led_off		= e1000e_led_off_generic,
	.mc_addr_list_update	= e1000_mc_addr_list_update_82571,
	.reset_hw		= e1000_reset_hw_82571,
	.init_hw		= e1000_init_hw_82571,
	.setup_link		= e1000_setup_link_82571,
	/* .setup_physical_interface: media type dependent */
};
1239
/* PHY operations for IGP2 PHYs (used by the 82571/82572 info tables
 * below).  Acquire/release go through the 82571 HW semaphore.
 */
static struct e1000_phy_operations e82_phy_ops_igp = {
	.acquire_phy		= e1000_get_hw_semaphore_82571,
	.check_reset_block	= e1000e_check_reset_block_generic,
	.commit_phy		= NULL,
	.force_speed_duplex	= e1000e_phy_force_speed_duplex_igp,
	.get_cfg_done		= e1000_get_cfg_done_82571,
	.get_cable_length	= e1000e_get_cable_length_igp_2,
	.get_phy_info		= e1000e_get_phy_info_igp,
	.read_phy_reg		= e1000e_read_phy_reg_igp,
	.release_phy		= e1000_put_hw_semaphore_82571,
	.reset_phy		= e1000e_phy_hw_reset_generic,
	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
	.write_phy_reg		= e1000e_write_phy_reg_igp,
};
1255
/* PHY operations for M88 PHYs (used by the 82573 info table below).
 * Differs from the IGP table in the M88-specific read/write, cable
 * length and PHY-info helpers, and performs a SW reset on commit.
 */
static struct e1000_phy_operations e82_phy_ops_m88 = {
	.acquire_phy		= e1000_get_hw_semaphore_82571,
	.check_reset_block	= e1000e_check_reset_block_generic,
	.commit_phy		= e1000e_phy_sw_reset,
	.force_speed_duplex	= e1000e_phy_force_speed_duplex_m88,
	.get_cfg_done		= e1000e_get_cfg_done,
	.get_cable_length	= e1000e_get_cable_length_m88,
	.get_phy_info		= e1000e_get_phy_info_m88,
	.read_phy_reg		= e1000e_read_phy_reg_m88,
	.release_phy		= e1000_put_hw_semaphore_82571,
	.reset_phy		= e1000e_phy_hw_reset_generic,
	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
	.write_phy_reg		= e1000e_write_phy_reg_m88,
};
1271
/* NVM operations for 82571/82572: reads go through the SPI interface
 * (e1000e_read_nvm_spi); compare with e82573_nvm_ops below, which only
 * differs in its read hook.
 */
static struct e1000_nvm_operations e82571_nvm_ops = {
	.acquire_nvm		= e1000_acquire_nvm_82571,
	.read_nvm		= e1000e_read_nvm_spi,
	.release_nvm		= e1000_release_nvm_82571,
	.update_nvm		= e1000_update_nvm_checksum_82571,
	.valid_led_default	= e1000_valid_led_default_82571,
	.validate_nvm		= e1000_validate_nvm_checksum_82571,
	.write_nvm		= e1000_write_nvm_82571,
};
1281
/* NVM operations for the 82573: identical to e82571_nvm_ops except
 * reads use the EERD register interface (e1000e_read_nvm_eerd).
 */
static struct e1000_nvm_operations e82573_nvm_ops = {
	.acquire_nvm		= e1000_acquire_nvm_82571,
	.read_nvm		= e1000e_read_nvm_eerd,
	.release_nvm		= e1000_release_nvm_82571,
	.update_nvm		= e1000_update_nvm_checksum_82571,
	.valid_led_default	= e1000_valid_led_default_82571,
	.validate_nvm		= e1000_validate_nvm_checksum_82571,
	.write_nvm		= e1000_write_nvm_82571,
};
1291
/* Board/feature description for 82571-based adapters.  Two flags are
 * errata workarounds (LAA overwritten on reset, TARC speed-mode bit).
 */
struct e1000_info e1000_82571_info = {
	.mac			= e1000_82571,
	.flags			= FLAG_HAS_HW_VLAN_FILTER
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_HAS_STATS_PTC_PRC
				  | FLAG_HAS_WOL
				  | FLAG_APME_IN_CTRL3
				  | FLAG_RX_CSUM_ENABLED
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_STATS_ICR_ICT
				  | FLAG_HAS_SMART_POWER_DOWN
				  | FLAG_RESET_OVERWRITES_LAA /* errata */
				  | FLAG_TARC_SPEED_MODE_BIT /* errata */
				  | FLAG_APME_CHECK_PORT_B,
	.pba			= 38,	/* packet buffer allocation, KB */
	.get_invariants		= e1000_get_invariants_82571,
	.mac_ops		= &e82571_mac_ops,
	.phy_ops		= &e82_phy_ops_igp,
	.nvm_ops		= &e82571_nvm_ops,
};
1312
/* Board/feature description for 82572-based adapters.  Same ops tables
 * as the 82571 but without the LAA/port-B/smart-power-down flags.
 */
struct e1000_info e1000_82572_info = {
	.mac			= e1000_82572,
	.flags			= FLAG_HAS_HW_VLAN_FILTER
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_HAS_STATS_PTC_PRC
				  | FLAG_HAS_WOL
				  | FLAG_APME_IN_CTRL3
				  | FLAG_RX_CSUM_ENABLED
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_STATS_ICR_ICT
				  | FLAG_TARC_SPEED_MODE_BIT, /* errata */
	.pba			= 38,	/* packet buffer allocation, KB */
	.get_invariants		= e1000_get_invariants_82571,
	.mac_ops		= &e82571_mac_ops,
	.phy_ops		= &e82_phy_ops_igp,
	.nvm_ops		= &e82571_nvm_ops,
};
1330
/* Board/feature description for 82573-based adapters: M88 PHY ops,
 * EERD-based NVM reads, smaller packet buffer, and manageability
 * (AMT/ASPM/ERT/SWSM) features.
 */
struct e1000_info e1000_82573_info = {
	.mac			= e1000_82573,
	.flags			= FLAG_HAS_HW_VLAN_FILTER
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_HAS_STATS_PTC_PRC
				  | FLAG_HAS_WOL
				  | FLAG_APME_IN_CTRL3
				  | FLAG_RX_CSUM_ENABLED
				  | FLAG_HAS_STATS_ICR_ICT
				  | FLAG_HAS_SMART_POWER_DOWN
				  | FLAG_HAS_AMT
				  | FLAG_HAS_ASPM
				  | FLAG_HAS_ERT
				  | FLAG_HAS_SWSM_ON_LOAD,
	.pba			= 20,	/* packet buffer allocation, KB */
	.get_invariants		= e1000_get_invariants_82571,
	.mac_ops		= &e82571_mac_ops,
	.phy_ops		= &e82_phy_ops_m88,
	.nvm_ops		= &e82573_nvm_ops,
};
1351
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile
new file mode 100644
index 000000000000..650f866e7ac2
--- /dev/null
+++ b/drivers/net/e1000e/Makefile
@@ -0,0 +1,37 @@
1################################################################################
2#
3# Intel PRO/1000 Linux driver
4# Copyright(c) 1999 - 2007 Intel Corporation.
5#
6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License,
8# version 2, as published by the Free Software Foundation.
9#
10# This program is distributed in the hope it will be useful, but WITHOUT
11# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13# more details.
14#
15# You should have received a copy of the GNU General Public License along with
16# this program; if not, write to the Free Software Foundation, Inc.,
17# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18#
19# The full GNU General Public License is included in this distribution in
20# the file called "COPYING".
21#
22# Contact Information:
23# Linux NICS <linux.nics@intel.com>
24# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26#
27################################################################################
28
29#
30# Makefile for the Intel(R) PRO/1000 ethernet driver
31#
32
obj-$(CONFIG_E1000E) += e1000e.o

# Object files linked together into the e1000e module, one per source file.
e1000e-objs := 82571.o ich8lan.o es2lan.o \
	       lib.o phy.o param.o ethtool.o netdev.o
37
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
new file mode 100644
index 000000000000..b32ed45b4b34
--- /dev/null
+++ b/drivers/net/e1000e/defines.h
@@ -0,0 +1,739 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#ifndef _E1000_DEFINES_H_
30#define _E1000_DEFINES_H_
31
32#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
33#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
34#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
35#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
36#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
37#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
38#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
39#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
40#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
41#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
42#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
43#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
44#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
45#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
46#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
47#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
48#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
49#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
50
51/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
52#define REQ_TX_DESCRIPTOR_MULTIPLE 8
53#define REQ_RX_DESCRIPTOR_MULTIPLE 8
54
55/* Definitions for power management and wakeup registers */
56/* Wake Up Control */
57#define E1000_WUC_APME 0x00000001 /* APM Enable */
58#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
59
60/* Wake Up Filter Control */
61#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
62#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
63#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
64#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
65#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
66
67/* Extended Device Control */
#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
69#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
70#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
71#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
72#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
73#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
74#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
75#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
76
/* Receive Descriptor bit definitions */
78#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
79#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
80#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
81#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
#define E1000_RXD_STAT_UDPCS  0x10    /* UDP xsum calculated */
83#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
84#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
85#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
86#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
87#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
88#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
89#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
90#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
91
92#define E1000_RXDEXT_STATERR_CE 0x01000000
93#define E1000_RXDEXT_STATERR_SE 0x02000000
94#define E1000_RXDEXT_STATERR_SEQ 0x04000000
95#define E1000_RXDEXT_STATERR_CXE 0x10000000
96#define E1000_RXDEXT_STATERR_RXE 0x80000000
97
98/* mask to determine if packets should be dropped due to frame errors */
99#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
100 E1000_RXD_ERR_CE | \
101 E1000_RXD_ERR_SE | \
102 E1000_RXD_ERR_SEQ | \
103 E1000_RXD_ERR_CXE | \
104 E1000_RXD_ERR_RXE)
105
106/* Same mask, but for extended and packet split descriptors */
107#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
108 E1000_RXDEXT_STATERR_CE | \
109 E1000_RXDEXT_STATERR_SE | \
110 E1000_RXDEXT_STATERR_SEQ | \
111 E1000_RXDEXT_STATERR_CXE | \
112 E1000_RXDEXT_STATERR_RXE)
113
114#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
115
116/* Management Control */
117#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
118#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
119#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
120#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
121#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
122#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
123 * filtering */
124#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
125 * memory */
126
127/* Receive Control */
128#define E1000_RCTL_EN 0x00000002 /* enable */
129#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
130#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
131#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
132#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
133#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
134#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
135#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
136#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
137#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
138#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
139#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
140/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
141#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
142#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
143#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
144#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
145/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
146#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
147#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
148#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
149#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
150#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
151#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
152#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
153#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
154
155/* Use byte values for the following shift parameters
156 * Usage:
157 * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
158 * E1000_PSRCTL_BSIZE0_MASK) |
159 * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
160 * E1000_PSRCTL_BSIZE1_MASK) |
161 * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
162 * E1000_PSRCTL_BSIZE2_MASK) |
163 * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |;
164 * E1000_PSRCTL_BSIZE3_MASK))
165 * where value0 = [128..16256], default=256
166 * value1 = [1024..64512], default=4096
167 * value2 = [0..64512], default=4096
168 * value3 = [0..64512], default=0
169 */
170
171#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
172#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
173#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
174#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
175
176#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
177#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
178#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
179#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
180
181/* SWFW_SYNC Definitions */
182#define E1000_SWFW_EEP_SM 0x1
183#define E1000_SWFW_PHY0_SM 0x2
184#define E1000_SWFW_PHY1_SM 0x4
185
186/* Device Control */
187#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
188#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
189#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
190#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
191#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
192#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
193#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
194#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
195#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
196#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
197#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
198#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
199#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
200#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
201#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
202#define E1000_CTRL_RST 0x04000000 /* Global reset */
203#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
204#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
205#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
206#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
207
208/* Bit definitions for the Management Data IO (MDIO) and Management Data
209 * Clock (MDC) pins in the Device Control Register.
210 */
211
212/* Device Status */
213#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
214#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
215#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
216#define E1000_STATUS_FUNC_SHIFT 2
217#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
218#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
219#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
220#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
221#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
222#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
223#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
224
/* Constants used to interpret the masked PCI-X bus speed. */
226
227#define HALF_DUPLEX 1
228#define FULL_DUPLEX 2
229
230
231#define ADVERTISE_10_HALF 0x0001
232#define ADVERTISE_10_FULL 0x0002
233#define ADVERTISE_100_HALF 0x0004
234#define ADVERTISE_100_FULL 0x0008
235#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
236#define ADVERTISE_1000_FULL 0x0020
237
238/* 1000/H is not supported, nor spec-compliant. */
239#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
240 ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
241 ADVERTISE_1000_FULL)
242#define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
243 ADVERTISE_100_HALF | ADVERTISE_100_FULL)
244#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
245#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
246#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
247
248#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
249
250/* LED Control */
251#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
252#define E1000_LEDCTL_LED0_MODE_SHIFT 0
253#define E1000_LEDCTL_LED0_IVRT 0x00000040
254#define E1000_LEDCTL_LED0_BLINK 0x00000080
255
256#define E1000_LEDCTL_MODE_LED_ON 0xE
257#define E1000_LEDCTL_MODE_LED_OFF 0xF
258
/* Transmit Descriptor bit definitions.
 * NOTE(review): apart from E1000_TXD_DTYP_D, these duplicate the
 * identical definitions near the top of this header.  Benign because
 * the values match, but one copy could be removed.
 */
#define E1000_TXD_DTYP_D	0x00100000 /* Data Descriptor */
#define E1000_TXD_POPTS_IXSM	0x01	   /* Insert IP checksum */
#define E1000_TXD_POPTS_TXSM	0x02	   /* Insert TCP/UDP checksum */
#define E1000_TXD_CMD_EOP	0x01000000 /* End of Packet */
#define E1000_TXD_CMD_IFCS	0x02000000 /* Insert FCS (Ethernet CRC) */
#define E1000_TXD_CMD_IC	0x04000000 /* Insert Checksum */
#define E1000_TXD_CMD_RS	0x08000000 /* Report Status */
#define E1000_TXD_CMD_RPS	0x10000000 /* Report Packet Sent */
#define E1000_TXD_CMD_DEXT	0x20000000 /* Descriptor extension (0 = legacy) */
#define E1000_TXD_CMD_VLE	0x40000000 /* Add VLAN tag */
#define E1000_TXD_CMD_IDE	0x80000000 /* Enable Tidv register */
#define E1000_TXD_STAT_DD	0x00000001 /* Descriptor Done */
#define E1000_TXD_STAT_EC	0x00000002 /* Excess Collisions */
#define E1000_TXD_STAT_LC	0x00000004 /* Late Collisions */
#define E1000_TXD_STAT_TU	0x00000008 /* Transmit underrun */
#define E1000_TXD_CMD_TCP	0x01000000 /* TCP packet */
#define E1000_TXD_CMD_IP	0x02000000 /* IP packet */
#define E1000_TXD_CMD_TSE	0x04000000 /* TCP Seg enable */
#define E1000_TXD_STAT_TC	0x00000004 /* Tx Underrun */
279
280/* Transmit Control */
281#define E1000_TCTL_EN 0x00000002 /* enable tx */
282#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
283#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
284#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
285#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
286#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
287
288/* Transmit Arbitration Count */
289
290/* SerDes Control */
291#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
292
293/* Receive Checksum Control */
294#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
295#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
296
297/* Header split receive */
298#define E1000_RFCTL_EXTEN 0x00008000
299#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
300#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
301
302/* Collision related configuration parameters */
303#define E1000_COLLISION_THRESHOLD 15
304#define E1000_CT_SHIFT 4
305#define E1000_COLLISION_DISTANCE 63
306#define E1000_COLD_SHIFT 12
307
308/* Default values for the transmit IPG register */
309#define DEFAULT_82543_TIPG_IPGT_COPPER 8
310
311#define E1000_TIPG_IPGT_MASK 0x000003FF
312
313#define DEFAULT_82543_TIPG_IPGR1 8
314#define E1000_TIPG_IPGR1_SHIFT 10
315
316#define DEFAULT_82543_TIPG_IPGR2 6
317#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
318#define E1000_TIPG_IPGR2_SHIFT 20
319
320#define MAX_JUMBO_FRAME_SIZE 0x3F00
321
322/* Extended Configuration Control and Size */
323#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
324#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
325#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
326#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
327#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
328#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
329#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
330
331#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
332#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
333#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
334#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
335
336#define E1000_KABGTXD_BGSQLBIAS 0x00050000
337
338/* PBA constants */
339#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */
340#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
341
342#define E1000_PBS_16K E1000_PBA_16K
343
344#define IFS_MAX 80
345#define IFS_MIN 40
346#define IFS_RATIO 4
347#define IFS_STEP 10
348#define MIN_NUM_XMITS 1000
349
350/* SW Semaphore Register */
351#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
352#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
353#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
354
355/* Interrupt Cause Read */
356#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
357#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
358#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
359#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
360#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
361#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
362
363/* This defines the bits that are set in the Interrupt Mask
364 * Set/Read Register. Each bit is documented below:
365 * o RXT0 = Receiver Timer Interrupt (ring 0)
366 * o TXDW = Transmit Descriptor Written Back
367 * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
368 * o RXSEQ = Receive Sequence Error
369 * o LSC = Link Status Change
370 */
371#define IMS_ENABLE_MASK ( \
372 E1000_IMS_RXT0 | \
373 E1000_IMS_TXDW | \
374 E1000_IMS_RXDMT0 | \
375 E1000_IMS_RXSEQ | \
376 E1000_IMS_LSC)
377
378/* Interrupt Mask Set */
379#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
380#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
381#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
382#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
383#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
384
385/* Interrupt Cause Set */
386#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
387#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
388
389/* Transmit Descriptor Control */
390#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
391#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
392#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
393#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
394#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
395 still to be processed. */
396
397/* Flow Control Constants */
398#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
399#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
400#define FLOW_CONTROL_TYPE 0x8808
401
402/* 802.1q VLAN Packet Size */
403#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
404
405/* Receive Address */
406/* Number of high/low register pairs in the RAR. The RAR (Receive Address
407 * Registers) holds the directed and multicast addresses that we monitor.
408 * Technically, we have 16 spots. However, we reserve one of these spots
409 * (RAR[15]) for our directed address used by controllers with
410 * manageability enabled, allowing us room for 15 multicast addresses.
411 */
412#define E1000_RAR_ENTRIES 15
413#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
414
415/* Error Codes */
416#define E1000_ERR_NVM 1
417#define E1000_ERR_PHY 2
418#define E1000_ERR_CONFIG 3
419#define E1000_ERR_PARAM 4
420#define E1000_ERR_MAC_INIT 5
421#define E1000_ERR_PHY_TYPE 6
422#define E1000_ERR_RESET 9
423#define E1000_ERR_MASTER_REQUESTS_PENDING 10
424#define E1000_ERR_HOST_INTERFACE_COMMAND 11
425#define E1000_BLK_PHY_RESET 12
426#define E1000_ERR_SWFW_SYNC 13
427#define E1000_NOT_IMPLEMENTED 14
428
429/* Loop limit on how long we wait for auto-negotiation to complete */
430#define FIBER_LINK_UP_LIMIT 50
431#define COPPER_LINK_UP_LIMIT 10
432#define PHY_AUTO_NEG_LIMIT 45
433#define PHY_FORCE_LIMIT 20
434/* Number of 100 microseconds we wait for PCI Express master disable */
435#define MASTER_DISABLE_TIMEOUT 800
436/* Number of milliseconds we wait for PHY configuration done after MAC reset */
437#define PHY_CFG_TIMEOUT 100
438/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */
439#define MDIO_OWNERSHIP_TIMEOUT 10
440/* Number of milliseconds for NVM auto read done after MAC reset. */
441#define AUTO_READ_DONE_TIMEOUT 10
442
443/* Flow Control */
444#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
445
/* Transmit Configuration Word (fiber/serdes autonegotiation) */
#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW asym pause direction */
#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
452
453/* Receive Configuration Word */
454#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
455#define E1000_RXCW_C 0x20000000 /* Receive config */
456#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
457
458/* PCI Express Control */
459#define E1000_GCR_RXD_NO_SNOOP 0x00000001
460#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
461#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
462#define E1000_GCR_TXD_NO_SNOOP 0x00000008
463#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
464#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
465
466#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
467 E1000_GCR_RXDSCW_NO_SNOOP | \
468 E1000_GCR_RXDSCR_NO_SNOOP | \
469 E1000_GCR_TXD_NO_SNOOP | \
470 E1000_GCR_TXDSCW_NO_SNOOP | \
471 E1000_GCR_TXDSCR_NO_SNOOP)
472
473/* PHY Control Register */
474#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
475#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
476#define MII_CR_POWER_DOWN 0x0800 /* Power down */
477#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
478#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
479#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
480#define MII_CR_SPEED_1000 0x0040
481#define MII_CR_SPEED_100 0x2000
482#define MII_CR_SPEED_10 0x0000
483
484/* PHY Status Register */
485#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
486#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
487
488/* Autoneg Advertisement Register */
489#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
490#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
491#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
492#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
493#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
494#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
495
496/* Link Partner Ability Register (Base Page) */
497#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
498#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
499
500/* Autoneg Expansion Register */
501
502/* 1000BASE-T Control Register */
503#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
504#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
505 /* 0=DTE device */
506#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
507 /* 0=Configure PHY as Slave */
508#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
509 /* 0=Automatic Master/Slave config */
510
511/* 1000BASE-T Status Register */
512#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
513#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
514
515
/* PHY 1000 MII Register/Bit Definitions */
/* PHY Registers defined by IEEE */
#define PHY_CONTROL 0x00 /* Control Register */
#define PHY_STATUS 0x01 /* Status Register */
#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
526
527/* NVM Control */
528#define E1000_EECD_SK 0x00000001 /* NVM Clock */
529#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
530#define E1000_EECD_DI 0x00000004 /* NVM Data In */
531#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
532#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
533#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
534#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
535#define E1000_EECD_ADDR_BITS 0x00000400 /* NVM Addressing bits based on type
536 * (0-small, 1-large) */
537#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
538#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
539#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
540#define E1000_EECD_SIZE_EX_SHIFT 11
541#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
542#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
543#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
544
545#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */
546#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
547#define E1000_NVM_RW_REG_START 1 /* Start operation */
548#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
549#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
550#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
551#define E1000_FLASH_UPDATES 2000
552
553/* NVM Word Offsets */
554#define NVM_ID_LED_SETTINGS 0x0004
555#define NVM_INIT_CONTROL2_REG 0x000F
556#define NVM_INIT_CONTROL3_PORT_B 0x0014
557#define NVM_INIT_3GIO_3 0x001A
558#define NVM_INIT_CONTROL3_PORT_A 0x0024
559#define NVM_CFG 0x0012
560#define NVM_CHECKSUM_REG 0x003F
561
562#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
563#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
564
565/* Mask bits for fields in Word 0x0f of the NVM */
566#define NVM_WORD0F_PAUSE_MASK 0x3000
567#define NVM_WORD0F_PAUSE 0x1000
568#define NVM_WORD0F_ASM_DIR 0x2000
569
570/* Mask bits for fields in Word 0x1a of the NVM */
571#define NVM_WORD1A_ASPM_MASK 0x000C
572
573/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
574#define NVM_SUM 0xBABA
575
576/* PBA (printed board assembly) number words */
577#define NVM_PBA_OFFSET_0 8
578#define NVM_PBA_OFFSET_1 9
579
580#define NVM_WORD_SIZE_BASE_SHIFT 6
581
582/* NVM Commands - SPI */
583#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
584#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
585#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
586#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
587#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
588#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
589
590/* SPI NVM Status Register */
591#define NVM_STATUS_RDY_SPI 0x01
592
593/* Word definitions for ID LED Settings */
594#define ID_LED_RESERVED_0000 0x0000
595#define ID_LED_RESERVED_FFFF 0xFFFF
596#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
597 (ID_LED_OFF1_OFF2 << 8) | \
598 (ID_LED_DEF1_DEF2 << 4) | \
599 (ID_LED_DEF1_DEF2))
600#define ID_LED_DEF1_DEF2 0x1
601#define ID_LED_DEF1_ON2 0x2
602#define ID_LED_DEF1_OFF2 0x3
603#define ID_LED_ON1_DEF2 0x4
604#define ID_LED_ON1_ON2 0x5
605#define ID_LED_ON1_OFF2 0x6
606#define ID_LED_OFF1_DEF2 0x7
607#define ID_LED_OFF1_ON2 0x8
608#define ID_LED_OFF1_OFF2 0x9
609
610#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
611#define IGP_ACTIVITY_LED_ENABLE 0x0300
612#define IGP_LED3_MODE 0x07000000
613
614/* PCI/PCI-X/PCI-EX Config space */
615#define PCI_HEADER_TYPE_REGISTER 0x0E
616#define PCIE_LINK_STATUS 0x12
617
618#define PCI_HEADER_TYPE_MULTIFUNC 0x80
619#define PCIE_LINK_WIDTH_MASK 0x3F0
620#define PCIE_LINK_WIDTH_SHIFT 4
621
622#define PHY_REVISION_MASK 0xFFFFFFF0
623#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
624#define MAX_PHY_MULTI_PAGE_REG 0xF
625
626/* Bit definitions for valid PHY IDs. */
627/* I = Integrated
628 * E = External
629 */
630#define M88E1000_E_PHY_ID 0x01410C50
631#define M88E1000_I_PHY_ID 0x01410C30
632#define M88E1011_I_PHY_ID 0x01410C20
633#define IGP01E1000_I_PHY_ID 0x02A80380
634#define M88E1111_I_PHY_ID 0x01410CC0
635#define GG82563_E_PHY_ID 0x01410CA0
636#define IGP03E1000_E_PHY_ID 0x02A80390
637#define IFE_E_PHY_ID 0x02A80330
638#define IFE_PLUS_E_PHY_ID 0x02A80320
639#define IFE_C_E_PHY_ID 0x02A80310
640
641/* M88E1000 Specific Registers */
642#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
643#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
644#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
645
646#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
647#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
648
649/* M88E1000 PHY Specific Control Register */
650#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
651#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
652 /* Manual MDI configuration */
653#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
654#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover,
655 * 100BASE-TX/10BASE-T:
656 * MDI Mode
657 */
658#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
659 * all speeds.
660 */
661 /* 1=Enable Extended 10BASE-T distance
662 * (Lower 10BASE-T RX Threshold)
663 * 0=Normal 10BASE-T RX Threshold */
664 /* 1=5-Bit interface in 100BASE-TX
665 * 0=MII interface in 100BASE-TX */
666#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
667
668/* M88E1000 PHY Specific Status Register */
669#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
670#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
671#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
672#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M;
673 * 3=110-140M;4=>140M */
674#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
675#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
676
677#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
678
679/* Number of times we will attempt to autonegotiate before downshifting if we
680 * are the master */
681#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
682#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
683/* Number of times we will attempt to autonegotiate before downshifting if we
684 * are the slave */
685#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
686#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
687#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
688
689/* M88EC018 Rev 2 specific DownShift settings */
690#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
691#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
692
693/* Bits...
694 * 15-5: page
695 * 4-0: register offset
696 */
697#define GG82563_PAGE_SHIFT 5
698#define GG82563_REG(page, reg) \
699 (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
700#define GG82563_MIN_ALT_REG 30
701
702/* GG82563 Specific Registers */
703#define GG82563_PHY_SPEC_CTRL \
704 GG82563_REG(0, 16) /* PHY Specific Control */
705#define GG82563_PHY_PAGE_SELECT \
706 GG82563_REG(0, 22) /* Page Select */
707#define GG82563_PHY_SPEC_CTRL_2 \
708 GG82563_REG(0, 26) /* PHY Specific Control 2 */
709#define GG82563_PHY_PAGE_SELECT_ALT \
710 GG82563_REG(0, 29) /* Alternate Page Select */
711
712#define GG82563_PHY_MAC_SPEC_CTRL \
713 GG82563_REG(2, 21) /* MAC Specific Control Register */
714
715#define GG82563_PHY_DSP_DISTANCE \
716 GG82563_REG(5, 26) /* DSP Distance */
717
718/* Page 193 - Port Control Registers */
719#define GG82563_PHY_KMRN_MODE_CTRL \
720 GG82563_REG(193, 16) /* Kumeran Mode Control */
721#define GG82563_PHY_PWR_MGMT_CTRL \
722 GG82563_REG(193, 20) /* Power Management Control */
723
724/* Page 194 - KMRN Registers */
725#define GG82563_PHY_INBAND_CTRL \
726 GG82563_REG(194, 18) /* Inband Control */
727
728/* MDI Control */
729#define E1000_MDIC_REG_SHIFT 16
730#define E1000_MDIC_PHY_SHIFT 21
731#define E1000_MDIC_OP_WRITE 0x04000000
732#define E1000_MDIC_OP_READ 0x08000000
733#define E1000_MDIC_READY 0x10000000
734#define E1000_MDIC_ERROR 0x40000000
735
736/* SerDes Control */
737#define E1000_GEN_POLL_TIMEOUT 640
738
739#endif /* _E1000_DEFINES_H_ */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
new file mode 100644
index 000000000000..d2499bb07c13
--- /dev/null
+++ b/drivers/net/e1000e/e1000.h
@@ -0,0 +1,514 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/* Linux PRO/1000 Ethernet Driver main header file */
30
31#ifndef _E1000_H_
32#define _E1000_H_
33
34#include <linux/types.h>
35#include <linux/timer.h>
36#include <linux/workqueue.h>
37#include <linux/io.h>
38#include <linux/netdevice.h>
39
40#include "hw.h"
41
42struct e1000_info;
43
/*
 * Driver logging helpers: printk wrappers that prefix every message with
 * the device's PCI bus id and the netdev name.
 */
#define ndev_printk(level, netdev, format, arg...) \
	printk(level "%s: %s: " format, (netdev)->dev.parent->bus_id, \
	       (netdev)->name, ## arg)

#ifdef DEBUG
#define ndev_dbg(netdev, format, arg...) \
	ndev_printk(KERN_DEBUG , netdev, format, ## arg)
#else
/* Debug build only: compiled out, but still evaluates (and type-checks)
 * the netdev argument so callers stay warning-free. */
#define ndev_dbg(netdev, format, arg...) do { (void)(netdev); } while (0)
#endif

#define ndev_err(netdev, format, arg...) \
	ndev_printk(KERN_ERR , netdev, format, ## arg)
#define ndev_info(netdev, format, arg...) \
	ndev_printk(KERN_INFO , netdev, format, ## arg)
#define ndev_warn(netdev, format, arg...) \
	ndev_printk(KERN_WARNING , netdev, format, ## arg)
#define ndev_notice(netdev, format, arg...) \
	ndev_printk(KERN_NOTICE , netdev, format, ## arg)
63
64
65/* TX/RX descriptor defines */
66#define E1000_DEFAULT_TXD 256
67#define E1000_MAX_TXD 4096
68#define E1000_MIN_TXD 80
69
70#define E1000_DEFAULT_RXD 256
71#define E1000_MAX_RXD 4096
72#define E1000_MIN_RXD 80
73
74/* Early Receive defines */
75#define E1000_ERT_2048 0x100
76
77#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
78
79/* How many Tx Descriptors do we need to call netif_wake_queue ? */
80/* How many Rx Buffers do we bundle into one write to the hardware ? */
81#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
82
83#define AUTO_ALL_MODES 0
84#define E1000_EEPROM_APME 0x0400
85
86#define E1000_MNG_VLAN_NONE (-1)
87
88/* Number of packet split data buffers (not including the header buffer) */
89#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
90
/* Board/family identifiers; each corresponds to one of the
 * e1000_*_info descriptors declared near the end of this header. */
enum e1000_boards {
	board_82571,
	board_82572,
	board_82573,
	board_80003es2lan,
	board_ich8lan,
	board_ich9lan,
};
99
/* Per-ring packet/byte counters (embedded in struct e1000_ring). */
struct e1000_queue_stats {
	u64 packets;
	u64 bytes;
};
104
/* One data-buffer page used by packet-split receive. */
struct e1000_ps_page {
	struct page *page;
	u64 dma; /* must be u64 - written to hw */
};
109
/*
 * wrappers around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct e1000_buffer {
	dma_addr_t dma;
	struct sk_buff *skb;
	/* Direction-specific state; a buffer is only ever used for one of
	 * TX or RX, so the two sets share storage. */
	union {
		/* TX */
		struct {
			unsigned long time_stamp;
			u16 length;
			u16 next_to_watch;
		};
		/* RX */
		struct page *page;
	};

};
129
/* Descriptor ring state, shared by the TX and RX paths. */
struct e1000_ring {
	void *desc; /* pointer to ring memory */
	dma_addr_t dma; /* phys address of ring */
	unsigned int size; /* length of ring in bytes */
	unsigned int count; /* number of desc. in ring */

	u16 next_to_use;
	u16 next_to_clean;

	/* NOTE(review): presumably the head/tail register offsets for this
	 * ring — confirm against the users in netdev.c */
	u16 head;
	u16 tail;

	/* array of buffer information structs */
	struct e1000_buffer *buffer_info;

	/* arrays of page information for packet split */
	struct e1000_ps_page *ps_pages;
	struct sk_buff *rx_skb_top;

	struct e1000_queue_stats stats;
};
151
/* board specific private data structure */
struct e1000_adapter {
	struct timer_list watchdog_timer;
	struct timer_list phy_info_timer;
	struct timer_list blink_timer; /* used for the ethtool LED-blink test */

	struct work_struct reset_task;
	struct work_struct watchdog_task;

	/* board descriptor this adapter was probed with */
	const struct e1000_info *ei;

	struct vlan_group *vlgrp;
	u32 bd_number;
	u32 rx_buffer_len;
	u16 mng_vlan_id;
	u16 link_speed;
	u16 link_duplex;

	spinlock_t tx_queue_lock; /* prevent concurrent tail updates */

	/* this is still needed for 82571 and above */
	atomic_t irq_sem;

	/* track device up/down/testing state (__E1000_* bits below) */
	unsigned long state;

	/* Interrupt Throttle Rate */
	u32 itr;
	u32 itr_setting;
	u16 tx_itr;
	u16 rx_itr;

	/*
	 * TX
	 */
	struct e1000_ring *tx_ring /* One per active queue */
						____cacheline_aligned_in_smp;

	struct napi_struct napi;

	unsigned long tx_queue_len;
	unsigned int restart_queue;
	u32 txd_cmd;

	bool detect_tx_hung;
	u8 tx_timeout_factor;

	u32 tx_int_delay;
	u32 tx_abs_int_delay;

	unsigned int total_tx_bytes;
	unsigned int total_tx_packets;
	unsigned int total_rx_bytes;
	unsigned int total_rx_packets;

	/* TX stats */
	u64 tpt_old;
	u64 colc_old;
	u64 gotcl_old;
	u32 gotcl;
	u32 tx_timeout_count;
	u32 tx_fifo_head;
	u32 tx_head_addr;
	u32 tx_fifo_size;
	u32 tx_dma_failed;

	/*
	 * RX
	 */
	/* per-board receive clean/refill hooks, chosen at runtime */
	bool (*clean_rx) (struct e1000_adapter *adapter,
			  int *work_done, int work_to_do)
						____cacheline_aligned_in_smp;
	void (*alloc_rx_buf) (struct e1000_adapter *adapter,
			      int cleaned_count);
	struct e1000_ring *rx_ring;

	u32 rx_int_delay;
	u32 rx_abs_int_delay;

	/* RX stats */
	u64 hw_csum_err;
	u64 hw_csum_good;
	u64 rx_hdr_split;
	u64 gorcl_old;
	u32 gorcl;
	u32 alloc_rx_buff_failed;
	u32 rx_dma_failed;

	unsigned int rx_ps_pages;
	u16 rx_ps_bsize0;

	/* OS defined structs */
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct net_device_stats net_stats;
	spinlock_t stats_lock; /* prevent concurrent stats updates */

	/* structs defined in e1000_hw.h */
	struct e1000_hw hw;

	struct e1000_hw_stats stats;
	struct e1000_phy_info phy_info;
	struct e1000_phy_stats phy_stats;

	/* rings used only by the ethtool loopback self-test */
	struct e1000_ring test_tx_ring;
	struct e1000_ring test_rx_ring;
	u32 test_icr;

	u32 msg_enable;

	u32 eeprom_wol;
	u32 wol;
	u32 pba;

	u8 fc_autoneg;

	unsigned long led_status;

	unsigned int flags; /* FLAG_* capability/workaround bits below */
};
272
/* Static per-MAC-family board descriptor; one e1000_*_info instance per
 * supported family (declared as externs later in this header). */
struct e1000_info {
	enum e1000_mac_type mac;
	unsigned int flags; /* initial FLAG_* bits for this family */
	u32 pba; /* NOTE(review): presumably packet buffer allocation — confirm */
	s32 (*get_invariants)(struct e1000_adapter *);
	struct e1000_mac_operations *mac_ops;
	struct e1000_phy_operations *phy_ops;
	struct e1000_nvm_operations *nvm_ops;
};
282
283/* hardware capability, feature, and workaround flags */
284#define FLAG_HAS_AMT (1 << 0)
285#define FLAG_HAS_FLASH (1 << 1)
286#define FLAG_HAS_HW_VLAN_FILTER (1 << 2)
287#define FLAG_HAS_WOL (1 << 3)
288#define FLAG_HAS_ERT (1 << 4)
289#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
290#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
291#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
292#define FLAG_HAS_ASPM (1 << 8)
293#define FLAG_HAS_STATS_ICR_ICT (1 << 9)
294#define FLAG_HAS_STATS_PTC_PRC (1 << 10)
295#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
296#define FLAG_IS_QUAD_PORT_A (1 << 12)
297#define FLAG_IS_QUAD_PORT (1 << 13)
298#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14)
299#define FLAG_APME_IN_WUC (1 << 15)
300#define FLAG_APME_IN_CTRL3 (1 << 16)
301#define FLAG_APME_CHECK_PORT_B (1 << 17)
302#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18)
303#define FLAG_NO_WAKE_UCAST (1 << 19)
304#define FLAG_MNG_PT_ENABLED (1 << 20)
305#define FLAG_RESET_OVERWRITES_LAA (1 << 21)
306#define FLAG_TARC_SPEED_MODE_BIT (1 << 22)
307#define FLAG_TARC_SET_BIT_ZERO (1 << 23)
308#define FLAG_RX_NEEDS_RESTART (1 << 24)
309#define FLAG_LSC_GIG_SPEED_DROP (1 << 25)
310#define FLAG_SMART_POWER_DOWN (1 << 26)
311#define FLAG_MSI_ENABLED (1 << 27)
312#define FLAG_RX_CSUM_ENABLED (1 << 28)
313#define FLAG_TSO_FORCE (1 << 29)
314
/* Descriptor accessors: pointer to the i-th descriptor of ring R,
 * cast to the requested descriptor layout. */
#define E1000_RX_DESC_PS(R, i) \
	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
321
/* Bit numbers for the adapter->state field ("track device
 * up/down/testing state"). */
enum e1000_state_t {
	__E1000_TESTING,
	__E1000_RESETTING,
	__E1000_DOWN
};
327
/* Traffic-class buckets; NOTE(review): presumably consumed by the
 * interrupt-throttle (itr) update logic — confirm in netdev.c. */
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
334
335extern char e1000e_driver_name[];
336extern const char e1000e_driver_version[];
337
338extern void e1000e_check_options(struct e1000_adapter *adapter);
339extern void e1000e_set_ethtool_ops(struct net_device *netdev);
340
341extern int e1000e_up(struct e1000_adapter *adapter);
342extern void e1000e_down(struct e1000_adapter *adapter);
343extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
344extern void e1000e_reset(struct e1000_adapter *adapter);
345extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
346extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
347extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
348extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
349extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
350extern void e1000e_update_stats(struct e1000_adapter *adapter);
351
352extern unsigned int copybreak;
353
354extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw);
355
356extern struct e1000_info e1000_82571_info;
357extern struct e1000_info e1000_82572_info;
358extern struct e1000_info e1000_82573_info;
359extern struct e1000_info e1000_ich8_info;
360extern struct e1000_info e1000_ich9_info;
361extern struct e1000_info e1000_es2_info;
362
363extern s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num);
364
365extern s32 e1000e_commit_phy(struct e1000_hw *hw);
366
367extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
368
369extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
370extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
371
372extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
373 bool state);
374extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
375extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
376
377extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
378extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
379extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
380extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
381extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
382extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
383extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
384extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
385extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
386extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
387extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
388extern s32 e1000e_id_led_init(struct e1000_hw *hw);
389extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
390extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
391extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
392extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
393extern s32 e1000e_setup_link(struct e1000_hw *hw);
394extern void e1000e_clear_vfta(struct e1000_hw *hw);
395extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
396extern void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw,
397 u8 *mc_addr_list, u32 mc_addr_count,
398 u32 rar_used_count, u32 rar_count);
399extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
400extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
401extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
402extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
403extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
404extern void e1000e_config_collision_dist(struct e1000_hw *hw);
405extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
406extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
407extern s32 e1000e_blink_led(struct e1000_hw *hw);
408extern void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
409extern void e1000e_reset_adaptive(struct e1000_hw *hw);
410extern void e1000e_update_adaptive(struct e1000_hw *hw);
411
412extern s32 e1000e_setup_copper_link(struct e1000_hw *hw);
413extern s32 e1000e_get_phy_id(struct e1000_hw *hw);
414extern void e1000e_put_hw_semaphore(struct e1000_hw *hw);
415extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
416extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
417extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
418extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
419extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
420extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
421extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
422extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
423extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
424extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
425extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
426extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
427extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
428extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
429extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
430extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
431extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
432extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
433extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
434extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
435 u32 usec_interval, bool *success);
436extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
437extern s32 e1000e_check_downshift(struct e1000_hw *hw);
438
/* Hardware-reset the PHY via the family-specific phy.ops.reset_phy hook. */
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
	return hw->phy.ops.reset_phy(hw);
}
443
/* Ask the family-specific hook whether PHY resets are currently blocked. */
static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
{
	return hw->phy.ops.check_reset_block(hw);
}
448
/* Read PHY register @offset into *@data via the family-specific hook. */
static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
{
	return hw->phy.ops.read_phy_reg(hw, offset, data);
}
453
/* Write @data to PHY register @offset via the family-specific hook. */
static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
{
	return hw->phy.ops.write_phy_reg(hw, offset, data);
}
458
/* Estimate the attached cable length via the family-specific hook. */
static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
{
	return hw->phy.ops.get_cable_length(hw);
}
463
464extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
465extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
466extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
467extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
468extern s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
469extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
470extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
471extern void e1000e_release_nvm(struct e1000_hw *hw);
472extern void e1000e_reload_nvm(struct e1000_hw *hw);
473extern s32 e1000e_read_mac_addr(struct e1000_hw *hw);
474
/* Validate the NVM (EEPROM/flash) checksum via the nvm.ops hook. */
static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
{
	return hw->nvm.ops.validate_nvm(hw);
}
479
/* Recompute and store the NVM checksum via the nvm.ops hook. */
static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
{
	return hw->nvm.ops.update_nvm(hw);
}
484
/* Read @words 16-bit words from NVM starting at @offset into @data. */
static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	return hw->nvm.ops.read_nvm(hw, offset, words, data);
}
489
/* Write @words 16-bit words from @data to NVM starting at @offset. */
static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	return hw->nvm.ops.write_nvm(hw, offset, words, data);
}
494
/* Refresh cached PHY information via the family-specific hook. */
static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
{
	return hw->phy.ops.get_phy_info(hw);
}
499
500extern bool e1000e_check_mng_mode(struct e1000_hw *hw);
501extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
502extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
503
/* Read the 32-bit device register at byte offset @reg of the mapped BAR. */
static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
{
	return readl(hw->hw_addr + reg);
}
508
/* Write @val to the 32-bit device register at byte offset @reg. */
static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
{
	writel(val, hw->hw_addr + reg);
}
513
514#endif /* _E1000_H_ */
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
new file mode 100644
index 000000000000..88657adf965f
--- /dev/null
+++ b/drivers/net/e1000e/es2lan.c
@@ -0,0 +1,1232 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/*
30 * 80003ES2LAN Gigabit Ethernet Controller (Copper)
31 * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
32 */
33
34#include <linux/netdevice.h>
35#include <linux/ethtool.h>
36#include <linux/delay.h>
37#include <linux/pci.h>
38
39#include "e1000.h"
40
41#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
42#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
43#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
44
45#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
46#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
47#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
48
49#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
50#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
51
52#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
53#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
54
55#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
56#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
57
/* GG82563 PHY Specific Status Register (Page 0, Register 16) */
59#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */
60#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
61#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
62#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
63#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
64
65/* PHY Specific Control Register 2 (Page 0, Register 26) */
66#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000
67 /* 1=Reverse Auto-Negotiation */
68
69/* MAC Specific Control Register (Page 2, Register 21) */
70/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
71#define GG82563_MSCR_TX_CLK_MASK 0x0007
72#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
73#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
74#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
75
76#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
77
78/* DSP Distance Register (Page 5, Register 26) */
79#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M
80 1 = 50-80M
81 2 = 80-110M
82 3 = 110-140M
83 4 = >140M */
84
85/* Kumeran Mode Control Register (Page 193, Register 16) */
86#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
87
88/* Power Management Control Register (Page 193, Register 20) */
89#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
90 /* 1=Enable SERDES Electrical Idle */
91
92/* In-Band Control Register (Page 194, Register 18) */
93#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
94
95/* A table for the GG82563 cable length where the range is defined
96 * with a lower bound at "index" and the upper bound at
97 * "index + 5".
98 */
99static const u16 e1000_gg82563_cable_length_table[] =
100 { 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
101
102static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
103static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
104static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
105static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
106static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
107static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
108static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
109
/**
 * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
 * @hw: pointer to the HW structure
 *
 * Set the GG82563 PHY parameters (address, autoneg mask, reset delay,
 * type), then read and verify the PHY id.  Non-copper (serdes) parts
 * have no external PHY; they are marked e1000_phy_none and succeed
 * immediately.  This is a function pointer entry point called by the
 * api module.
 *
 * Returns 0 on success, -E1000_ERR_PHY if the id read back is not the
 * GG82563's, or the error from the id read itself.
 **/
static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;

	/* Only copper media has a manageable PHY. */
	if (hw->media_type != e1000_media_type_copper) {
		phy->type = e1000_phy_none;
		return 0;
	}

	phy->addr = 1;
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	phy->reset_delay_us = 100;
	phy->type = e1000_phy_gg82563;

	/* This can only be done after all function pointers are setup. */
	ret_val = e1000e_get_phy_id(hw);

	/* Verify phy id */
	if (phy->id != GG82563_E_PHY_ID)
		return -E1000_ERR_PHY;

	return ret_val;
}
140
/**
 * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
 * @hw: pointer to the HW structure
 *
 * Configure SPI EEPROM access parameters (opcode width, per-access
 * delay, page size, address width) and derive the device word size
 * from the EECD size-extension field.  This is a function pointer
 * entry point called by the api module.
 *
 * Always returns 0.
 **/
static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u16 size;

	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;
	/* Page size / address width: honour a user override if present,
	 * otherwise autodetect from the EECD address-bits strap.
	 */
	switch (nvm->override) {
	case e1000_nvm_override_spi_large:
		nvm->page_size = 32;
		nvm->address_bits = 16;
		break;
	case e1000_nvm_override_spi_small:
		nvm->page_size = 8;
		nvm->address_bits = 8;
		break;
	default:
		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
		break;
	}

	nvm->type = e1000_nvm_eeprom_spi;

	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
		     E1000_EECD_SIZE_EX_SHIFT);

	/* Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;
	nvm->word_size = 1 << size;

	return 0;
}
183
184/**
185 * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
186 * @hw: pointer to the HW structure
187 *
188 * This is a function pointer entry point called by the api module.
189 **/
190static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
191{
192 struct e1000_hw *hw = &adapter->hw;
193 struct e1000_mac_info *mac = &hw->mac;
194 struct e1000_mac_operations *func = &mac->ops;
195
196 /* Set media type */
197 switch (adapter->pdev->device) {
198 case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
199 hw->media_type = e1000_media_type_internal_serdes;
200 break;
201 default:
202 hw->media_type = e1000_media_type_copper;
203 break;
204 }
205
206 /* Set mta register count */
207 mac->mta_reg_count = 128;
208 /* Set rar entry count */
209 mac->rar_entry_count = E1000_RAR_ENTRIES;
210 /* Set if manageability features are enabled. */
211 mac->arc_subsystem_valid =
212 (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
213
214 /* check for link */
215 switch (hw->media_type) {
216 case e1000_media_type_copper:
217 func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
218 func->check_for_link = e1000e_check_for_copper_link;
219 break;
220 case e1000_media_type_fiber:
221 func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
222 func->check_for_link = e1000e_check_for_fiber_link;
223 break;
224 case e1000_media_type_internal_serdes:
225 func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
226 func->check_for_link = e1000e_check_for_serdes_link;
227 break;
228 default:
229 return -E1000_ERR_CONFIG;
230 break;
231 }
232
233 return 0;
234}
235
236static s32 e1000_get_invariants_80003es2lan(struct e1000_adapter *adapter)
237{
238 struct e1000_hw *hw = &adapter->hw;
239 s32 rc;
240
241 rc = e1000_init_mac_params_80003es2lan(adapter);
242 if (rc)
243 return rc;
244
245 rc = e1000_init_nvm_params_80003es2lan(hw);
246 if (rc)
247 return rc;
248
249 rc = e1000_init_phy_params_80003es2lan(hw);
250 if (rc)
251 return rc;
252
253 return 0;
254}
255
256/**
257 * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
258 * @hw: pointer to the HW structure
259 *
260 * A wrapper to acquire access rights to the correct PHY. This is a
261 * function pointer entry point called by the api module.
262 **/
263static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
264{
265 u16 mask;
266
267 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
268
269 return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
270}
271
272/**
273 * e1000_release_phy_80003es2lan - Release rights to access PHY
274 * @hw: pointer to the HW structure
275 *
276 * A wrapper to release access rights to the correct PHY. This is a
277 * function pointer entry point called by the api module.
278 **/
279static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
280{
281 u16 mask;
282
283 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
284 e1000_release_swfw_sync_80003es2lan(hw, mask);
285}
286
287/**
288 * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
289 * @hw: pointer to the HW structure
290 *
291 * Acquire the semaphore to access the EEPROM. This is a function
292 * pointer entry point called by the api module.
293 **/
294static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
295{
296 s32 ret_val;
297
298 ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
299 if (ret_val)
300 return ret_val;
301
302 ret_val = e1000e_acquire_nvm(hw);
303
304 if (ret_val)
305 e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
306
307 return ret_val;
308}
309
/**
 * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
 * @hw: pointer to the HW structure
 *
 * Release the semaphore used to access the EEPROM: the generic NVM
 * lock first, then the SW/FW sync bit taken on acquire (reverse of
 * acquisition order).  This is a function pointer entry point called
 * by the api module.
 **/
static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
{
	e1000e_release_nvm(hw);
	e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
}
322
/**
 * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 * will also specify which port we're acquiring the lock for.
 *
 * Each attempt takes the HW semaphore, checks SW_FW_SYNC for a firmware
 * (mask << 16) or software (mask) owner, and backs off for 5ms if the
 * resource is busy; up to 200 attempts (~1s) are made.  On success the
 * software ownership bit is set while the HW semaphore is still held.
 *
 * Returns 0 on success or -E1000_ERR_SWFW_SYNC on timeout or failure
 * to take the HW semaphore.
 **/
static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;	/* FW ownership bits are in the upper half */
	s32 i = 0;
	s32 timeout = 200;

	while (i < timeout) {
		if (e1000e_get_hw_semaphore(hw))
			return -E1000_ERR_SWFW_SYNC;

		swfw_sync = er32(SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask) */
		e1000e_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg(hw,
		       "Driver can't access resource, SW_FW_SYNC timeout.\n");
		return -E1000_ERR_SWFW_SYNC;
	}

	/* Claim the resource; the HW semaphore is still held here. */
	swfw_sync |= swmask;
	ew32(SW_FW_SYNC, swfw_sync);

	e1000e_put_hw_semaphore(hw);

	return 0;
}
367
368/**
369 * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
370 * @hw: pointer to the HW structure
371 * @mask: specifies which semaphore to acquire
372 *
373 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
374 * will also specify which port we're releasing the lock for.
375 **/
376static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
377{
378 u32 swfw_sync;
379
380 while (e1000e_get_hw_semaphore(hw) != 0);
381 /* Empty */
382
383 swfw_sync = er32(SW_FW_SYNC);
384 swfw_sync &= ~mask;
385 ew32(SW_FW_SYNC, swfw_sync);
386
387 e1000e_put_hw_semaphore(hw);
388}
389
/**
 * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register
 * @hw: pointer to the HW structure
 * @offset: offset of the register to read
 * @data: pointer to the data returned from the operation
 *
 * Read the GG82563 PHY register.  The register's page is selected
 * first (registers 30/31 need the alternate page-select register),
 * the page select is read back and verified, then the register itself
 * is read.  This is a function pointer entry point called by the api
 * module.
 *
 * Returns 0 on success, -E1000_ERR_PHY if the page-select verification
 * fails, or the error from the underlying MDI access.
 **/
static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
						  u32 offset, u16 *data)
{
	s32 ret_val;
	u32 page_select;
	u16 temp;

	/* Select Configuration Page */
	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG)
		page_select = GG82563_PHY_PAGE_SELECT;
	else
		/* Use Alternative Page Select register to access
		 * registers 30 and 31
		 */
		page_select = GG82563_PHY_PAGE_SELECT_ALT;

	/* The page number lives in the upper bits of the offset encoding. */
	temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
	ret_val = e1000e_write_phy_reg_m88(hw, page_select, temp);
	if (ret_val)
		return ret_val;

	/* The "ready" bit in the MDIC register may be incorrectly set
	 * before the device has completed the "Page Select" MDI
	 * transaction.  So we wait 200us after each MDI command...
	 */
	udelay(200);

	/* ...and verify the command was successful. */
	ret_val = e1000e_read_phy_reg_m88(hw, page_select, &temp);

	if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
		ret_val = -E1000_ERR_PHY;
		return ret_val;
	}

	udelay(200);

	/* Read the register within the selected page. */
	ret_val = e1000e_read_phy_reg_m88(hw,
					  MAX_PHY_REG_ADDRESS & offset,
					  data);

	udelay(200);

	return ret_val;
}
444
/**
 * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register
 * @hw: pointer to the HW structure
 * @offset: offset of the register to read
 * @data: value to write to the register
 *
 * Write to the GG82563 PHY register.  The register's page is selected
 * first (registers 30/31 need the alternate page-select register),
 * the page select is read back and verified, then the register itself
 * is written.  This is a function pointer entry point called by the
 * api module.
 *
 * Returns 0 on success, -E1000_ERR_PHY if the page-select verification
 * fails, or the error from the underlying MDI access.
 **/
static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
						   u32 offset, u16 data)
{
	s32 ret_val;
	u32 page_select;
	u16 temp;

	/* Select Configuration Page */
	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG)
		page_select = GG82563_PHY_PAGE_SELECT;
	else
		/* Use Alternative Page Select register to access
		 * registers 30 and 31
		 */
		page_select = GG82563_PHY_PAGE_SELECT_ALT;

	/* The page number lives in the upper bits of the offset encoding. */
	temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
	ret_val = e1000e_write_phy_reg_m88(hw, page_select, temp);
	if (ret_val)
		return ret_val;


	/* The "ready" bit in the MDIC register may be incorrectly set
	 * before the device has completed the "Page Select" MDI
	 * transaction.  So we wait 200us after each MDI command...
	 */
	udelay(200);

	/* ...and verify the command was successful. */
	ret_val = e1000e_read_phy_reg_m88(hw, page_select, &temp);

	if (((u16)offset >> GG82563_PAGE_SHIFT) != temp)
		return -E1000_ERR_PHY;

	udelay(200);

	/* Write the register within the selected page. */
	ret_val = e1000e_write_phy_reg_m88(hw,
					   MAX_PHY_REG_ADDRESS & offset,
					   data);

	udelay(200);

	return ret_val;
}
498
/**
 * e1000_write_nvm_80003es2lan - Write to ESB2 NVM
 * @hw: pointer to the HW structure
 * @offset: offset of the register to read
 * @words: number of words to write
 * @data: buffer of data to write to the NVM
 *
 * Write "words" of data to the ESB2 NVM.  Thin wrapper around the
 * generic SPI write routine.  This is a function pointer entry point
 * called by the api module.
 **/
static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
				       u16 words, u16 *data)
{
	return e1000e_write_nvm_spi(hw, offset, words, data);
}
514
515/**
516 * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete
517 * @hw: pointer to the HW structure
518 *
519 * Wait a specific amount of time for manageability processes to complete.
520 * This is a function pointer entry point called by the phy module.
521 **/
522static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
523{
524 s32 timeout = PHY_CFG_TIMEOUT;
525 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
526
527 if (hw->bus.func == 1)
528 mask = E1000_NVM_CFG_DONE_PORT_1;
529
530 while (timeout) {
531 if (er32(EEMNGCTL) & mask)
532 break;
533 msleep(1);
534 timeout--;
535 }
536 if (!timeout) {
537 hw_dbg(hw, "MNG configuration cycle has not completed.\n");
538 return -E1000_ERR_RESET;
539 }
540
541 return 0;
542}
543
544/**
545 * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex
546 * @hw: pointer to the HW structure
547 *
548 * Force the speed and duplex settings onto the PHY. This is a
549 * function pointer entry point called by the phy module.
550 **/
551static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
552{
553 s32 ret_val;
554 u16 phy_data;
555 bool link;
556
557 /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
558 * forced whenever speed and duplex are forced.
559 */
560 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
561 if (ret_val)
562 return ret_val;
563
564 phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO;
565 ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data);
566 if (ret_val)
567 return ret_val;
568
569 hw_dbg(hw, "GG82563 PSCR: %X\n", phy_data);
570
571 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
572 if (ret_val)
573 return ret_val;
574
575 e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
576
577 /* Reset the phy to commit changes. */
578 phy_data |= MII_CR_RESET;
579
580 ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
581 if (ret_val)
582 return ret_val;
583
584 udelay(1);
585
586 if (hw->phy.wait_for_link) {
587 hw_dbg(hw, "Waiting for forced speed/duplex link "
588 "on GG82563 phy.\n");
589
590 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
591 100000, &link);
592 if (ret_val)
593 return ret_val;
594
595 if (!link) {
596 /* We didn't get link.
597 * Reset the DSP and cross our fingers.
598 */
599 ret_val = e1000e_phy_reset_dsp(hw);
600 if (ret_val)
601 return ret_val;
602 }
603
604 /* Try once more */
605 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
606 100000, &link);
607 if (ret_val)
608 return ret_val;
609 }
610
611 ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
612 if (ret_val)
613 return ret_val;
614
615 /* Resetting the phy means we need to verify the TX_CLK corresponds
616 * to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
617 */
618 phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
619 if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED)
620 phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5;
621 else
622 phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
623
624 /* In addition, we must re-enable CRS on Tx for both half and full
625 * duplex.
626 */
627 phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
628 ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
629
630 return ret_val;
631}
632
633/**
634 * e1000_get_cable_length_80003es2lan - Set approximate cable length
635 * @hw: pointer to the HW structure
636 *
637 * Find the approximate cable length as measured by the GG82563 PHY.
638 * This is a function pointer entry point called by the phy module.
639 **/
640static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
641{
642 struct e1000_phy_info *phy = &hw->phy;
643 s32 ret_val;
644 u16 phy_data;
645 u16 index;
646
647 ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
648 if (ret_val)
649 return ret_val;
650
651 index = phy_data & GG82563_DSPD_CABLE_LENGTH;
652 phy->min_cable_length = e1000_gg82563_cable_length_table[index];
653 phy->max_cable_length = e1000_gg82563_cable_length_table[index+5];
654
655 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
656
657 return 0;
658}
659
/**
 * e1000_get_link_up_info_80003es2lan - Report speed and duplex
 * @hw: pointer to the HW structure
 * @speed: pointer to speed buffer
 * @duplex: pointer to duplex buffer
 *
 * Retrieve the current speed and duplex configuration.  For copper
 * links the Kumeran interface is additionally re-tuned to match the
 * detected speed.  This is a function pointer entry point called by
 * the api module.
 **/
static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
					      u16 *duplex)
{
	s32 ret_val;

	if (hw->media_type == e1000_media_type_copper) {
		ret_val = e1000e_get_speed_and_duplex_copper(hw,
							     speed,
							     duplex);
		if (ret_val)
			return ret_val;
		/* Apply speed-specific Kumeran quirks now that the
		 * link speed is known.
		 */
		if (*speed == SPEED_1000)
			ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
		else
			ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw,
								    *duplex);
	} else {
		ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
								   speed,
								   duplex);
	}

	return ret_val;
}
693
/**
 * e1000_reset_hw_80003es2lan - Reset the ESB2 controller
 * @hw: pointer to the HW structure
 *
 * Perform a global reset to the ESB2 controller: quiesce PCIe master
 * accesses, mask interrupts, stop RX/TX, pulse CTRL.RST, then wait for
 * the NVM auto-read to finish before touching MAC registers again.
 * This is a function pointer entry point called by the api module.
 **/
static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
{
	u32 ctrl;
	u32 icr;
	s32 ret_val;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000e_disable_pcie_master(hw);
	if (ret_val)
		/* Non-fatal: log and carry on with the reset. */
		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");

	hw_dbg(hw, "Masking off all interrupts\n");
	ew32(IMC, 0xffffffff);

	ew32(RCTL, 0);
	ew32(TCTL, E1000_TCTL_PSP);
	e1e_flush();

	msleep(10);

	ctrl = er32(CTRL);

	hw_dbg(hw, "Issuing a global reset to MAC\n");
	ew32(CTRL, ctrl | E1000_CTRL_RST);

	ret_val = e1000e_get_auto_rd_done(hw);
	if (ret_val)
		/* We don't want to continue accessing MAC registers. */
		return ret_val;

	/* Clear any pending interrupt events. */
	ew32(IMC, 0xffffffff);
	icr = er32(ICR);	/* read clears ICR; value intentionally unused */

	return 0;
}
739
/**
 * e1000_init_hw_80003es2lan - Initialize the ESB2 controller
 * @hw: pointer to the HW structure
 *
 * Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
 * This is a function pointer entry point called by the api module.
 *
 * Returns the status of e1000e_setup_link(); the register programming
 * that follows it does not affect the return value.
 **/
static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 reg_data;
	s32 ret_val;
	u16 i;

	e1000_initialize_hw_bits_80003es2lan(hw);

	/* Initialize identification LED */
	ret_val = e1000e_id_led_init(hw);
	if (ret_val) {
		hw_dbg(hw, "Error initializing identification LED\n");
		return ret_val;
	}

	/* Disabling VLAN filtering */
	hw_dbg(hw, "Initializing the IEEE VLAN\n");
	e1000e_clear_vfta(hw);

	/* Setup the receive address. */
	e1000e_init_rx_addrs(hw, mac->rar_entry_count);

	/* Zero out the Multicast HASH table */
	hw_dbg(hw, "Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* Setup link and flow control; this status is the return value. */
	ret_val = e1000e_setup_link(hw);

	/* Set the transmit descriptor write-back policy */
	reg_data = er32(TXDCTL);
	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
		   E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
	ew32(TXDCTL, reg_data);

	/* ...for both queues. */
	reg_data = er32(TXDCTL1);
	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
		   E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
	ew32(TXDCTL1, reg_data);

	/* Enable retransmit on late collisions */
	reg_data = er32(TCTL);
	reg_data |= E1000_TCTL_RTLC;
	ew32(TCTL, reg_data);

	/* Configure Gigabit Carry Extend Padding */
	reg_data = er32(TCTL_EXT);
	reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
	reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN;
	ew32(TCTL_EXT, reg_data);

	/* Configure Transmit Inter-Packet Gap */
	reg_data = er32(TIPG);
	reg_data &= ~E1000_TIPG_IPGT_MASK;
	reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
	ew32(TIPG, reg_data);

	/* NOTE(review): clears bit 20 of flexible filter table entry 1;
	 * the bit's meaning is not documented here - confirm against the
	 * hardware datasheet.
	 */
	reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001);
	reg_data &= ~0x00100000;
	E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_80003es2lan(hw);

	return ret_val;
}
820
/**
 * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2
 * @hw: pointer to the HW structure
 *
 * Initializes required hardware-dependent bits needed for normal
 * operation in both transmit descriptor control registers and the
 * transmit arbitration control registers.
 **/
static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
{
	u32 reg;

	/* Transmit Descriptor Control 0 */
	reg = er32(TXDCTL);
	reg |= (1 << 22);	/* NOTE(review): undocumented init bit - see datasheet */
	ew32(TXDCTL, reg);

	/* Transmit Descriptor Control 1 */
	reg = er32(TXDCTL1);
	reg |= (1 << 22);	/* same undocumented bit for the second queue */
	ew32(TXDCTL1, reg);

	/* Transmit Arbitration Control 0 */
	reg = er32(TARC0);
	reg &= ~(0xF << 27);	/* 30:27 */
	if (hw->media_type != e1000_media_type_copper)
		reg &= ~(1 << 20);	/* fiber/serdes only */
	ew32(TARC0, reg);

	/* Transmit Arbitration Control 1: bit 28 is set only when
	 * multiple-request (MULR) transmit is disabled.
	 */
	reg = er32(TARC1);
	if (er32(TCTL) & E1000_TCTL_MULR)
		reg &= ~(1 << 28);
	else
		reg |= (1 << 28);
	ew32(TARC1, reg);
}
856
/**
 * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link
 * @hw: pointer to the HW structure
 *
 * Setup some GG82563 PHY registers for obtaining link: CRS/TX_CLK,
 * crossover mode, polarity correction, a PHY reset to commit the
 * changes, Kumeran FIFO bypass, reverse auto-neg disable, and the
 * Kumeran padding workaround.  Registers shared with manageability
 * firmware are skipped when the HW is in IAMT mode.
 **/
static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u32 ctrl_ext;
	u16 data;

	ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL,
			   &data);
	if (ret_val)
		return ret_val;

	data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
	/* Use 25MHz for both link down and 1000Base-T for Tx clock. */
	data |= GG82563_MSCR_TX_CLK_1000MBPS_25;

	ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL,
			   data);
	if (ret_val)
		return ret_val;

	/* Options:
	 *   MDI/MDI-X = 0 (default)
	 *   0 - Auto for all speeds
	 *   1 - MDI mode
	 *   2 - MDI-X mode
	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
	 */
	ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data);
	if (ret_val)
		return ret_val;

	data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;

	switch (phy->mdix) {
	case 1:
		data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
		break;
	case 2:
		data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
		break;
	case 0:
	default:
		data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
		break;
	}

	/* Options:
	 *   disable_polarity_correction = 0 (default)
	 *       Automatic Correction for Reversed Cable Polarity
	 *   0 - Disabled
	 *   1 - Enabled
	 */
	data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
	if (phy->disable_polarity_correction)
		data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;

	ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data);
	if (ret_val)
		return ret_val;

	/* SW Reset the PHY so all changes take effect */
	ret_val = e1000e_commit_phy(hw);
	if (ret_val) {
		hw_dbg(hw, "Error Resetting the PHY\n");
		return ret_val;
	}

	/* Bypass RX and TX FIFO's */
	ret_val = e1000e_write_kmrn_reg(hw,
				E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
				E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
				E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
	if (ret_val)
		return ret_val;

	ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data);
	if (ret_val)
		return ret_val;

	/* Disable reverse auto-negotiation. */
	data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
	ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data);
	if (ret_val)
		return ret_val;

	/* Clear the MAC's link-mode selection bits. */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
	ew32(CTRL_EXT, ctrl_ext);

	ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
	if (ret_val)
		return ret_val;

	/* Do not init these registers when the HW is in IAMT mode, since the
	 * firmware will have already initialized them.  We only initialize
	 * them if the HW is not in IAMT mode.
	 */
	if (!e1000e_check_mng_mode(hw)) {
		/* Enable Electrical Idle on the PHY */
		data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
		ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data);
		if (ret_val)
			return ret_val;

		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data);
		if (ret_val)
			return ret_val;

		data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
		ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
		if (ret_val)
			return ret_val;
	}

	/* Workaround: Disable padding in Kumeran interface in the MAC
	 * and in the PHY to avoid CRC errors.
	 */
	ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
	if (ret_val)
		return ret_val;

	data |= GG82563_ICR_DIS_PADDING;
	ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data);
	if (ret_val)
		return ret_val;

	return 0;
}
991
992/**
993 * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
994 * @hw: pointer to the HW structure
995 *
996 * Essentially a wrapper for setting up all things "copper" related.
997 * This is a function pointer entry point called by the mac module.
998 **/
999static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
1000{
1001 u32 ctrl;
1002 s32 ret_val;
1003 u16 reg_data;
1004
1005 ctrl = er32(CTRL);
1006 ctrl |= E1000_CTRL_SLU;
1007 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1008 ew32(CTRL, ctrl);
1009
1010 /* Set the mac to wait the maximum time between each
1011 * iteration and increase the max iterations when
1012 * polling the phy; this fixes erroneous timeouts at 10Mbps. */
1013 ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
1014 if (ret_val)
1015 return ret_val;
1016 ret_val = e1000e_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
1017 if (ret_val)
1018 return ret_val;
1019 reg_data |= 0x3F;
1020 ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
1021 if (ret_val)
1022 return ret_val;
1023 ret_val = e1000e_read_kmrn_reg(hw,
1024 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
1025 &reg_data);
1026 if (ret_val)
1027 return ret_val;
1028 reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
1029 ret_val = e1000e_write_kmrn_reg(hw,
1030 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
1031 reg_data);
1032 if (ret_val)
1033 return ret_val;
1034
1035 ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
1036 if (ret_val)
1037 return ret_val;
1038
1039 ret_val = e1000e_setup_copper_link(hw);
1040
1041 return 0;
1042}
1043
1044/**
1045 * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
1046 * @hw: pointer to the HW structure
1047 * @duplex: current duplex setting
1048 *
1049 * Configure the KMRN interface by applying last minute quirks for
1050 * 10/100 operation.
1051 **/
1052static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
1053{
1054 s32 ret_val;
1055 u32 tipg;
1056 u16 reg_data;
1057
1058 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
1059 ret_val = e1000e_write_kmrn_reg(hw,
1060 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
1061 reg_data);
1062 if (ret_val)
1063 return ret_val;
1064
1065 /* Configure Transmit Inter-Packet Gap */
1066 tipg = er32(TIPG);
1067 tipg &= ~E1000_TIPG_IPGT_MASK;
1068 tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
1069 ew32(TIPG, tipg);
1070
1071 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
1072 if (ret_val)
1073 return ret_val;
1074
1075 if (duplex == HALF_DUPLEX)
1076 reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
1077 else
1078 reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
1079
1080 ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
1081
1082 return 0;
1083}
1084
/**
 * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
 * @hw: pointer to the HW structure
 *
 * Configure the KMRN interface by applying last minute quirks for
 * gigabit operation: the gigabit half-duplex control default, the
 * gigabit inter-packet gap, and pass-false-carrier disabled.
 **/
static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 reg_data;
	u32 tipg;

	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
	ret_val = e1000e_write_kmrn_reg(hw,
					E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
					reg_data);
	if (ret_val)
		return ret_val;

	/* Configure Transmit Inter-Packet Gap */
	tipg = er32(TIPG);
	tipg &= ~E1000_TIPG_IPGT_MASK;
	tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
	ew32(TIPG, tipg);

	ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
	if (ret_val)
		return ret_val;

	/* Never pass false carrier at gigabit speed. */
	reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
	ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);

	return ret_val;
}
1120
/**
 * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the hardware counters by reading the counter registers.
 **/
static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
{
	u32 temp;

	e1000e_clear_hw_cntrs_base(hw);

	/* The statistics registers below are cleared by the act of reading
	 * them; the values read are deliberately discarded. */

	/* Packet size histogram counters (rx then tx) */
	temp = er32(PRC64);
	temp = er32(PRC127);
	temp = er32(PRC255);
	temp = er32(PRC511);
	temp = er32(PRC1023);
	temp = er32(PRC1522);
	temp = er32(PTC64);
	temp = er32(PTC127);
	temp = er32(PTC255);
	temp = er32(PTC511);
	temp = er32(PTC1023);
	temp = er32(PTC1522);

	/* Error and TCP segmentation counters */
	temp = er32(ALGNERRC);
	temp = er32(RXERRC);
	temp = er32(TNCRS);
	temp = er32(CEXTERR);
	temp = er32(TSCTC);
	temp = er32(TSCTFC);

	/* Management packet counters */
	temp = er32(MGTPRC);
	temp = er32(MGTPDC);
	temp = er32(MGTPTC);

	/* Interrupt assertion / cause counters */
	temp = er32(IAC);
	temp = er32(ICRXOC);

	temp = er32(ICRXPTC);
	temp = er32(ICRXATC);
	temp = er32(ICTXPTC);
	temp = er32(ICTXATC);
	temp = er32(ICTXQEC);
	temp = er32(ICTXQMTC);
	temp = er32(ICRXDMTC);
}
1168
/* MAC operation function pointers for the 80003es2lan.  The slots marked
 * "dependent on media type" are left unset here and are presumably filled
 * in during invariant setup — confirm against
 * e1000_get_invariants_80003es2lan. */
static struct e1000_mac_operations es2_mac_ops = {
	.mng_mode_enab = E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT,
	/* check_for_link dependent on media type */
	.cleanup_led = e1000e_cleanup_led_generic,
	.clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan,
	.get_bus_info = e1000e_get_bus_info_pcie,
	.get_link_up_info = e1000_get_link_up_info_80003es2lan,
	.led_on = e1000e_led_on_generic,
	.led_off = e1000e_led_off_generic,
	.mc_addr_list_update = e1000e_mc_addr_list_update_generic,
	.reset_hw = e1000_reset_hw_80003es2lan,
	.init_hw = e1000_init_hw_80003es2lan,
	.setup_link = e1000e_setup_link,
	/* setup_physical_interface dependent on media type */
};
1184
/* PHY operation function pointers for the 80003es2lan (GG82563 PHY);
 * register access goes through the part-specific read/write wrappers. */
static struct e1000_phy_operations es2_phy_ops = {
	.acquire_phy = e1000_acquire_phy_80003es2lan,
	.check_reset_block = e1000e_check_reset_block_generic,
	.commit_phy = e1000e_phy_sw_reset,
	.force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan,
	.get_cfg_done = e1000_get_cfg_done_80003es2lan,
	.get_cable_length = e1000_get_cable_length_80003es2lan,
	.get_phy_info = e1000e_get_phy_info_m88,
	.read_phy_reg = e1000_read_phy_reg_gg82563_80003es2lan,
	.release_phy = e1000_release_phy_80003es2lan,
	.reset_phy = e1000e_phy_hw_reset_generic,
	.set_d0_lplu_state = NULL,
	.set_d3_lplu_state = e1000e_set_d3_lplu_state,
	.write_phy_reg = e1000_write_phy_reg_gg82563_80003es2lan,
};
1200
/* NVM (EEPROM) operation function pointers for the 80003es2lan; reads use
 * the generic EERD path while acquire/release/write are part specific. */
static struct e1000_nvm_operations es2_nvm_ops = {
	.acquire_nvm = e1000_acquire_nvm_80003es2lan,
	.read_nvm = e1000e_read_nvm_eerd,
	.release_nvm = e1000_release_nvm_80003es2lan,
	.update_nvm = e1000e_update_nvm_checksum_generic,
	.valid_led_default = e1000e_valid_led_default,
	.validate_nvm = e1000e_validate_nvm_checksum_generic,
	.write_nvm = e1000_write_nvm_80003es2lan,
};
1210
/* Board description exported to the core driver for 80003es2lan parts:
 * feature flags (several of which work around hardware errata), packet
 * buffer allocation, and the operation tables defined above. */
struct e1000_info e1000_es2_info = {
	.mac = e1000_80003es2lan,
	.flags = FLAG_HAS_HW_VLAN_FILTER
		| FLAG_HAS_JUMBO_FRAMES
		| FLAG_HAS_STATS_PTC_PRC
		| FLAG_HAS_WOL
		| FLAG_APME_IN_CTRL3
		| FLAG_RX_CSUM_ENABLED
		| FLAG_HAS_CTRLEXT_ON_LOAD
		| FLAG_HAS_STATS_ICR_ICT
		| FLAG_RX_NEEDS_RESTART /* errata */
		| FLAG_TARC_SET_BIT_ZERO /* errata */
		| FLAG_APME_CHECK_PORT_B
		| FLAG_DISABLE_FC_PAUSE_TIME /* errata */
		| FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
	.pba = 38,
	.get_invariants = e1000_get_invariants_80003es2lan,
	.mac_ops = &es2_mac_ops,
	.phy_ops = &es2_phy_ops,
	.nvm_ops = &es2_nvm_ops,
};
1232
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
new file mode 100644
index 000000000000..0e80406bfbd7
--- /dev/null
+++ b/drivers/net/e1000e/ethtool.c
@@ -0,0 +1,1774 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/* ethtool support for e1000 */
30
31#include <linux/netdevice.h>
32#include <linux/ethtool.h>
33#include <linux/pci.h>
34#include <linux/delay.h>
35
36#include "e1000.h"
37
/* Descriptor for one ethtool statistic: its display name plus the size and
 * byte offset of the backing field inside struct e1000_adapter. */
struct e1000_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

/* Expands to the "sizeof_stat, stat_offset" initializer pair for adapter
 * member @m */
#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \
		      offsetof(struct e1000_adapter, m)
/* Table driving ethtool -S: names and adapter fields, in display order */
static const struct e1000_stats e1000_gstrings_stats[] = {
	{ "rx_packets", E1000_STAT(stats.gprc) },
	{ "tx_packets", E1000_STAT(stats.gptc) },
	{ "rx_bytes", E1000_STAT(stats.gorcl) },
	{ "tx_bytes", E1000_STAT(stats.gotcl) },
	{ "rx_broadcast", E1000_STAT(stats.bprc) },
	{ "tx_broadcast", E1000_STAT(stats.bptc) },
	{ "rx_multicast", E1000_STAT(stats.mprc) },
	{ "tx_multicast", E1000_STAT(stats.mptc) },
	{ "rx_errors", E1000_STAT(net_stats.rx_errors) },
	{ "tx_errors", E1000_STAT(net_stats.tx_errors) },
	{ "tx_dropped", E1000_STAT(net_stats.tx_dropped) },
	{ "multicast", E1000_STAT(stats.mprc) },
	{ "collisions", E1000_STAT(stats.colc) },
	{ "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) },
	{ "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) },
	{ "rx_crc_errors", E1000_STAT(stats.crcerrs) },
	{ "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
	{ "rx_missed_errors", E1000_STAT(stats.mpc) },
	{ "tx_aborted_errors", E1000_STAT(stats.ecol) },
	{ "tx_carrier_errors", E1000_STAT(stats.tncrs) },
	{ "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) },
	{ "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) },
	{ "tx_window_errors", E1000_STAT(stats.latecol) },
	{ "tx_abort_late_coll", E1000_STAT(stats.latecol) },
	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
	{ "tx_restart_queue", E1000_STAT(restart_queue) },
	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
	{ "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
	{ "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
	{ "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
	{ "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
	{ "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
	{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
	{ "rx_long_byte_count", E1000_STAT(stats.gorcl) },
	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
	{ "rx_header_split", E1000_STAT(rx_hdr_split) },
	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
	{ "tx_smbus", E1000_STAT(stats.mgptc) },
	{ "rx_smbus", E1000_STAT(stats.mgprc) },
	{ "dropped_smbus", E1000_STAT(stats.mgpdc) },
	{ "rx_dma_failed", E1000_STAT(rx_dma_failed) },
	{ "tx_dma_failed", E1000_STAT(tx_dma_failed) },
};

/* Number of entries in the stats table above */
#define E1000_GLOBAL_STATS_LEN \
	sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN)
/* Names of the self-tests run by ethtool -t, in execution order */
static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};
#define E1000_TEST_LEN sizeof(e1000_gstrings_test) / ETH_GSTRING_LEN
107
/* ETHTOOL_GSET hook: report supported/advertised link modes, port type and
 * the current speed/duplex (read from the hardware when link is up). */
static int e1000_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->media_type == e1000_media_type_copper) {

		ecmd->supported = (SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_1000baseT_Full |
				   SUPPORTED_Autoneg |
				   SUPPORTED_TP);
		/* the IFE PHY does not report 1000BASE-T as supported */
		if (hw->phy.type == e1000_phy_ife)
			ecmd->supported &= ~SUPPORTED_1000baseT_Full;
		ecmd->advertising = ADVERTISED_TP;

		if (hw->mac.autoneg == 1) {
			ecmd->advertising |= ADVERTISED_Autoneg;
			/* the e1000 autoneg seems to match ethtool nicely */
			ecmd->advertising |= hw->phy.autoneg_advertised;
		}

		ecmd->port = PORT_TP;
		ecmd->phy_address = hw->phy.addr;
		ecmd->transceiver = XCVR_INTERNAL;

	} else {
		/* fiber/serdes: gigabit full duplex only */
		ecmd->supported = (SUPPORTED_1000baseT_Full |
				   SUPPORTED_FIBRE |
				   SUPPORTED_Autoneg);

		ecmd->advertising = (ADVERTISED_1000baseT_Full |
				     ADVERTISED_FIBRE |
				     ADVERTISED_Autoneg);

		ecmd->port = PORT_FIBRE;
		ecmd->transceiver = XCVR_EXTERNAL;
	}

	if (er32(STATUS) & E1000_STATUS_LU) {

		adapter->hw.mac.ops.get_link_up_info(hw, &adapter->link_speed,
						     &adapter->link_duplex);
		ecmd->speed = adapter->link_speed;

		/* unfortunately FULL_DUPLEX != DUPLEX_FULL
		 * and HALF_DUPLEX != DUPLEX_HALF */

		if (adapter->link_duplex == FULL_DUPLEX)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		/* no link: speed/duplex unknown */
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
			 hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	return 0;
}
172
173static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
174{
175 struct e1000_mac_info *mac = &adapter->hw.mac;
176
177 mac->autoneg = 0;
178
179 /* Fiber NICs only allow 1000 gbps Full duplex */
180 if ((adapter->hw.media_type == e1000_media_type_fiber) &&
181 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
182 ndev_err(adapter->netdev, "Unsupported Speed/Duplex "
183 "configuration\n");
184 return -EINVAL;
185 }
186
187 switch (spddplx) {
188 case SPEED_10 + DUPLEX_HALF:
189 mac->forced_speed_duplex = ADVERTISE_10_HALF;
190 break;
191 case SPEED_10 + DUPLEX_FULL:
192 mac->forced_speed_duplex = ADVERTISE_10_FULL;
193 break;
194 case SPEED_100 + DUPLEX_HALF:
195 mac->forced_speed_duplex = ADVERTISE_100_HALF;
196 break;
197 case SPEED_100 + DUPLEX_FULL:
198 mac->forced_speed_duplex = ADVERTISE_100_FULL;
199 break;
200 case SPEED_1000 + DUPLEX_FULL:
201 mac->autoneg = 1;
202 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
203 break;
204 case SPEED_1000 + DUPLEX_HALF: /* not supported */
205 default:
206 ndev_err(adapter->netdev, "Unsupported Speed/Duplex "
207 "configuration\n");
208 return -EINVAL;
209 }
210 return 0;
211}
212
/* ETHTOOL_SSET hook: apply autoneg or a forced speed/duplex, then reset
 * the link.  Serialized against other resets via the __E1000_RESETTING
 * bit. */
static int e1000_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* When SoL/IDER sessions are active, autoneg/speed/duplex
	 * cannot be changed */
	if (e1000_check_reset_block(hw)) {
		ndev_err(netdev, "Cannot change link "
			 "characteristics when SoL/IDER is active.\n");
		return -EINVAL;
	}

	/* spin until we own the reset lock */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		msleep(1);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		hw->mac.autoneg = 1;
		/* fiber advertises a fixed set; copper takes the user's
		 * advertisement mask */
		if (hw->media_type == e1000_media_type_fiber)
			hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
						     ADVERTISED_FIBRE |
						     ADVERTISED_Autoneg;
		else
			hw->phy.autoneg_advertised = ecmd->advertising |
						     ADVERTISED_TP |
						     ADVERTISED_Autoneg;
		ecmd->advertising = hw->phy.autoneg_advertised;
	} else {
		if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
			clear_bit(__E1000_RESETTING, &adapter->state);
			return -EINVAL;
		}
	}

	/* reset the link */

	if (netif_running(adapter->netdev)) {
		e1000e_down(adapter);
		e1000e_up(adapter);
	} else {
		e1000e_reset(adapter);
	}

	clear_bit(__E1000_RESETTING, &adapter->state);
	return 0;
}
260
261static void e1000_get_pauseparam(struct net_device *netdev,
262 struct ethtool_pauseparam *pause)
263{
264 struct e1000_adapter *adapter = netdev_priv(netdev);
265 struct e1000_hw *hw = &adapter->hw;
266
267 pause->autoneg =
268 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
269
270 if (hw->mac.fc == e1000_fc_rx_pause) {
271 pause->rx_pause = 1;
272 } else if (hw->mac.fc == e1000_fc_tx_pause) {
273 pause->tx_pause = 1;
274 } else if (hw->mac.fc == e1000_fc_full) {
275 pause->rx_pause = 1;
276 pause->tx_pause = 1;
277 }
278}
279
280static int e1000_set_pauseparam(struct net_device *netdev,
281 struct ethtool_pauseparam *pause)
282{
283 struct e1000_adapter *adapter = netdev_priv(netdev);
284 struct e1000_hw *hw = &adapter->hw;
285 int retval = 0;
286
287 adapter->fc_autoneg = pause->autoneg;
288
289 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
290 msleep(1);
291
292 if (pause->rx_pause && pause->tx_pause)
293 hw->mac.fc = e1000_fc_full;
294 else if (pause->rx_pause && !pause->tx_pause)
295 hw->mac.fc = e1000_fc_rx_pause;
296 else if (!pause->rx_pause && pause->tx_pause)
297 hw->mac.fc = e1000_fc_tx_pause;
298 else if (!pause->rx_pause && !pause->tx_pause)
299 hw->mac.fc = e1000_fc_none;
300
301 hw->mac.original_fc = hw->mac.fc;
302
303 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
304 if (netif_running(adapter->netdev)) {
305 e1000e_down(adapter);
306 e1000e_up(adapter);
307 } else {
308 e1000e_reset(adapter);
309 }
310 } else {
311 retval = ((hw->media_type == e1000_media_type_fiber) ?
312 hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw));
313 }
314
315 clear_bit(__E1000_RESETTING, &adapter->state);
316 return retval;
317}
318
319static u32 e1000_get_rx_csum(struct net_device *netdev)
320{
321 struct e1000_adapter *adapter = netdev_priv(netdev);
322 return (adapter->flags & FLAG_RX_CSUM_ENABLED);
323}
324
325static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
326{
327 struct e1000_adapter *adapter = netdev_priv(netdev);
328
329 if (data)
330 adapter->flags |= FLAG_RX_CSUM_ENABLED;
331 else
332 adapter->flags &= ~FLAG_RX_CSUM_ENABLED;
333
334 if (netif_running(netdev))
335 e1000e_reinit_locked(adapter);
336 else
337 e1000e_reset(adapter);
338 return 0;
339}
340
341static u32 e1000_get_tx_csum(struct net_device *netdev)
342{
343 return ((netdev->features & NETIF_F_HW_CSUM) != 0);
344}
345
346static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
347{
348 if (data)
349 netdev->features |= NETIF_F_HW_CSUM;
350 else
351 netdev->features &= ~NETIF_F_HW_CSUM;
352
353 return 0;
354}
355
356static int e1000_set_tso(struct net_device *netdev, u32 data)
357{
358 struct e1000_adapter *adapter = netdev_priv(netdev);
359
360 if (data) {
361 netdev->features |= NETIF_F_TSO;
362 netdev->features |= NETIF_F_TSO6;
363 } else {
364 netdev->features &= ~NETIF_F_TSO;
365 netdev->features &= ~NETIF_F_TSO6;
366 }
367
368 ndev_info(netdev, "TSO is %s\n",
369 data ? "Enabled" : "Disabled");
370 adapter->flags |= FLAG_TSO_FORCE;
371 return 0;
372}
373
/* ETHTOOL_GMSGLVL hook: report the driver's message-enable bitmap */
static u32 e1000_get_msglevel(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}
379
/* ETHTOOL_SMSGLVL hook: set the driver's message-enable bitmap */
static void e1000_set_msglevel(struct net_device *netdev, u32 data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}
385
/* Size in bytes of the buffer e1000_get_regs() fills; E1000_REGS_LEN is
 * also used there for the memset, so keep the two in sync. */
static int e1000_get_regs_len(struct net_device *netdev)
{
#define E1000_REGS_LEN 32 /* overestimate */
	return E1000_REGS_LEN * sizeof(u32);
}
391
/* ETHTOOL_GREGS hook: dump a fixed 32-word snapshot of MAC registers plus
 * selected PHY data into @p (sized per e1000_get_regs_len()).  Slot layout
 * is chosen to line up with the legacy e1000 IGP-PHY register dump. */
static void e1000_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u16 phy_data;
	u8 revision_id;

	memset(p, 0, E1000_REGS_LEN * sizeof(u32));

	pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id);

	/* version word: format 1, chip revision, PCI device id */
	regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;

	/* device control and status */
	regs_buff[0] = er32(CTRL);
	regs_buff[1] = er32(STATUS);

	/* receive ring state */
	regs_buff[2] = er32(RCTL);
	regs_buff[3] = er32(RDLEN);
	regs_buff[4] = er32(RDH);
	regs_buff[5] = er32(RDT);
	regs_buff[6] = er32(RDTR);

	/* transmit ring state */
	regs_buff[7] = er32(TCTL);
	regs_buff[8] = er32(TDLEN);
	regs_buff[9] = er32(TDH);
	regs_buff[10] = er32(TDT);
	regs_buff[11] = er32(TIDV);

	regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
	if (hw->phy.type == e1000_phy_m88) {
		e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
		regs_buff[13] = (u32)phy_data; /* cable length */
		regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
		e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
		regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
		regs_buff[18] = regs_buff[13]; /* cable polarity */
		regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[20] = regs_buff[17]; /* polarity correction */
		/* phy receive errors */
		regs_buff[22] = adapter->phy_stats.receive_errors;
		regs_buff[23] = regs_buff[13]; /* mdix mode */
	}
	/* slots 13-20/22-23 stay zero for non-M88 PHYs (memset above) */
	regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
	regs_buff[24] = (u32)phy_data; /* phy local receiver status */
	regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
}
443
/* NVM size in bytes (word count * 2), as reported to ethtool -e */
static int e1000_get_eeprom_len(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.nvm.word_size * 2;
}
449
450static int e1000_get_eeprom(struct net_device *netdev,
451 struct ethtool_eeprom *eeprom, u8 *bytes)
452{
453 struct e1000_adapter *adapter = netdev_priv(netdev);
454 struct e1000_hw *hw = &adapter->hw;
455 u16 *eeprom_buff;
456 int first_word;
457 int last_word;
458 int ret_val = 0;
459 u16 i;
460
461 if (eeprom->len == 0)
462 return -EINVAL;
463
464 eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16);
465
466 first_word = eeprom->offset >> 1;
467 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
468
469 eeprom_buff = kmalloc(sizeof(u16) *
470 (last_word - first_word + 1), GFP_KERNEL);
471 if (!eeprom_buff)
472 return -ENOMEM;
473
474 if (hw->nvm.type == e1000_nvm_eeprom_spi) {
475 ret_val = e1000_read_nvm(hw, first_word,
476 last_word - first_word + 1,
477 eeprom_buff);
478 } else {
479 for (i = 0; i < last_word - first_word + 1; i++) {
480 ret_val = e1000_read_nvm(hw, first_word + i, 1,
481 &eeprom_buff[i]);
482 if (ret_val)
483 break;
484 }
485 }
486
487 /* Device's eeprom is always little-endian, word addressable */
488 for (i = 0; i < last_word - first_word + 1; i++)
489 le16_to_cpus(&eeprom_buff[i]);
490
491 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
492 kfree(eeprom_buff);
493
494 return ret_val;
495}
496
497static int e1000_set_eeprom(struct net_device *netdev,
498 struct ethtool_eeprom *eeprom, u8 *bytes)
499{
500 struct e1000_adapter *adapter = netdev_priv(netdev);
501 struct e1000_hw *hw = &adapter->hw;
502 u16 *eeprom_buff;
503 void *ptr;
504 int max_len;
505 int first_word;
506 int last_word;
507 int ret_val = 0;
508 u16 i;
509
510 if (eeprom->len == 0)
511 return -EOPNOTSUPP;
512
513 if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16)))
514 return -EFAULT;
515
516 max_len = hw->nvm.word_size * 2;
517
518 first_word = eeprom->offset >> 1;
519 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
520 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
521 if (!eeprom_buff)
522 return -ENOMEM;
523
524 ptr = (void *)eeprom_buff;
525
526 if (eeprom->offset & 1) {
527 /* need read/modify/write of first changed EEPROM word */
528 /* only the second byte of the word is being modified */
529 ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]);
530 ptr++;
531 }
532 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0))
533 /* need read/modify/write of last changed EEPROM word */
534 /* only the first byte of the word is being modified */
535 ret_val = e1000_read_nvm(hw, last_word, 1,
536 &eeprom_buff[last_word - first_word]);
537
538 /* Device's eeprom is always little-endian, word addressable */
539 for (i = 0; i < last_word - first_word + 1; i++)
540 le16_to_cpus(&eeprom_buff[i]);
541
542 memcpy(ptr, bytes, eeprom->len);
543
544 for (i = 0; i < last_word - first_word + 1; i++)
545 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
546
547 ret_val = e1000_write_nvm(hw, first_word,
548 last_word - first_word + 1, eeprom_buff);
549
550 /* Update the checksum over the first part of the EEPROM if needed
551 * and flush shadow RAM for 82573 controllers */
552 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) ||
553 (hw->mac.type == e1000_82573)))
554 e1000e_update_nvm_checksum(hw);
555
556 kfree(eeprom_buff);
557 return ret_val;
558}
559
560static void e1000_get_drvinfo(struct net_device *netdev,
561 struct ethtool_drvinfo *drvinfo)
562{
563 struct e1000_adapter *adapter = netdev_priv(netdev);
564 char firmware_version[32];
565 u16 eeprom_data;
566
567 strncpy(drvinfo->driver, e1000e_driver_name, 32);
568 strncpy(drvinfo->version, e1000e_driver_version, 32);
569
570 /* EEPROM image version # is reported as firmware version # for
571 * PCI-E controllers */
572 e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data);
573 sprintf(firmware_version, "%d.%d-%d",
574 (eeprom_data & 0xF000) >> 12,
575 (eeprom_data & 0x0FF0) >> 4,
576 eeprom_data & 0x000F);
577
578 strncpy(drvinfo->fw_version, firmware_version, 32);
579 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
580 drvinfo->n_stats = E1000_STATS_LEN;
581 drvinfo->testinfo_len = E1000_TEST_LEN;
582 drvinfo->regdump_len = e1000_get_regs_len(netdev);
583 drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
584}
585
586static void e1000_get_ringparam(struct net_device *netdev,
587 struct ethtool_ringparam *ring)
588{
589 struct e1000_adapter *adapter = netdev_priv(netdev);
590 struct e1000_ring *tx_ring = adapter->tx_ring;
591 struct e1000_ring *rx_ring = adapter->rx_ring;
592
593 ring->rx_max_pending = E1000_MAX_RXD;
594 ring->tx_max_pending = E1000_MAX_TXD;
595 ring->rx_mini_max_pending = 0;
596 ring->rx_jumbo_max_pending = 0;
597 ring->rx_pending = rx_ring->count;
598 ring->tx_pending = tx_ring->count;
599 ring->rx_mini_pending = 0;
600 ring->rx_jumbo_pending = 0;
601}
602
/* ETHTOOL_SRINGPARAM hook: resize the tx/rx descriptor rings.  New ring
 * structures are allocated and set up before the old ones are freed so a
 * failure leaves the interface on its original rings. */
static int e1000_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *tx_ring, *tx_old;
	struct e1000_ring *rx_ring, *rx_old;
	int err;

	/* mini/jumbo rings are not supported */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	/* spin until we own the reset lock */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(adapter->netdev))
		e1000e_down(adapter);

	tx_old = adapter->tx_ring;
	rx_old = adapter->rx_ring;

	err = -ENOMEM;
	tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
	if (!tx_ring)
		goto err_alloc_tx;

	rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
	if (!rx_ring)
		goto err_alloc_rx;

	adapter->tx_ring = tx_ring;
	adapter->rx_ring = rx_ring;

	/* clamp the requested counts to the hardware limits and round to
	 * the descriptor alignment multiple */
	rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD);
	rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD));
	rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE);

	tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD);
	tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD));
	tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);

	if (netif_running(adapter->netdev)) {
		/* Try to get new resources before deleting old */
		err = e1000e_setup_rx_resources(adapter);
		if (err)
			goto err_setup_rx;
		err = e1000e_setup_tx_resources(adapter);
		if (err)
			goto err_setup_tx;

		/* save the new, restore the old in order to free it,
		 * then restore the new back again */
		adapter->rx_ring = rx_old;
		adapter->tx_ring = tx_old;
		e1000e_free_rx_resources(adapter);
		e1000e_free_tx_resources(adapter);
		kfree(tx_old);
		kfree(rx_old);
		adapter->rx_ring = rx_ring;
		adapter->tx_ring = tx_ring;
		err = e1000e_up(adapter);
		if (err)
			goto err_setup;
	}

	clear_bit(__E1000_RESETTING, &adapter->state);
	return 0;
err_setup_tx:
	e1000e_free_rx_resources(adapter);
err_setup_rx:
	adapter->rx_ring = rx_old;
	adapter->tx_ring = tx_old;
	kfree(rx_ring);
err_alloc_rx:
	kfree(tx_ring);
err_alloc_tx:
	e1000e_up(adapter);
err_setup:
	clear_bit(__E1000_RESETTING, &adapter->state);
	return err;
}
683
/* Write each test pattern to the (possibly array-indexed) register,
 * masked by its writeable bits, read it back and fail the self-test on a
 * mismatch.  The failure message now reports the value actually read back
 * (_value); it previously printed the caller's unrelated 'value' variable,
 * making register-test failures misleading. */
#define REG_PATTERN_TEST(R, M, W) REG_PATTERN_TEST_ARRAY(R, 0, M, W)
#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, writeable) \
{ \
	u32 _pat; \
	u32 _value; \
	u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
	for (_pat = 0; _pat < ARRAY_SIZE(_test); _pat++) { \
		E1000_WRITE_REG_ARRAY(hw, reg, offset, \
				      (_test[_pat] & writeable)); \
		_value = E1000_READ_REG_ARRAY(hw, reg, offset); \
		if (_value != (_test[_pat] & writeable & mask)) { \
			ndev_err(netdev, "pattern test reg %04X " \
				 "failed: got 0x%08X expected 0x%08X\n", \
				 reg + offset, \
				 _value, (_test[_pat] & writeable & mask)); \
			*data = reg; \
			return 1; \
		} \
	} \
}
704
/* Write (W & M) to register R, read it back, and fail the self-test if
 * the masked read-back does not match what was written. */
#define REG_SET_AND_CHECK(R, M, W) \
{ \
	u32 _value; \
	__ew32(hw, R, W & M); \
	_value = __er32(hw, R); \
	if ((W & M) != (_value & M)) { \
		ndev_err(netdev, "set/check reg %04X test failed: " \
			 "got 0x%08X expected 0x%08X\n", R, (_value & M), \
			 (W & M)); \
		*data = R; \
		return 1; \
	} \
}
718
/* Offline register self-test: verify STATUS is effectively read-only
 * (modulo per-MAC r/w bits), then pattern-test and set/check the major
 * MAC registers.  *data receives 0 on pass or the offending register
 * offset (or 1 for the STATUS check) on failure. */
static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct net_device *netdev = adapter->netdev;
	u32 value;
	u32 before;
	u32 after;
	u32 i;
	u32 toggle;

	/* The status register is Read Only, so a write should fail.
	 * Some bits that get toggled are ignored.
	 */
	switch (mac->type) {
	/* there are several bits on newer hardware that are r/w */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		toggle = 0x7FFFF3FF;
		break;
	case e1000_82573:
	case e1000_ich8lan:
	case e1000_ich9lan:
		toggle = 0x7FFFF033;
		break;
	default:
		toggle = 0xFFFFF833;
		break;
	}

	before = er32(STATUS);
	value = (er32(STATUS) & toggle);
	ew32(STATUS, toggle);
	after = er32(STATUS) & toggle;
	if (value != after) {
		ndev_err(netdev, "failed STATUS register test got: "
			 "0x%08X expected: 0x%08X\n", after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ew32(STATUS, before);

	/* flow-control/VLAN registers are absent on ICH parts */
	if ((mac->type != e1000_ich8lan) &&
	    (mac->type != e1000_ich9lan)) {
		REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF);
	}

	REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF);
	REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
	REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
	REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF);

	REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);

	before = (((mac->type == e1000_ich8lan) ||
		   (mac->type == e1000_ich9lan)) ? 0x06C3B33E : 0x06DFB3FE);
	REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
	REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);

	REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x01FFFFFF);
	REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFF000, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_TXCW, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFF000, 0xFFFFFFFF);

	/* exercise every multicast-table array entry */
	for (i = 0; i < mac->mta_reg_count; i++)
		REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);

	*data = 0;
	return 0;
}
800
801static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
802{
803 u16 temp;
804 u16 checksum = 0;
805 u16 i;
806
807 *data = 0;
808 /* Read and add up the contents of the EEPROM */
809 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
810 if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
811 *data = 1;
812 break;
813 }
814 checksum += temp;
815 }
816
817 /* If Checksum is not Correct return error else test passed */
818 if ((checksum != (u16) NVM_SUM) && !(*data))
819 *data = 2;
820
821 return *data;
822}
823
824static irqreturn_t e1000_test_intr(int irq, void *data)
825{
826 struct net_device *netdev = (struct net_device *) data;
827 struct e1000_adapter *adapter = netdev_priv(netdev);
828 struct e1000_hw *hw = &adapter->hw;
829
830 adapter->test_icr |= er32(ICR);
831
832 return IRQ_HANDLED;
833}
834
/* Offline interrupt self-test: temporarily install e1000_test_intr, then
 * for each cause bit verify that (a) a masked cause does not interrupt,
 * (b) an unmasked cause does, and (c) unrelated causes stay quiet.
 * Checks (a) and (c) are skipped when the IRQ line is shared.  *data: 0 =
 * pass, 1 = could not hook IRQ, 3/4/5 = the three checks above. */
static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 mask;
	u32 shared_int = 1;
	u32 irq = adapter->pdev->irq;
	int i;

	*data = 0;

	/* NOTE: we don't test MSI interrupts here, yet */
	/* Hook up test interrupt handler just for this test */
	if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
			 netdev)) {
		shared_int = 0;
	} else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	ndev_info(netdev, "testing %s interrupt\n",
		  (shared_int ? "shared" : "unshared"));

	/* Disable all the interrupts */
	ew32(IMC, 0xFFFFFFFF);
	msleep(10);

	/* Test each interrupt */
	for (i = 0; i < 10; i++) {

		/* cause bit 8 is skipped on ICH parts */
		if (((adapter->hw.mac.type == e1000_ich8lan) ||
		     (adapter->hw.mac.type == e1000_ich9lan)) && i == 8)
			continue;

		/* Interrupt to test */
		mask = 1 << i;

		if (!shared_int) {
			/* Disable the interrupt to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted. If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			ew32(IMC, mask);
			ew32(ICS, mask);
			msleep(10);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/* Enable the interrupt to be reported in
		 * the cause register and then force the same
		 * interrupt and see if one gets posted. If
		 * an interrupt was not posted to the bus, the
		 * test failed.
		 */
		adapter->test_icr = 0;
		ew32(IMS, mask);
		ew32(ICS, mask);
		msleep(10);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/* Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted. If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			ew32(IMC, ~mask & 0x00007FFF);
			ew32(ICS, ~mask & 0x00007FFF);
			msleep(10);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	ew32(IMC, 0xFFFFFFFF);
	msleep(10);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
935
936static void e1000_free_desc_rings(struct e1000_adapter *adapter)
937{
938 struct e1000_ring *tx_ring = &adapter->test_tx_ring;
939 struct e1000_ring *rx_ring = &adapter->test_rx_ring;
940 struct pci_dev *pdev = adapter->pdev;
941 int i;
942
943 if (tx_ring->desc && tx_ring->buffer_info) {
944 for (i = 0; i < tx_ring->count; i++) {
945 if (tx_ring->buffer_info[i].dma)
946 pci_unmap_single(pdev,
947 tx_ring->buffer_info[i].dma,
948 tx_ring->buffer_info[i].length,
949 PCI_DMA_TODEVICE);
950 if (tx_ring->buffer_info[i].skb)
951 dev_kfree_skb(tx_ring->buffer_info[i].skb);
952 }
953 }
954
955 if (rx_ring->desc && rx_ring->buffer_info) {
956 for (i = 0; i < rx_ring->count; i++) {
957 if (rx_ring->buffer_info[i].dma)
958 pci_unmap_single(pdev,
959 rx_ring->buffer_info[i].dma,
960 2048, PCI_DMA_FROMDEVICE);
961 if (rx_ring->buffer_info[i].skb)
962 dev_kfree_skb(rx_ring->buffer_info[i].skb);
963 }
964 }
965
966 if (tx_ring->desc) {
967 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
968 tx_ring->dma);
969 tx_ring->desc = NULL;
970 }
971 if (rx_ring->desc) {
972 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
973 rx_ring->dma);
974 rx_ring->desc = NULL;
975 }
976
977 kfree(tx_ring->buffer_info);
978 tx_ring->buffer_info = NULL;
979 kfree(rx_ring->buffer_info);
980 rx_ring->buffer_info = NULL;
981}
982
/*
 * Build and hardware-program the dedicated Tx/Rx descriptor rings used
 * by the loopback self-test.  Tx buffers are 1024-byte skbs mapped for
 * device reads; Rx buffers are 2048-byte skbs mapped for device writes.
 *
 * Returns 0 on success.  On failure a step code (1..8) identifying the
 * allocation/mapping that failed is returned, after tearing down
 * whatever was already built via e1000_free_desc_rings().
 */
static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = &adapter->test_tx_ring;
	struct e1000_ring *rx_ring = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	int size;
	int i;
	int ret_val;

	/* Setup Tx descriptor ring and Tx buffers */

	if (!tx_ring->count)
		tx_ring->count = E1000_DEFAULT_TXD;

	size = tx_ring->count * sizeof(struct e1000_buffer);
	tx_ring->buffer_info = kmalloc(size, GFP_KERNEL);
	if (!tx_ring->buffer_info) {
		ret_val = 1;
		goto err_nomem;
	}
	memset(tx_ring->buffer_info, 0, size);

	/* descriptor area size is rounded up to a 4K multiple */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		ret_val = 2;
		goto err_nomem;
	}
	memset(tx_ring->desc, 0, tx_ring->size);
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	/* point the MAC at the ring (64-bit base split low/high) and
	 * enable transmit with forced collision parameters */
	ew32(TDBAL,
			((u64) tx_ring->dma & 0x00000000FFFFFFFF));
	ew32(TDBAH, ((u64) tx_ring->dma >> 32));
	ew32(TDLEN,
			tx_ring->count * sizeof(struct e1000_tx_desc));
	ew32(TDH, 0);
	ew32(TDT, 0);
	ew32(TCTL,
			E1000_TCTL_PSP | E1000_TCTL_EN |
			E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
			E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);

	/* pre-fill every Tx descriptor with its own mapped skb; each
	 * frame is sent standalone (EOP) with CRC insertion (IFCS) */
	for (i = 0; i < tx_ring->count; i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
		struct sk_buff *skb;
		unsigned int skb_size = 1024;

		skb = alloc_skb(skb_size, GFP_KERNEL);
		if (!skb) {
			ret_val = 3;
			goto err_nomem;
		}
		skb_put(skb, skb_size);
		tx_ring->buffer_info[i].skb = skb;
		tx_ring->buffer_info[i].length = skb->len;
		tx_ring->buffer_info[i].dma =
			pci_map_single(pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) {
			ret_val = 4;
			goto err_nomem;
		}
		tx_desc->buffer_addr = cpu_to_le64(
					 tx_ring->buffer_info[i].dma);
		tx_desc->lower.data = cpu_to_le32(skb->len);
		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
						   E1000_TXD_CMD_IFCS |
						   E1000_TXD_CMD_RPS);
		tx_desc->upper.data = 0;
	}

	/* Setup Rx descriptor ring and Rx buffers */

	if (!rx_ring->count)
		rx_ring->count = E1000_DEFAULT_RXD;

	size = rx_ring->count * sizeof(struct e1000_buffer);
	rx_ring->buffer_info = kmalloc(size, GFP_KERNEL);
	if (!rx_ring->buffer_info) {
		ret_val = 5;
		goto err_nomem;
	}
	memset(rx_ring->buffer_info, 0, size);

	rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc) {
		ret_val = 6;
		goto err_nomem;
	}
	memset(rx_ring->desc, 0, rx_ring->size);
	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	/* disable receive while reprogramming the ring registers, then
	 * re-enable with 2048-byte buffers and broadcast accept */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
	ew32(RDBAH, ((u64) rx_ring->dma >> 32));
	ew32(RDLEN, rx_ring->size);
	ew32(RDH, 0);
	ew32(RDT, 0);
	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
	ew32(RCTL, rctl);

	/* pre-fill every Rx descriptor with a mapped, zeroed skb */
	for (i = 0; i < rx_ring->count; i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
		struct sk_buff *skb;

		skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL);
		if (!skb) {
			ret_val = 7;
			goto err_nomem;
		}
		skb_reserve(skb, NET_IP_ALIGN);
		rx_ring->buffer_info[i].skb = skb;
		rx_ring->buffer_info[i].dma =
			pci_map_single(pdev, skb->data, 2048,
				       PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) {
			ret_val = 8;
			goto err_nomem;
		}
		rx_desc->buffer_addr =
			cpu_to_le64(rx_ring->buffer_info[i].dma);
		memset(skb->data, 0x00, skb->len);
	}

	return 0;

err_nomem:
	e1000_free_desc_rings(adapter);
	return ret_val;
}
1125
/*
 * Disable the PHY receiver via the undocumented register-29/30
 * write sequence, so a (re)connected cable does not trigger autoneg
 * while loopback is active.  The write order is significant.
 */
static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
{
	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
	e1e_wphy(&adapter->hw, 29, 0x001F);
	e1e_wphy(&adapter->hw, 30, 0x8FFC);
	e1e_wphy(&adapter->hw, 29, 0x001A);
	e1e_wphy(&adapter->hw, 30, 0x8FF0);
}
1134
/*
 * Put the integrated PHY into loopback and force the MAC to the
 * matching speed/duplex.  IFE PHYs are forced to 100/full; all others
 * to 1000/full.  Always returns 0.
 */
static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_reg = 0;
	u32 stat_reg = 0;

	adapter->hw.mac.autoneg = 0;

	if (adapter->hw.phy.type == e1000_phy_m88) {
		/* Auto-MDI/MDIX Off */
		e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
		/* reset to update Auto-MDI/MDIX */
		e1e_wphy(hw, PHY_CONTROL, 0x9140);
		/* autoneg off */
		e1e_wphy(hw, PHY_CONTROL, 0x8140);
	} else if (adapter->hw.phy.type == e1000_phy_gg82563)
		e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);

	ctrl_reg = er32(CTRL);

	if (adapter->hw.phy.type == e1000_phy_ife) {
		/* force 100, set loopback */
		e1e_wphy(hw, PHY_CONTROL, 0x6100);

		/* Now set up the MAC to the same speed/duplex as the PHY. */
		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
			     E1000_CTRL_SPD_100 |/* Force Speed to 100 */
			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
	} else {
		/* force 1000, set loopback */
		e1e_wphy(hw, PHY_CONTROL, 0x4140);

		/* Now set up the MAC to the same speed/duplex as the PHY. */
		/* NOTE(review): this re-read of CTRL appears redundant --
		 * ctrl_reg was read above and not modified since; kept
		 * as-is to preserve exact register access ordering */
		ctrl_reg = er32(CTRL);
		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
			     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
	}

	if (adapter->hw.media_type == e1000_media_type_copper &&
	   adapter->hw.phy.type == e1000_phy_m88) {
		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
	} else {
		/* Set the ILOS bit on the fiber Nic if half duplex link is
		 * detected. */
		stat_reg = er32(STATUS);
		if ((stat_reg & E1000_STATUS_FD) == 0)
			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
	}

	ew32(CTRL, ctrl_reg);

	/* Disable the receiver on the PHY so when a cable is plugged in, the
	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
	 */
	if (adapter->hw.phy.type == e1000_phy_m88)
		e1000_phy_disable_receiver(adapter);

	/* let the forced settings settle before returning */
	udelay(500);

	return 0;
}
1201
1202static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
1203{
1204 struct e1000_hw *hw = &adapter->hw;
1205 u32 ctrl = er32(CTRL);
1206 int link = 0;
1207
1208 /* special requirements for 82571/82572 fiber adapters */
1209
1210 /* jump through hoops to make sure link is up because serdes
1211 * link is hardwired up */
1212 ctrl |= E1000_CTRL_SLU;
1213 ew32(CTRL, ctrl);
1214
1215 /* disable autoneg */
1216 ctrl = er32(TXCW);
1217 ctrl &= ~(1 << 31);
1218 ew32(TXCW, ctrl);
1219
1220 link = (er32(STATUS) & E1000_STATUS_LU);
1221
1222 if (!link) {
1223 /* set invert loss of signal */
1224 ctrl = er32(CTRL);
1225 ctrl |= E1000_CTRL_ILOS;
1226 ew32(CTRL, ctrl);
1227 }
1228
1229 /* special write to serdes control register to enable SerDes analog
1230 * loopback */
1231#define E1000_SERDES_LB_ON 0x410
1232 ew32(SCTL, E1000_SERDES_LB_ON);
1233 msleep(10);
1234
1235 return 0;
1236}
1237
/* only call this for fiber/serdes connections to es2lan */
/*
 * Configure MAC loopback on 80003es2lan fiber/serdes parts: drop out
 * of serdes mode, force 1000/full link, enable MAC loopback in RCTL,
 * and program the KMRN interface for 1GB full-duplex GMII.
 * Always returns 0.
 */
static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrlext = er32(CTRL_EXT);
	u32 ctrl = er32(CTRL);

	/* save CTRL_EXT to restore later, reuse an empty variable (unused
	   on mac_type 80003es2lan) */
	adapter->tx_fifo_head = ctrlext;

	/* clear the serdes mode bits, putting the device into mac loopback */
	ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
	ew32(CTRL_EXT, ctrlext);

	/* force speed to 1000/FD, link up */
	ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
	ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX |
		 E1000_CTRL_SPD_1000 | E1000_CTRL_FD);
	ew32(CTRL, ctrl);

	/* set mac loopback */
	ctrl = er32(RCTL);
	ctrl |= E1000_RCTL_LBM_MAC;
	ew32(RCTL, ctrl);

	/* set testing mode parameters (no need to reset later) */
#define KMRNCTRLSTA_OPMODE (0x1F << 16)
#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582
	ew32(KMRNCTRLSTA,
	     (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));

	return 0;
}
1272
1273static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
1274{
1275 struct e1000_hw *hw = &adapter->hw;
1276 u32 rctl;
1277
1278 if (hw->media_type == e1000_media_type_fiber ||
1279 hw->media_type == e1000_media_type_internal_serdes) {
1280 switch (hw->mac.type) {
1281 case e1000_80003es2lan:
1282 return e1000_set_es2lan_mac_loopback(adapter);
1283 break;
1284 case e1000_82571:
1285 case e1000_82572:
1286 return e1000_set_82571_fiber_loopback(adapter);
1287 break;
1288 default:
1289 rctl = er32(RCTL);
1290 rctl |= E1000_RCTL_LBM_TCVR;
1291 ew32(RCTL, rctl);
1292 return 0;
1293 }
1294 } else if (hw->media_type == e1000_media_type_copper) {
1295 return e1000_integrated_phy_loopback(adapter);
1296 }
1297
1298 return 7;
1299}
1300
/*
 * Undo whatever e1000_setup_loopback_test() configured: clear MAC
 * loopback bits in RCTL, restore device-specific state (CTRL_EXT on
 * es2lan, SerDes loopback off on 82571/82572 fiber), then re-enable
 * autoneg and clear PHY loopback on everything else.  The switch
 * intentionally falls through between cases.
 */
static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	u16 phy_reg;

	rctl = er32(RCTL);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
	ew32(RCTL, rctl);

	switch (hw->mac.type) {
	case e1000_80003es2lan:
		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* restore CTRL_EXT, stealing space from tx_fifo_head */
			ew32(CTRL_EXT,
					adapter->tx_fifo_head);
			adapter->tx_fifo_head = 0;
		}
		/* fall through */
	case e1000_82571:
	case e1000_82572:
		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
#define E1000_SERDES_LB_OFF 0x400
			ew32(SCTL, E1000_SERDES_LB_OFF);
			msleep(10);
			break;
		}
		/* Fall Through */
	default:
		/* copper (and anything else): re-enable autoneg and take
		 * the PHY out of loopback if it was left there */
		hw->mac.autoneg = 1;
		if (hw->phy.type == e1000_phy_gg82563)
			e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180);
		e1e_rphy(hw, PHY_CONTROL, &phy_reg);
		if (phy_reg & MII_CR_LOOPBACK) {
			phy_reg &= ~MII_CR_LOOPBACK;
			e1e_wphy(hw, PHY_CONTROL, phy_reg);
			e1000e_commit_phy(hw);
		}
		break;
	}
}
1344
1345static void e1000_create_lbtest_frame(struct sk_buff *skb,
1346 unsigned int frame_size)
1347{
1348 memset(skb->data, 0xFF, frame_size);
1349 frame_size &= ~1;
1350 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
1351 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
1352 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
1353}
1354
1355static int e1000_check_lbtest_frame(struct sk_buff *skb,
1356 unsigned int frame_size)
1357{
1358 frame_size &= ~1;
1359 if (*(skb->data + 3) == 0xFF)
1360 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
1361 (*(skb->data + frame_size / 2 + 12) == 0xAF))
1362 return 0;
1363 return 13;
1364}
1365
1366static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1367{
1368 struct e1000_ring *tx_ring = &adapter->test_tx_ring;
1369 struct e1000_ring *rx_ring = &adapter->test_rx_ring;
1370 struct pci_dev *pdev = adapter->pdev;
1371 struct e1000_hw *hw = &adapter->hw;
1372 int i, j, k, l;
1373 int lc;
1374 int good_cnt;
1375 int ret_val = 0;
1376 unsigned long time;
1377
1378 ew32(RDT, rx_ring->count - 1);
1379
1380 /* Calculate the loop count based on the largest descriptor ring
1381 * The idea is to wrap the largest ring a number of times using 64
1382 * send/receive pairs during each loop
1383 */
1384
1385 if (rx_ring->count <= tx_ring->count)
1386 lc = ((tx_ring->count / 64) * 2) + 1;
1387 else
1388 lc = ((rx_ring->count / 64) * 2) + 1;
1389
1390 k = 0;
1391 l = 0;
1392 for (j = 0; j <= lc; j++) { /* loop count loop */
1393 for (i = 0; i < 64; i++) { /* send the packets */
1394 e1000_create_lbtest_frame(
1395 tx_ring->buffer_info[i].skb, 1024);
1396 pci_dma_sync_single_for_device(pdev,
1397 tx_ring->buffer_info[k].dma,
1398 tx_ring->buffer_info[k].length,
1399 PCI_DMA_TODEVICE);
1400 k++;
1401 if (k == tx_ring->count)
1402 k = 0;
1403 }
1404 ew32(TDT, k);
1405 msleep(200);
1406 time = jiffies; /* set the start time for the receive */
1407 good_cnt = 0;
1408 do { /* receive the sent packets */
1409 pci_dma_sync_single_for_cpu(pdev,
1410 rx_ring->buffer_info[l].dma, 2048,
1411 PCI_DMA_FROMDEVICE);
1412
1413 ret_val = e1000_check_lbtest_frame(
1414 rx_ring->buffer_info[l].skb, 1024);
1415 if (!ret_val)
1416 good_cnt++;
1417 l++;
1418 if (l == rx_ring->count)
1419 l = 0;
1420 /* time + 20 msecs (200 msecs on 2.4) is more than
1421 * enough time to complete the receives, if it's
1422 * exceeded, break and error off
1423 */
1424 } while ((good_cnt < 64) && !time_after(jiffies, time + 20));
1425 if (good_cnt != 64) {
1426 ret_val = 13; /* ret_val is the same as mis-compare */
1427 break;
1428 }
1429 if (jiffies >= (time + 2)) {
1430 ret_val = 14; /* error code for time out error */
1431 break;
1432 }
1433 } /* end loop count loop */
1434 return ret_val;
1435}
1436
1437static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
1438{
1439 /* PHY loopback cannot be performed if SoL/IDER
1440 * sessions are active */
1441 if (e1000_check_reset_block(&adapter->hw)) {
1442 ndev_err(adapter->netdev, "Cannot do PHY loopback test "
1443 "when SoL/IDER is active.\n");
1444 *data = 0;
1445 goto out;
1446 }
1447
1448 *data = e1000_setup_desc_rings(adapter);
1449 if (data)
1450 goto out;
1451
1452 *data = e1000_setup_loopback_test(adapter);
1453 if (data)
1454 goto err_loopback;
1455
1456 *data = e1000_run_loopback_test(adapter);
1457 e1000_loopback_cleanup(adapter);
1458
1459err_loopback:
1460 e1000_free_desc_rings(adapter);
1461out:
1462 return *data;
1463}
1464
1465static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
1466{
1467 struct e1000_hw *hw = &adapter->hw;
1468
1469 *data = 0;
1470 if (hw->media_type == e1000_media_type_internal_serdes) {
1471 int i = 0;
1472 hw->mac.serdes_has_link = 0;
1473
1474 /* On some blade server designs, link establishment
1475 * could take as long as 2-3 minutes */
1476 do {
1477 hw->mac.ops.check_for_link(hw);
1478 if (hw->mac.serdes_has_link)
1479 return *data;
1480 msleep(20);
1481 } while (i++ < 3750);
1482
1483 *data = 1;
1484 } else {
1485 hw->mac.ops.check_for_link(hw);
1486 if (hw->mac.autoneg)
1487 msleep(4000);
1488
1489 if (!(er32(STATUS) &
1490 E1000_STATUS_LU))
1491 *data = 1;
1492 }
1493 return *data;
1494}
1495
/* ethtool self_test_count hook: number of diagnostic results reported */
static int e1000_diag_test_count(struct net_device *netdev)
{
	return E1000_TEST_LEN;
}
1500
/*
 * ethtool self-test hook.  Results land in data[0..4]:
 * registers, eeprom, interrupt, loopback, link.  Offline mode closes
 * the interface, runs the destructive tests with a reset between each,
 * restores the saved link settings and reopens; online mode only runs
 * the link test.
 */
static void e1000_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	u16 autoneg_advertised;
	u8 forced_speed_duplex;
	u8 autoneg;
	bool if_running = netif_running(netdev);

	set_bit(__E1000_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		/* save speed, duplex, autoneg settings */
		autoneg_advertised = adapter->hw.phy.autoneg_advertised;
		forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
		autoneg = adapter->hw.mac.autoneg;

		ndev_info(netdev, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result */
		if (e1000_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			e1000e_reset(adapter);

		if (e1000_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* reset between tests so each starts from known hw state */
		e1000e_reset(adapter);
		if (e1000_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000e_reset(adapter);
		if (e1000_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000e_reset(adapter);
		/* make sure the phy is powered up */
		e1000e_power_up_phy(adapter);
		if (e1000_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* restore speed, duplex, autoneg settings */
		adapter->hw.phy.autoneg_advertised = autoneg_advertised;
		adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
		adapter->hw.mac.autoneg = autoneg;

		/* force this routine to wait until autoneg complete/timeout */
		adapter->hw.phy.wait_for_link = 1;
		e1000e_reset(adapter);
		adapter->hw.phy.wait_for_link = 0;

		clear_bit(__E1000_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		ndev_info(netdev, "online testing starting\n");
		/* Online tests */
		if (e1000_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__E1000_TESTING, &adapter->state);
	}
	/* presumably gives link time to re-establish before returning
	 * to the caller -- TODO confirm the 4 s value's origin */
	msleep_interruptible(4 * 1000);
}
1578
1579static void e1000_get_wol(struct net_device *netdev,
1580 struct ethtool_wolinfo *wol)
1581{
1582 struct e1000_adapter *adapter = netdev_priv(netdev);
1583
1584 wol->supported = 0;
1585 wol->wolopts = 0;
1586
1587 if (!(adapter->flags & FLAG_HAS_WOL))
1588 return;
1589
1590 wol->supported = WAKE_UCAST | WAKE_MCAST |
1591 WAKE_BCAST | WAKE_MAGIC;
1592
1593 /* apply any specific unsupported masks here */
1594 if (adapter->flags & FLAG_NO_WAKE_UCAST) {
1595 wol->supported &= ~WAKE_UCAST;
1596
1597 if (adapter->wol & E1000_WUFC_EX)
1598 ndev_err(netdev, "Interface does not support "
1599 "directed (unicast) frame wake-up packets\n");
1600 }
1601
1602 if (adapter->wol & E1000_WUFC_EX)
1603 wol->wolopts |= WAKE_UCAST;
1604 if (adapter->wol & E1000_WUFC_MC)
1605 wol->wolopts |= WAKE_MCAST;
1606 if (adapter->wol & E1000_WUFC_BC)
1607 wol->wolopts |= WAKE_BCAST;
1608 if (adapter->wol & E1000_WUFC_MAG)
1609 wol->wolopts |= WAKE_MAGIC;
1610}
1611
1612static int e1000_set_wol(struct net_device *netdev,
1613 struct ethtool_wolinfo *wol)
1614{
1615 struct e1000_adapter *adapter = netdev_priv(netdev);
1616
1617 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1618 return -EOPNOTSUPP;
1619
1620 if (!(adapter->flags & FLAG_HAS_WOL))
1621 return wol->wolopts ? -EOPNOTSUPP : 0;
1622
1623 /* these settings will always override what we currently have */
1624 adapter->wol = 0;
1625
1626 if (wol->wolopts & WAKE_UCAST)
1627 adapter->wol |= E1000_WUFC_EX;
1628 if (wol->wolopts & WAKE_MCAST)
1629 adapter->wol |= E1000_WUFC_MC;
1630 if (wol->wolopts & WAKE_BCAST)
1631 adapter->wol |= E1000_WUFC_BC;
1632 if (wol->wolopts & WAKE_MAGIC)
1633 adapter->wol |= E1000_WUFC_MAG;
1634
1635 return 0;
1636}
1637
1638/* toggle LED 4 times per second = 2 "blinks" per second */
1639#define E1000_ID_INTERVAL (HZ/4)
1640
1641/* bit defines for adapter->led_status */
1642#define E1000_LED_ON 0
1643
1644static void e1000_led_blink_callback(unsigned long data)
1645{
1646 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1647
1648 if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
1649 adapter->hw.mac.ops.led_off(&adapter->hw);
1650 else
1651 adapter->hw.mac.ops.led_on(&adapter->hw);
1652
1653 mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
1654}
1655
/*
 * ethtool phys_id hook: blink the adapter LED for `data` seconds
 * (clamped to the scheduler's maximum) so the port can be located.
 * IFE PHYs need a software blink timer; others blink in hardware.
 * Always returns 0.
 */
static int e1000_phys_id(struct net_device *netdev, u32 data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* 0 means "blink as long as the scheduler allows" */
	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);

	if (adapter->hw.phy.type == e1000_phy_ife) {
		/* lazily initialize the blink timer on first use */
		if (!adapter->blink_timer.function) {
			init_timer(&adapter->blink_timer);
			adapter->blink_timer.function =
				e1000_led_blink_callback;
			adapter->blink_timer.data = (unsigned long) adapter;
		}
		mod_timer(&adapter->blink_timer, jiffies);
		msleep_interruptible(data * 1000);
		del_timer_sync(&adapter->blink_timer);
		/* stop driving the LED via the PHY special control reg */
		e1e_wphy(&adapter->hw,
				IFE_PHY_SPECIAL_CONTROL_LED, 0);
	} else {
		e1000e_blink_led(&adapter->hw);
		msleep_interruptible(data * 1000);
	}

	/* restore normal LED operation */
	adapter->hw.mac.ops.led_off(&adapter->hw);
	clear_bit(E1000_LED_ON, &adapter->led_status);
	adapter->hw.mac.ops.cleanup_led(&adapter->hw);

	return 0;
}
1686
/*
 * ethtool nway_reset hook: restart autonegotiation by reinitializing
 * the interface; a no-op when the interface is down.
 */
static int e1000_nway_reset(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	e1000e_reinit_locked(adapter);
	return 0;
}
1694
/* ethtool get_stats_count hook: number of statistics reported */
static int e1000_get_stats_count(struct net_device *netdev)
{
	return E1000_STATS_LEN;
}
1699
1700static void e1000_get_ethtool_stats(struct net_device *netdev,
1701 struct ethtool_stats *stats,
1702 u64 *data)
1703{
1704 struct e1000_adapter *adapter = netdev_priv(netdev);
1705 int i;
1706
1707 e1000e_update_stats(adapter);
1708 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1709 char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
1710 data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
1711 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1712 }
1713}
1714
1715static void e1000_get_strings(struct net_device *netdev, u32 stringset,
1716 u8 *data)
1717{
1718 u8 *p = data;
1719 int i;
1720
1721 switch (stringset) {
1722 case ETH_SS_TEST:
1723 memcpy(data, *e1000_gstrings_test,
1724 E1000_TEST_LEN*ETH_GSTRING_LEN);
1725 break;
1726 case ETH_SS_STATS:
1727 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1728 memcpy(p, e1000_gstrings_stats[i].stat_string,
1729 ETH_GSTRING_LEN);
1730 p += ETH_GSTRING_LEN;
1731 }
1732 break;
1733 }
1734}
1735
/* ethtool operations table wired up by e1000e_set_ethtool_ops() */
static const struct ethtool_ops e1000_ethtool_ops = {
	.get_settings		= e1000_get_settings,
	.set_settings		= e1000_set_settings,
	.get_drvinfo		= e1000_get_drvinfo,
	.get_regs_len		= e1000_get_regs_len,
	.get_regs		= e1000_get_regs,
	.get_wol		= e1000_get_wol,
	.set_wol		= e1000_set_wol,
	.get_msglevel		= e1000_get_msglevel,
	.set_msglevel		= e1000_set_msglevel,
	.nway_reset		= e1000_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= e1000_get_eeprom_len,
	.get_eeprom		= e1000_get_eeprom,
	.set_eeprom		= e1000_set_eeprom,
	.get_ringparam		= e1000_get_ringparam,
	.set_ringparam		= e1000_set_ringparam,
	.get_pauseparam		= e1000_get_pauseparam,
	.set_pauseparam		= e1000_set_pauseparam,
	.get_rx_csum		= e1000_get_rx_csum,
	.set_rx_csum		= e1000_set_rx_csum,
	.get_tx_csum		= e1000_get_tx_csum,
	.set_tx_csum		= e1000_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= e1000_set_tso,
	.self_test_count	= e1000_diag_test_count,
	.self_test		= e1000_diag_test,
	.get_strings		= e1000_get_strings,
	.phys_id		= e1000_phys_id,
	.get_stats_count	= e1000_get_stats_count,
	.get_ethtool_stats	= e1000_get_ethtool_stats,
};
1770
/* attach this driver's ethtool operations to a net_device */
void e1000e_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
}
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
new file mode 100644
index 000000000000..848217a38259
--- /dev/null
+++ b/drivers/net/e1000e/hw.h
@@ -0,0 +1,864 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#ifndef _E1000_HW_H_
30#define _E1000_HW_H_
31
32#include <linux/types.h>
33
34struct e1000_hw;
35struct e1000_adapter;
36
37#include "defines.h"
38
39#define er32(reg) __er32(hw, E1000_##reg)
40#define ew32(reg,val) __ew32(hw, E1000_##reg, (val))
41#define e1e_flush() er32(STATUS)
42
43#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \
44 (writel((value), ((a)->hw_addr + reg + ((offset) << 2))))
45
46#define E1000_READ_REG_ARRAY(a, reg, offset) \
47 (readl((a)->hw_addr + reg + ((offset) << 2)))
48
/* CSR register byte offsets, applied to hw->hw_addr by the er32()/ew32()
 * accessors and the E1000_{READ,WRITE}_REG_ARRAY macros above. */
49enum e1e_registers {
50 E1000_CTRL = 0x00000, /* Device Control - RW */
51 E1000_STATUS = 0x00008, /* Device Status - RO */
52 E1000_EECD = 0x00010, /* EEPROM/Flash Control - RW */
53 E1000_EERD = 0x00014, /* EEPROM Read - RW */
54 E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */
55 E1000_FLA = 0x0001C, /* Flash Access - RW */
56 E1000_MDIC = 0x00020, /* MDI Control - RW */
57 E1000_SCTL = 0x00024, /* SerDes Control - RW */
58 E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */
59 E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */
60 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW; NOTE(review): same offset as FCAL — presumably an overlay on parts without flow-control address regs, confirm against datasheet */
61 E1000_FCT = 0x00030, /* Flow Control Type - RW */
62 E1000_VET = 0x00038, /* VLAN Ether Type - RW */
63 E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */
64 E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */
65 E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */
66 E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */
67 E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */
68 E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */
69 E1000_RCTL = 0x00100, /* RX Control - RW */
70 E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */
71 E1000_TXCW = 0x00178, /* TX Configuration Word - RW */
72 E1000_RXCW = 0x00180, /* RX Configuration Word - RO */
73 E1000_TCTL = 0x00400, /* TX Control - RW */
74 E1000_TCTL_EXT = 0x00404, /* Extended TX Control - RW */
75 E1000_TIPG = 0x00410, /* TX Inter-packet gap -RW */
76 E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle - RW */
77 E1000_LEDCTL = 0x00E00, /* LED Control - RW */
78 E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
79 E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
80 E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */
81 E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
82 E1000_PBS = 0x01008, /* Packet Buffer Size */
83 E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
84 E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */
85 E1000_FLOP = 0x0103C, /* FLASH Opcode Register */
86 E1000_ERT = 0x02008, /* Early Rx Threshold - RW */
87 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
88 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
89 E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
90 E1000_RDBAL = 0x02800, /* RX Descriptor Base Address Low - RW */
91 E1000_RDBAH = 0x02804, /* RX Descriptor Base Address High - RW */
92 E1000_RDLEN = 0x02808, /* RX Descriptor Length - RW */
93 E1000_RDH = 0x02810, /* RX Descriptor Head - RW */
94 E1000_RDT = 0x02818, /* RX Descriptor Tail - RW */
95 E1000_RDTR = 0x02820, /* RX Delay Timer - RW */
96 E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */
97
98/* Convenience macros
99 *
100 * Note: "_n" is the queue number of the register to be written to.
101 *
102 * Example usage:
103 * E1000_RDBAL_REG(current_rx_queue)
104 *
105 */
106#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8))
/* NOTE(review): "_n" above is not parenthesized in the expansion; an argument
 * containing a low-precedence operator (e.g. bitwise ops, comma) would expand
 * incorrectly — confirm all callers pass a plain identifier or constant. */
107 E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */
108 E1000_TDBAL = 0x03800, /* TX Descriptor Base Address Low - RW */
109 E1000_TDBAH = 0x03804, /* TX Descriptor Base Address High - RW */
110 E1000_TDLEN = 0x03808, /* TX Descriptor Length - RW */
111 E1000_TDH = 0x03810, /* TX Descriptor Head - RW */
112 E1000_TDT = 0x03818, /* TX Descriptor Tail - RW */
113 E1000_TIDV = 0x03820, /* TX Interrupt Delay Value - RW */
114 E1000_TXDCTL = 0x03828, /* TX Descriptor Control - RW */
115 E1000_TADV = 0x0382C, /* TX Interrupt Absolute Delay Val - RW */
116 E1000_TARC0 = 0x03840, /* TX Arbitration Count (0) */
117 E1000_TXDCTL1 = 0x03928, /* TX Descriptor Control (1) - RW */
118 E1000_TARC1 = 0x03940, /* TX Arbitration Count (1) */
119 E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */
120 E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
121 E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */
122 E1000_RXERRC = 0x0400C, /* Receive Error Count - R/clr */
123 E1000_MPC = 0x04010, /* Missed Packet Count - R/clr */
124 E1000_SCC = 0x04014, /* Single Collision Count - R/clr */
125 E1000_ECOL = 0x04018, /* Excessive Collision Count - R/clr */
126 E1000_MCC = 0x0401C, /* Multiple Collision Count - R/clr */
127 E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */
128 E1000_COLC = 0x04028, /* Collision Count - R/clr */
129 E1000_DC = 0x04030, /* Defer Count - R/clr */
130 E1000_TNCRS = 0x04034, /* TX-No CRS - R/clr */
131 E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */
132 E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */
133 E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */
134 E1000_XONRXC = 0x04048, /* XON RX Count - R/clr */
135 E1000_XONTXC = 0x0404C, /* XON TX Count - R/clr */
136 E1000_XOFFRXC = 0x04050, /* XOFF RX Count - R/clr */
137 E1000_XOFFTXC = 0x04054, /* XOFF TX Count - R/clr */
138 E1000_FCRUC = 0x04058, /* Flow Control RX Unsupported Count- R/clr */
139 E1000_PRC64 = 0x0405C, /* Packets RX (64 bytes) - R/clr */
140 E1000_PRC127 = 0x04060, /* Packets RX (65-127 bytes) - R/clr */
141 E1000_PRC255 = 0x04064, /* Packets RX (128-255 bytes) - R/clr */
142 E1000_PRC511 = 0x04068, /* Packets RX (256-511 bytes) - R/clr */
143 E1000_PRC1023 = 0x0406C, /* Packets RX (512-1023 bytes) - R/clr */
144 E1000_PRC1522 = 0x04070, /* Packets RX (1024-1522 bytes) - R/clr */
145 E1000_GPRC = 0x04074, /* Good Packets RX Count - R/clr */
146 E1000_BPRC = 0x04078, /* Broadcast Packets RX Count - R/clr */
147 E1000_MPRC = 0x0407C, /* Multicast Packets RX Count - R/clr */
148 E1000_GPTC = 0x04080, /* Good Packets TX Count - R/clr */
149 E1000_GORCL = 0x04088, /* Good Octets RX Count Low - R/clr */
150 E1000_GORCH = 0x0408C, /* Good Octets RX Count High - R/clr */
151 E1000_GOTCL = 0x04090, /* Good Octets TX Count Low - R/clr */
152 E1000_GOTCH = 0x04094, /* Good Octets TX Count High - R/clr */
153 E1000_RNBC = 0x040A0, /* RX No Buffers Count - R/clr */
154 E1000_RUC = 0x040A4, /* RX Undersize Count - R/clr */
155 E1000_RFC = 0x040A8, /* RX Fragment Count - R/clr */
156 E1000_ROC = 0x040AC, /* RX Oversize Count - R/clr */
157 E1000_RJC = 0x040B0, /* RX Jabber Count - R/clr */
158 E1000_MGTPRC = 0x040B4, /* Management Packets RX Count - R/clr */
159 E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */
160 E1000_MGTPTC = 0x040BC, /* Management Packets TX Count - R/clr */
161 E1000_TORL = 0x040C0, /* Total Octets RX Low - R/clr */
162 E1000_TORH = 0x040C4, /* Total Octets RX High - R/clr */
163 E1000_TOTL = 0x040C8, /* Total Octets TX Low - R/clr */
164 E1000_TOTH = 0x040CC, /* Total Octets TX High - R/clr */
165 E1000_TPR = 0x040D0, /* Total Packets RX - R/clr */
166 E1000_TPT = 0x040D4, /* Total Packets TX - R/clr */
167 E1000_PTC64 = 0x040D8, /* Packets TX (64 bytes) - R/clr */
168 E1000_PTC127 = 0x040DC, /* Packets TX (65-127 bytes) - R/clr */
169 E1000_PTC255 = 0x040E0, /* Packets TX (128-255 bytes) - R/clr */
170 E1000_PTC511 = 0x040E4, /* Packets TX (256-511 bytes) - R/clr */
171 E1000_PTC1023 = 0x040E8, /* Packets TX (512-1023 bytes) - R/clr */
172 E1000_PTC1522 = 0x040EC, /* Packets TX (1024-1522 Bytes) - R/clr */
173 E1000_MPTC = 0x040F0, /* Multicast Packets TX Count - R/clr */
174 E1000_BPTC = 0x040F4, /* Broadcast Packets TX Count - R/clr */
175 E1000_TSCTC = 0x040F8, /* TCP Segmentation Context TX - R/clr */
176 E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context TX Fail - R/clr */
177 E1000_IAC = 0x04100, /* Interrupt Assertion Count */
178 E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */
179 E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */
180 E1000_ICTXPTC = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */
181 E1000_ICTXATC = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */
182 E1000_ICTXQEC = 0x04118, /* Irq Cause Tx Queue Empty Count */
183 E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */
184 E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
185 E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */
186 E1000_RXCSUM = 0x05000, /* RX Checksum Control - RW */
187 E1000_RFCTL = 0x05008, /* Receive Filter Control*/
188 E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */
189 E1000_RA = 0x05400, /* Receive Address - RW Array */
190 E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */
191 E1000_WUC = 0x05800, /* Wakeup Control - RW */
192 E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */
193 E1000_WUS = 0x05810, /* Wakeup Status - RO */
194 E1000_MANC = 0x05820, /* Management Control - RW */
195 E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */
196 E1000_HOST_IF = 0x08800, /* Host Interface */
197
198 E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
199 E1000_MANC2H = 0x05860, /* Management Control To Host - RW */
200 E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
201 E1000_GCR = 0x05B00, /* PCI-Ex Control */
202 E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */
203 E1000_SWSM = 0x05B50, /* SW Semaphore */
204 E1000_FWSM = 0x05B54, /* FW Semaphore */
205 E1000_HICR = 0x08F00, /* Host Interface Control */
206};
207
208/* RSS registers */
209
210/* IGP01E1000 Specific Registers */
211#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
212#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
213#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
214#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
215#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
216#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
217
218#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
219#define IGP01E1000_PHY_POLARITY_MASK 0x0078
220
221#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
222#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
223
224#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
225
226#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
227#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
228#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
229
230#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
231
232#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
233#define IGP01E1000_PSSR_MDIX 0x0008
234#define IGP01E1000_PSSR_SPEED_MASK 0xC000
235#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
236
237#define IGP02E1000_PHY_CHANNEL_NUM 4
238#define IGP02E1000_PHY_AGC_A 0x11B1
239#define IGP02E1000_PHY_AGC_B 0x12B1
240#define IGP02E1000_PHY_AGC_C 0x14B1
241#define IGP02E1000_PHY_AGC_D 0x18B1
242
243#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
244#define IGP02E1000_AGC_LENGTH_MASK 0x7F
245#define IGP02E1000_AGC_RANGE 15
246
247/* manage.c */
248#define E1000_VFTA_ENTRY_SHIFT 5
249#define E1000_VFTA_ENTRY_MASK 0x7F
250#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
251
252#define E1000_HICR_EN 0x01 /* Enable bit - RO */
253#define E1000_HICR_C 0x02 /* Driver sets this bit when done
254 * to put command in RAM */
255#define E1000_HICR_FW_RESET_ENABLE 0x40
256#define E1000_HICR_FW_RESET 0x80
257
258#define E1000_FWSM_MODE_MASK 0xE
259#define E1000_FWSM_MODE_SHIFT 1
260
261#define E1000_MNG_IAMT_MODE 0x3
262#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
263#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
264#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
265#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
266#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
267#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
268
269/* nvm.c */
270#define E1000_STM_OPCODE 0xDB00
271
272#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
273#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
274#define E1000_KMRNCTRLSTA_REN 0x00200000
275#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
276#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
277
278#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
279#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
280#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
281#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
282
283/* IFE PHY Extended Status Control */
284#define IFE_PESC_POLARITY_REVERSED 0x0100
285
286/* IFE PHY Special Control */
287#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
288#define IFE_PSC_FORCE_POLARITY 0x0020
289
290/* IFE PHY Special Control and LED Control */
291#define IFE_PSCL_PROBE_MODE 0x0020
292#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
293#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
294
295/* IFE PHY MDIX Control */
296#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
297#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
298#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
299
300#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
301
302#define E1000_DEV_ID_82571EB_COPPER 0x105E
303#define E1000_DEV_ID_82571EB_FIBER 0x105F
304#define E1000_DEV_ID_82571EB_SERDES 0x1060
305#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
306#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
307#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC
308#define E1000_DEV_ID_82572EI_COPPER 0x107D
309#define E1000_DEV_ID_82572EI_FIBER 0x107E
310#define E1000_DEV_ID_82572EI_SERDES 0x107F
311#define E1000_DEV_ID_82572EI 0x10B9
312#define E1000_DEV_ID_82573E 0x108B
313#define E1000_DEV_ID_82573E_IAMT 0x108C
314#define E1000_DEV_ID_82573L 0x109A
315
316#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
317#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
318#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
319#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
320
321#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
322#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
323#define E1000_DEV_ID_ICH8_IGP_C 0x104B
324#define E1000_DEV_ID_ICH8_IFE 0x104C
325#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
326#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
327#define E1000_DEV_ID_ICH8_IGP_M 0x104D
328#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD
329#define E1000_DEV_ID_ICH9_IGP_C 0x294C
330#define E1000_DEV_ID_ICH9_IFE 0x10C0
331#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3
332#define E1000_DEV_ID_ICH9_IFE_G 0x10C2
333
334#define E1000_FUNC_1 1
335
336enum e1000_mac_type {
337 e1000_82571,
338 e1000_82572,
339 e1000_82573,
340 e1000_80003es2lan,
341 e1000_ich8lan,
342 e1000_ich9lan,
343};
344
345enum e1000_media_type {
346 e1000_media_type_unknown = 0,
347 e1000_media_type_copper = 1,
348 e1000_media_type_fiber = 2,
349 e1000_media_type_internal_serdes = 3,
350 e1000_num_media_types
351};
352
353enum e1000_nvm_type {
354 e1000_nvm_unknown = 0,
355 e1000_nvm_none,
356 e1000_nvm_eeprom_spi,
357 e1000_nvm_flash_hw,
358 e1000_nvm_flash_sw
359};
360
361enum e1000_nvm_override {
362 e1000_nvm_override_none = 0,
363 e1000_nvm_override_spi_small,
364 e1000_nvm_override_spi_large
365};
366
367enum e1000_phy_type {
368 e1000_phy_unknown = 0,
369 e1000_phy_none,
370 e1000_phy_m88,
371 e1000_phy_igp,
372 e1000_phy_igp_2,
373 e1000_phy_gg82563,
374 e1000_phy_igp_3,
375 e1000_phy_ife,
376};
377
378enum e1000_bus_width {
379 e1000_bus_width_unknown = 0,
380 e1000_bus_width_pcie_x1,
381 e1000_bus_width_pcie_x2,
382 e1000_bus_width_pcie_x4 = 4,
383 e1000_bus_width_32,
384 e1000_bus_width_64,
385 e1000_bus_width_reserved
386};
387
388enum e1000_1000t_rx_status {
389 e1000_1000t_rx_status_not_ok = 0,
390 e1000_1000t_rx_status_ok,
391 e1000_1000t_rx_status_undefined = 0xFF
392};
393
394enum e1000_rev_polarity{
395 e1000_rev_polarity_normal = 0,
396 e1000_rev_polarity_reversed,
397 e1000_rev_polarity_undefined = 0xFF
398};
399
400enum e1000_fc_mode {
401 e1000_fc_none = 0,
402 e1000_fc_rx_pause,
403 e1000_fc_tx_pause,
404 e1000_fc_full,
405 e1000_fc_default = 0xFF
406};
407
408enum e1000_ms_type {
409 e1000_ms_hw_default = 0,
410 e1000_ms_force_master,
411 e1000_ms_force_slave,
412 e1000_ms_auto
413};
414
415enum e1000_smart_speed {
416 e1000_smart_speed_default = 0,
417 e1000_smart_speed_on,
418 e1000_smart_speed_off
419};
420
421/* Receive Descriptor */
422struct e1000_rx_desc {
423 u64 buffer_addr; /* Address of the descriptor's data buffer */
424 u16 length; /* Length of data DMAed into data buffer */
425 u16 csum; /* Packet checksum */
426 u8 status; /* Descriptor status */
427 u8 errors; /* Descriptor Errors */
428 u16 special;
429};
430
431/* Receive Descriptor - Extended */
432union e1000_rx_desc_extended {
433 struct {
434 u64 buffer_addr;
435 u64 reserved;
436 } read;
437 struct {
438 struct {
439 u32 mrq; /* Multiple Rx Queues */
440 union {
441 u32 rss; /* RSS Hash */
442 struct {
443 u16 ip_id; /* IP id */
444 u16 csum; /* Packet Checksum */
445 } csum_ip;
446 } hi_dword;
447 } lower;
448 struct {
449 u32 status_error; /* ext status/error */
450 u16 length;
451 u16 vlan; /* VLAN tag */
452 } upper;
453 } wb; /* writeback */
454};
455
456#define MAX_PS_BUFFERS 4
457/* Receive Descriptor - Packet Split */
458union e1000_rx_desc_packet_split {
459 struct {
460 /* one buffer for protocol header(s), three data buffers */
461 u64 buffer_addr[MAX_PS_BUFFERS];
462 } read;
463 struct {
464 struct {
465 u32 mrq; /* Multiple Rx Queues */
466 union {
467 u32 rss; /* RSS Hash */
468 struct {
469 u16 ip_id; /* IP id */
470 u16 csum; /* Packet Checksum */
471 } csum_ip;
472 } hi_dword;
473 } lower;
474 struct {
475 u32 status_error; /* ext status/error */
476 u16 length0; /* length of buffer 0 */
477 u16 vlan; /* VLAN tag */
478 } middle;
479 struct {
480 u16 header_status;
481 u16 length[3]; /* length of buffers 1-3 */
482 } upper;
483 u64 reserved;
484 } wb; /* writeback */
485};
486
487/* Transmit Descriptor */
488struct e1000_tx_desc {
489 u64 buffer_addr; /* Address of the descriptor's data buffer */
490 union {
491 u32 data;
492 struct {
493 u16 length; /* Data buffer length */
494 u8 cso; /* Checksum offset */
495 u8 cmd; /* Descriptor control */
496 } flags;
497 } lower;
498 union {
499 u32 data;
500 struct {
501 u8 status; /* Descriptor status */
502 u8 css; /* Checksum start */
503 u16 special;
504 } fields;
505 } upper;
506};
507
508/* Offload Context Descriptor */
509struct e1000_context_desc {
510 union {
511 u32 ip_config;
512 struct {
513 u8 ipcss; /* IP checksum start */
514 u8 ipcso; /* IP checksum offset */
515 u16 ipcse; /* IP checksum end */
516 } ip_fields;
517 } lower_setup;
518 union {
519 u32 tcp_config;
520 struct {
521 u8 tucss; /* TCP checksum start */
522 u8 tucso; /* TCP checksum offset */
523 u16 tucse; /* TCP checksum end */
524 } tcp_fields;
525 } upper_setup;
526 u32 cmd_and_length;
527 union {
528 u32 data;
529 struct {
530 u8 status; /* Descriptor status */
531 u8 hdr_len; /* Header length */
532 u16 mss; /* Maximum segment size */
533 } fields;
534 } tcp_seg_setup;
535};
536
537/* Offload data descriptor */
538struct e1000_data_desc {
539 u64 buffer_addr; /* Address of the descriptor's buffer address */
540 union {
541 u32 data;
542 struct {
543 u16 length; /* Data buffer length */
544 u8 typ_len_ext;
545 u8 cmd;
546 } flags;
547 } lower;
548 union {
549 u32 data;
550 struct {
551 u8 status; /* Descriptor status */
552 u8 popts; /* Packet Options */
553 u16 special; /* */
554 } fields;
555 } upper;
556};
557
558/* Statistics counters collected by the MAC */
559struct e1000_hw_stats {
560 u64 crcerrs;
561 u64 algnerrc;
562 u64 symerrs;
563 u64 rxerrc;
564 u64 mpc;
565 u64 scc;
566 u64 ecol;
567 u64 mcc;
568 u64 latecol;
569 u64 colc;
570 u64 dc;
571 u64 tncrs;
572 u64 sec;
573 u64 cexterr;
574 u64 rlec;
575 u64 xonrxc;
576 u64 xontxc;
577 u64 xoffrxc;
578 u64 xofftxc;
579 u64 fcruc;
580 u64 prc64;
581 u64 prc127;
582 u64 prc255;
583 u64 prc511;
584 u64 prc1023;
585 u64 prc1522;
586 u64 gprc;
587 u64 bprc;
588 u64 mprc;
589 u64 gptc;
590 u64 gorcl;
591 u64 gorch;
592 u64 gotcl;
593 u64 gotch;
594 u64 rnbc;
595 u64 ruc;
596 u64 rfc;
597 u64 roc;
598 u64 rjc;
599 u64 mgprc;
600 u64 mgpdc;
601 u64 mgptc;
602 u64 torl;
603 u64 torh;
604 u64 totl;
605 u64 toth;
606 u64 tpr;
607 u64 tpt;
608 u64 ptc64;
609 u64 ptc127;
610 u64 ptc255;
611 u64 ptc511;
612 u64 ptc1023;
613 u64 ptc1522;
614 u64 mptc;
615 u64 bptc;
616 u64 tsctc;
617 u64 tsctfc;
618 u64 iac;
619 u64 icrxptc;
620 u64 icrxatc;
621 u64 ictxptc;
622 u64 ictxatc;
623 u64 ictxqec;
624 u64 ictxqmtc;
625 u64 icrxdmtc;
626 u64 icrxoc;
627};
628
/* Error counters read from the PHY (distinct from the MAC counters in
 * struct e1000_hw_stats above). */
629struct e1000_phy_stats {
630 u32 idle_errors;
631 u32 receive_errors;
632};
633
/* Manageability (MNG) DHCP cookie layout shared with firmware; consumed
 * together with the E1000_MNG_DHCP_COOKIE_* constants defined above. */
634struct e1000_host_mng_dhcp_cookie {
635 u32 signature;
636 u8 status; /* E1000_MNG_DHCP_COOKIE_STATUS_* bits */
637 u8 reserved0;
638 u16 vlan_id;
639 u32 reserved1;
640 u16 reserved2;
641 u8 reserved3;
642 u8 checksum;
643};
644
645/* Host Interface "Rev 1" */
646struct e1000_host_command_header {
647 u8 command_id;
648 u8 command_length;
649 u8 command_options;
650 u8 checksum;
651};
652
653#define E1000_HI_MAX_DATA_LENGTH 252
654struct e1000_host_command_info {
655 struct e1000_host_command_header command_header;
656 u8 command_data[E1000_HI_MAX_DATA_LENGTH];
657};
658
659/* Host Interface "Rev 2" */
660struct e1000_host_mng_command_header {
661 u8 command_id;
662 u8 checksum;
663 u16 reserved1;
664 u16 reserved2;
665 u16 command_length;
666};
667
668#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
669struct e1000_host_mng_command_info {
670 struct e1000_host_mng_command_header command_header;
671 u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
672};
673
674/* Function pointers and static data for the MAC. */
675struct e1000_mac_operations {
676 u32 mng_mode_enab;
677
678 s32 (*check_for_link)(struct e1000_hw *);
679 s32 (*cleanup_led)(struct e1000_hw *);
680 void (*clear_hw_cntrs)(struct e1000_hw *);
681 s32 (*get_bus_info)(struct e1000_hw *);
682 s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
683 s32 (*led_on)(struct e1000_hw *);
684 s32 (*led_off)(struct e1000_hw *);
685 void (*mc_addr_list_update)(struct e1000_hw *, u8 *, u32, u32,
686 u32);
687 s32 (*reset_hw)(struct e1000_hw *);
688 s32 (*init_hw)(struct e1000_hw *);
689 s32 (*setup_link)(struct e1000_hw *);
690 s32 (*setup_physical_interface)(struct e1000_hw *);
691};
692
693/* Function pointers for the PHY. */
694struct e1000_phy_operations {
695 s32 (*acquire_phy)(struct e1000_hw *);
696 s32 (*check_reset_block)(struct e1000_hw *);
697 s32 (*commit_phy)(struct e1000_hw *);
698 s32 (*force_speed_duplex)(struct e1000_hw *);
699 s32 (*get_cfg_done)(struct e1000_hw *hw);
700 s32 (*get_cable_length)(struct e1000_hw *);
701 s32 (*get_phy_info)(struct e1000_hw *);
702 s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
703 void (*release_phy)(struct e1000_hw *);
704 s32 (*reset_phy)(struct e1000_hw *);
705 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
706 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
707 s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
708};
709
710/* Function pointers for the NVM. */
711struct e1000_nvm_operations {
712 s32 (*acquire_nvm)(struct e1000_hw *);
713 s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
714 void (*release_nvm)(struct e1000_hw *);
715 s32 (*update_nvm)(struct e1000_hw *);
716 s32 (*valid_led_default)(struct e1000_hw *, u16 *);
717 s32 (*validate_nvm)(struct e1000_hw *);
718 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
719};
720
721struct e1000_mac_info {
722 struct e1000_mac_operations ops;
723
724 u8 addr[6];
725 u8 perm_addr[6];
726
727 enum e1000_mac_type type;
728 enum e1000_fc_mode fc;
729 enum e1000_fc_mode original_fc;
730
731 u32 collision_delta;
732 u32 ledctl_default;
733 u32 ledctl_mode1;
734 u32 ledctl_mode2;
735 u32 max_frame_size;
736 u32 mc_filter_type;
737 u32 min_frame_size;
738 u32 tx_packet_delta;
739 u32 txcw;
740
741 u16 current_ifs_val;
742 u16 ifs_max_val;
743 u16 ifs_min_val;
744 u16 ifs_ratio;
745 u16 ifs_step_size;
746 u16 mta_reg_count;
747 u16 rar_entry_count;
748 u16 fc_high_water;
749 u16 fc_low_water;
750 u16 fc_pause_time;
751
752 u8 forced_speed_duplex;
753
754 bool arc_subsystem_valid;
755 bool autoneg;
756 bool autoneg_failed;
757 bool get_link_status;
758 bool in_ifs_mode;
759 bool serdes_has_link;
760 bool tx_pkt_filtering;
761};
762
763struct e1000_phy_info {
764 struct e1000_phy_operations ops;
765
766 enum e1000_phy_type type;
767
768 enum e1000_1000t_rx_status local_rx;
769 enum e1000_1000t_rx_status remote_rx;
770 enum e1000_ms_type ms_type;
771 enum e1000_ms_type original_ms_type;
772 enum e1000_rev_polarity cable_polarity;
773 enum e1000_smart_speed smart_speed;
774
775 u32 addr;
776 u32 id;
777 u32 reset_delay_us; /* in usec */
778 u32 revision;
779
780 u16 autoneg_advertised;
781 u16 autoneg_mask;
782 u16 cable_length;
783 u16 max_cable_length;
784 u16 min_cable_length;
785
786 u8 mdix;
787
788 bool disable_polarity_correction;
789 bool is_mdix;
790 bool polarity_correction;
791 bool speed_downgraded;
792 bool wait_for_link;
793};
794
795struct e1000_nvm_info {
796 struct e1000_nvm_operations ops;
797
798 enum e1000_nvm_type type;
799 enum e1000_nvm_override override;
800
801 u32 flash_bank_size;
802 u32 flash_base_addr;
803
804 u16 word_size;
805 u16 delay_usec;
806 u16 address_bits;
807 u16 opcode_bits;
808 u16 page_size;
809};
810
/* PCIe bus attributes for this device. */
811struct e1000_bus_info {
812 enum e1000_bus_width width; /* negotiated link width */
813
814 u16 func; /* PCI function number (cf. E1000_FUNC_1) */
815};
816
/* Device-specific state, selected per MAC type via the union in
 * struct e1000_hw. */
817struct e1000_dev_spec_82571 {
818 bool laa_is_present; /* locally administered MAC address in use */
819};
820
/* One cached NVM word for the ICH8 flash shadow RAM. */
821struct e1000_shadow_ram {
822 u16 value;
823 bool modified; /* true once the cached word diverges from flash */
824};
825
826#define E1000_ICH8_SHADOW_RAM_WORDS 2048
827
828struct e1000_dev_spec_ich8lan {
829 bool kmrn_lock_loss_workaround_enabled;
830 struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
831};
832
/* Top-level per-device hardware state shared by all e1000e core files. */
833struct e1000_hw {
834 struct e1000_adapter *adapter; /* back-pointer to the driver-private adapter */
835
836 u8 __iomem *hw_addr; /* mapped CSR base; see er32()/ew32() above */
837 u8 __iomem *flash_address; /* mapped flash BAR (ICH parts); may be unset elsewhere — NOTE(review): confirm */
838
839 struct e1000_mac_info mac;
840 struct e1000_phy_info phy;
841 struct e1000_nvm_info nvm;
842 struct e1000_bus_info bus;
843 struct e1000_host_mng_dhcp_cookie mng_cookie;
844
/* Only the member matching mac.type is valid. */
845 union {
846 struct e1000_dev_spec_82571 e82571;
847 struct e1000_dev_spec_ich8lan ich8lan;
848 } dev_spec;
849
850 enum e1000_media_type media_type;
851};
852
853#ifdef DEBUG
/* Debug logging helper: prefixes each message with the device name.
 * Fixes vs. original: KERN_DEBUG must be string-concatenated with the
 * format — the stray comma made "<7>" the entire printk format and
 * discarded the real message and arguments — and the trailing ';' is
 * dropped so the macro expands to a single expression that is safe in
 * unbraced if/else bodies. */
854#define hw_dbg(hw, format, arg...) \
855 printk(KERN_DEBUG "%s: " format, e1000_get_hw_dev_name(hw), ##arg)
856#else
/* Non-DEBUG stub; the printf attribute keeps format/argument checking. */
857static inline int __attribute__ ((format (printf, 2, 3)))
858hw_dbg(struct e1000_hw *hw, const char *format, ...)
859{
860 return 0;
861}
862#endif
863
864#endif
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
new file mode 100644
index 000000000000..8f8139de1f48
--- /dev/null
+++ b/drivers/net/e1000e/ich8lan.c
@@ -0,0 +1,2225 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/*
30 * 82562G-2 10/100 Network Connection
31 * 82562GT 10/100 Network Connection
32 * 82562GT-2 10/100 Network Connection
33 * 82562V 10/100 Network Connection
34 * 82562V-2 10/100 Network Connection
35 * 82566DC-2 Gigabit Network Connection
36 * 82566DC Gigabit Network Connection
37 * 82566DM-2 Gigabit Network Connection
38 * 82566DM Gigabit Network Connection
39 * 82566MC Gigabit Network Connection
40 * 82566MM Gigabit Network Connection
41 */
42
43#include <linux/netdevice.h>
44#include <linux/ethtool.h>
45#include <linux/delay.h>
46#include <linux/pci.h>
47
48#include "e1000.h"
49
50#define ICH_FLASH_GFPREG 0x0000
51#define ICH_FLASH_HSFSTS 0x0004
52#define ICH_FLASH_HSFCTL 0x0006
53#define ICH_FLASH_FADDR 0x0008
54#define ICH_FLASH_FDATA0 0x0010
55
56#define ICH_FLASH_READ_COMMAND_TIMEOUT 500
57#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500
58#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000
59#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
60#define ICH_FLASH_CYCLE_REPEAT_COUNT 10
61
62#define ICH_CYCLE_READ 0
63#define ICH_CYCLE_WRITE 2
64#define ICH_CYCLE_ERASE 3
65
66#define FLASH_GFPREG_BASE_MASK 0x1FFF
67#define FLASH_SECTOR_ADDR_SHIFT 12
68
69#define ICH_FLASH_SEG_SIZE_256 256
70#define ICH_FLASH_SEG_SIZE_4K 4096
71#define ICH_FLASH_SEG_SIZE_8K 8192
72#define ICH_FLASH_SEG_SIZE_64K 65536
73
74
75#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
76
77#define E1000_ICH_MNG_IAMT_MODE 0x2
78
79#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
80 (ID_LED_DEF1_OFF2 << 8) | \
81 (ID_LED_DEF1_ON2 << 4) | \
82 (ID_LED_DEF1_DEF2))
83
84#define E1000_ICH_NVM_SIG_WORD 0x13
85#define E1000_ICH_NVM_SIG_MASK 0xC000
86
87#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
88
89#define E1000_FEXTNVM_SW_CONFIG 1
90#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
91
92#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
93
94#define E1000_ICH_RAR_ENTRIES 7
95
96#define PHY_PAGE_SHIFT 5
97#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
98 ((reg) & MAX_PHY_REG_ADDRESS))
99#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
100#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
101
102#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
103#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
104#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
105
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone    :1; /* bit 0 Flash Cycle Done */
		u16 flcerr     :1; /* bit 1 Flash Cycle Error */
		u16 dael       :1; /* bit 2 Direct Access error Log */
		u16 berasesz   :2; /* bit 4:3 Sector Erase Size */
		u16 flcinprog  :1; /* bit 5 flash cycle in Progress */
		u16 reserved1  :2; /* bit 7:6 Reserved */
		u16 reserved2  :6; /* bit 13:8 Reserved */
		u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
		u16 flockdn    :1; /* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval;
};
122
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo      :1;   /* 0 Flash Cycle Go */
		u16 flcycle    :2;   /* 2:1 Flash Cycle (read/write/erase) */
		u16 reserved   :5;   /* 7:3 Reserved */
		u16 fldbcount  :2;   /* 9:8 Flash Data Byte Count (value = bytes-1) */
		u16 flockdn    :6;   /* 15:10 Reserved; NOTE(review): field name suggests
				      * the FLOCKDN bit lives here — confirm vs ICH spec */
	} hsf_ctrl;
	u16 regval;
};
135
/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra      :8; /* 0:7 GbE region Read Access */
		u32 grwa      :8; /* 8:15 GbE region Write Access */
		u32 gmrag     :8; /* 23:16 GbE Master Read Access Grant */
		u32 gmwag     :8; /* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	/* NOTE(review): the bitfield struct above spans 32 bits but regval is
	 * only u16, so access through regval covers just the low half —
	 * confirm whether this should be u32. */
	u16 regval;
};
146
147static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
148static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
149static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
150static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw);
151static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
152static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
153 u32 offset, u8 byte);
154static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
155 u16 *data);
156static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
157 u8 size, u16 *data);
158static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
159static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
160
/* Raw 16/32-bit accessors for the ICH flash register window mapped at
 * hw->flash_address.  The short-form macros below assume a local variable
 * named "hw" is in scope, mirroring the driver's er32()/ew32() MMIO style. */
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
	return readw(hw->flash_address + reg);
}

static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
{
	return readl(hw->flash_address + reg);
}

static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
{
	writew(val, hw->flash_address + reg);
}

static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
{
	writel(val, hw->flash_address + reg);
}

#define er16flash(reg)		__er16flash(hw, (reg))
#define er32flash(reg)		__er32flash(hw, (reg))
#define ew16flash(reg,val)	__ew16flash(hw, (reg), (val))
#define ew32flash(reg,val)	__ew32flash(hw, (reg), (val))
185
/**
 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers.
 * Polls for a recognizable PHY ID (the PHY may not respond right after
 * reset), then sets the PHY type and autoneg mask from that ID.
 *
 * Returns 0 on success, -E1000_ERR_PHY for an unrecognized ID, or the
 * error from e1000e_get_phy_id().
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 i = 0;

	phy->addr = 1;
	phy->reset_delay_us = 100;

	/* Retry reading the PHY ID for up to ~100ms; id starts at 0, which
	 * maps to e1000_phy_unknown, so the loop always polls at least once. */
	phy->id = 0;
	while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
	       (i++ < 100)) {
		msleep(1);
		ret_val = e1000e_get_phy_id(hw);
		if (ret_val)
			return ret_val;
	}

	/* Verify phy id */
	switch (phy->id) {
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		break;
	case IFE_E_PHY_ID:
	case IFE_PLUS_E_PHY_ID:
	case IFE_C_E_PHY_ID:
		/* IFE PHYs are 10/100-only */
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		break;
	default:
		return -E1000_ERR_PHY;
		break;
	}

	return 0;
}
229
/**
 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific NVM parameters and function pointers.
 * Derives the NVM base address and per-bank size from the flash GFPREG
 * register and resets the shadow-RAM word cache.
 *
 * Returns 0 on success, -E1000_ERR_CONFIG if the flash window is unmapped.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg;
	u32 sector_base_addr;
	u32 sector_end_addr;
	u16 i;

	/* Can't read flash registers if the register set isn't mapped. */
	if (!hw->flash_address) {
		hw_dbg(hw, "ERROR: Flash registers not mapped\n");
		return -E1000_ERR_CONFIG;
	}

	nvm->type = e1000_nvm_flash_sw;

	gfpreg = er32flash(ICH_FLASH_GFPREG);

	/* sector_X_addr is a "sector"-aligned address (4096 bytes)
	 * Add 1 to sector_end_addr since this sector is included in
	 * the overall size. */
	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

	/* flash_base_addr is byte-aligned */
	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;

	/* find total size of the NVM, then cut in half since the total
	 * size represents two separate NVM banks. */
	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
				<< FLASH_SECTOR_ADDR_SHIFT;
	nvm->flash_bank_size /= 2;
	/* Adjust to word count */
	nvm->flash_bank_size /= sizeof(u16);

	nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;

	/* Clear shadow ram: mark every cached word unmodified so reads fall
	 * through to the flash until a write populates the cache. */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = 0;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

	return 0;
}
284
/**
 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 * @adapter: pointer to the driver adapter structure
 *
 * Initialize family-specific MAC parameters and function pointers.
 * Always returns 0.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;

	/* Set media type function pointer */
	hw->media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count; ICH8 has one fewer usable receive-address
	 * register than the rest of the family. */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* Set if manageability features are enabled. */
	mac->arc_subsystem_valid = 1;

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, 1);

	return 0;
}
315
/**
 * e1000_get_invariants_ich8lan - Initialize MAC, NVM and PHY parameters
 * @adapter: pointer to the driver adapter structure
 *
 * Runs the three family-specific init routines in order (MAC, NVM, PHY)
 * and sets the gig-speed-drop flag for ICH8 + IGP3 combinations.
 * Returns 0 on success or the first failing sub-init's error code.
 **/
static s32 e1000_get_invariants_ich8lan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 rc;

	rc = e1000_init_mac_params_ich8lan(adapter);
	if (rc)
		return rc;

	rc = e1000_init_nvm_params_ich8lan(hw);
	if (rc)
		return rc;

	rc = e1000_init_phy_params_ich8lan(hw);
	if (rc)
		return rc;

	if ((adapter->hw.mac.type == e1000_ich8lan) &&
	    (adapter->hw.phy.type == e1000_phy_igp_3))
		adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;

	return 0;
}
339
/**
 * e1000_acquire_swflag_ich8lan - Acquire software control flag
 * @hw: pointer to the HW structure
 *
 * Acquires the software control flag for performing NVM and PHY
 * operations.  This is a function pointer entry point only called by
 * read/write routines for the PHY and NVM parts.
 *
 * The flag is acquired by setting SWFLAG in EXTCNF_CTRL and reading it
 * back; the hardware keeps the bit clear while firmware owns the
 * resource, so we retry at 1ms intervals up to PHY_CFG_TIMEOUT.
 *
 * Returns 0 when acquired, -E1000_ERR_CONFIG on timeout.
 **/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl;
	u32 timeout = PHY_CFG_TIMEOUT;

	while (timeout) {
		extcnf_ctrl = er32(EXTCNF_CTRL);
		extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
		ew32(EXTCNF_CTRL, extcnf_ctrl);

		/* Read back to see whether the hardware granted the flag. */
		extcnf_ctrl = er32(EXTCNF_CTRL);
		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
			break;
		mdelay(1);
		timeout--;
	}

	if (!timeout) {
		hw_dbg(hw, "FW or HW has locked the resource for too long.\n");
		return -E1000_ERR_CONFIG;
	}

	return 0;
}
372
/**
 * e1000_release_swflag_ich8lan - Release software control flag
 * @hw: pointer to the HW structure
 *
 * Releases the software control flag for performing NVM and PHY operations.
 * This is a function pointer entry point only called by read/write
 * routines for the PHY and NVM parts.  Must balance a successful
 * e1000_acquire_swflag_ich8lan().
 **/
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl;

	extcnf_ctrl = er32(EXTCNF_CTRL);
	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
	ew32(EXTCNF_CTRL, extcnf_ctrl);
}
389
/**
 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
 * @hw: pointer to the HW structure
 *
 * Checks if firmware is blocking the reset of the PHY.
 * This is a function pointer entry point only called by
 * reset routines.
 *
 * Returns 0 when the PHY may be reset (FWSM RSPCIPHY bit set),
 * E1000_BLK_PHY_RESET otherwise.
 **/
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
{
	u32 fwsm;

	fwsm = er32(FWSM);

	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
}
406
/**
 * e1000_phy_force_speed_duplex_ich8lan - Force PHY speed & duplex
 * @hw: pointer to the HW structure
 *
 * Forces the speed and duplex settings of the PHY.
 * This is a function pointer entry point only called by
 * PHY setup routines.
 *
 * Non-IFE PHYs are delegated to the generic IGP routine.  For IFE PHYs,
 * force the speed/duplex via PHY_CONTROL, disable auto/forced MDI-X
 * (unsupported when forcing 10/100), then optionally wait for link.
 **/
static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	bool link;

	if (phy->type != e1000_phy_ife) {
		ret_val = e1000e_phy_force_speed_duplex_igp(hw);
		return ret_val;
	}

	ret_val = e1e_rphy(hw, PHY_CONTROL, &data);
	if (ret_val)
		return ret_val;

	/* Fold the forced speed/duplex settings into the control word. */
	e1000e_phy_force_speed_duplex_setup(hw, &data);

	ret_val = e1e_wphy(hw, PHY_CONTROL, data);
	if (ret_val)
		return ret_val;

	/* Disable MDI-X support for 10/100 */
	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
	if (ret_val)
		return ret_val;

	data &= ~IFE_PMC_AUTO_MDIX;
	data &= ~IFE_PMC_FORCE_MDIX;

	ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data);
	if (ret_val)
		return ret_val;

	hw_dbg(hw, "IFE PMC: %X\n", data);

	udelay(1);

	if (phy->wait_for_link) {
		hw_dbg(hw, "Waiting for forced speed/duplex link on IFE phy.\n");

		ret_val = e1000e_phy_has_link_generic(hw,
						      PHY_FORCE_LIMIT,
						      100000,
						      &link);
		if (ret_val)
			return ret_val;

		if (!link)
			hw_dbg(hw, "Link taking longer than expected.\n");

		/* Try once more */
		ret_val = e1000e_phy_has_link_generic(hw,
						      PHY_FORCE_LIMIT,
						      100000,
						      &link);
		if (ret_val)
			return ret_val;
	}

	return 0;
}
477
/**
 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
 * @hw: pointer to the HW structure
 *
 * Resets the PHY.
 * This is a function pointer entry point called by drivers
 * or other shared routines.
 *
 * After the generic reset, on ICH8 + IGP3 parts this routine may replay
 * the LCD (PHY) configuration from the NVM extended-config region:
 * the NVM holds (data, address) word pairs that are written to the PHY,
 * with IGP01E1000_PHY_PAGE_SELECT entries captured as the page for
 * subsequent writes.
 **/
static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i;
	u32 data, cnf_size, cnf_base_addr, sw_cfg_mask;
	s32 ret_val;
	u16 loop = E1000_ICH8_LAN_INIT_TIMEOUT;
	u16 word_addr, reg_data, reg_addr, phy_page = 0;

	ret_val = e1000e_phy_hw_reset_generic(hw);
	if (ret_val)
		return ret_val;

	/* Initialize the PHY from the NVM on ICH platforms.  This
	 * is needed due to an issue where the NVM configuration is
	 * not properly autoloaded after power transitions.
	 * Therefore, after each PHY reset, we will load the
	 * configuration data out of the NVM manually.
	 */
	if (hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) {
		struct e1000_adapter *adapter = hw->adapter;

		/* Check if SW needs configure the PHY; the ICH8M variants
		 * use a different FEXTNVM bit for this indication. */
		if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
		    (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M))
			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
		else
			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;

		data = er32(FEXTNVM);
		if (!(data & sw_cfg_mask))
			return 0;

		/* Wait for basic configuration completes before proceeding */
		do {
			data = er32(STATUS);
			data &= E1000_STATUS_LAN_INIT_DONE;
			udelay(100);
		} while ((!data) && --loop);

		/* If basic configuration is incomplete before the above loop
		 * count reaches 0, loading the configuration from NVM will
		 * leave the PHY in a bad state possibly resulting in no link.
		 */
		if (loop == 0) {
			hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n");
		}

		/* Clear the Init Done bit for the next init event */
		data = er32(STATUS);
		data &= ~E1000_STATUS_LAN_INIT_DONE;
		ew32(STATUS, data);

		/* Make sure HW does not configure LCD from PHY
		 * extended configuration before SW configuration */
		data = er32(EXTCNF_CTRL);
		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
			return 0;

		cnf_size = er32(EXTCNF_SIZE);
		cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
		cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
		if (!cnf_size)
			return 0;

		cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
		cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;

		/* Configure LCD from extended configuration
		 * region. */

		/* cnf_base_addr is in DWORD */
		word_addr = (u16)(cnf_base_addr << 1);

		/* Each config entry is a (data, address) pair of NVM words. */
		for (i = 0; i < cnf_size; i++) {
			ret_val = e1000_read_nvm(hw,
						 (word_addr + i * 2),
						 1,
						 &reg_data);
			if (ret_val)
				return ret_val;

			ret_val = e1000_read_nvm(hw,
						 (word_addr + i * 2 + 1),
						 1,
						 &reg_addr);
			if (ret_val)
				return ret_val;

			/* Save off the PHY page for future writes. */
			if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
				phy_page = reg_data;
				continue;
			}

			reg_addr |= phy_page;

			ret_val = e1e_wphy(hw, (u32)reg_addr, reg_data);
			if (ret_val)
				return ret_val;
		}
	}

	return 0;
}
591
/**
 * e1000_get_phy_info_ife_ich8lan - Retrieves various IFE PHY states
 * @hw: pointer to the HW structure
 *
 * Populates "phy" structure with various feature states.
 * This function is only called by other family-specific
 * routines.
 *
 * Requires link to be up; returns -E1000_ERR_CONFIG otherwise.
 * Cable length and receiver status fields are left "undefined"
 * because the IFE PHY only operates at 10/100.
 **/
static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	bool link;

	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link) {
		hw_dbg(hw, "Phy info is only valid if link is up\n");
		return -E1000_ERR_CONFIG;
	}

	ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
	if (ret_val)
		return ret_val;
	phy->polarity_correction = (!(data & IFE_PSC_AUTO_POLARITY_DISABLE));

	if (phy->polarity_correction) {
		ret_val = e1000_check_polarity_ife_ich8lan(hw);
		if (ret_val)
			return ret_val;
	} else {
		/* Polarity is forced */
		phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
				      ? e1000_rev_polarity_reversed
				      : e1000_rev_polarity_normal;
	}

	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
	if (ret_val)
		return ret_val;

	phy->is_mdix = (data & IFE_PMC_MDIX_STATUS);

	/* The following parameters are undefined for 10/100 operation. */
	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
	phy->local_rx = e1000_1000t_rx_status_undefined;
	phy->remote_rx = e1000_1000t_rx_status_undefined;

	return 0;
}
645
646/**
647 * e1000_get_phy_info_ich8lan - Calls appropriate PHY type get_phy_info
648 * @hw: pointer to the HW structure
649 *
650 * Wrapper for calling the get_phy_info routines for the appropriate phy type.
651 * This is a function pointer entry point called by drivers
652 * or other shared routines.
653 **/
654static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
655{
656 switch (hw->phy.type) {
657 case e1000_phy_ife:
658 return e1000_get_phy_info_ife_ich8lan(hw);
659 break;
660 case e1000_phy_igp_3:
661 return e1000e_get_phy_info_igp(hw);
662 break;
663 default:
664 break;
665 }
666
667 return -E1000_ERR_PHY_TYPE;
668}
669
/**
 * e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY
 * @hw: pointer to the HW structure
 *
 * Polarity is determined on the polarity reversal feature being enabled.
 * This function is only called by other family-specific
 * routines.
 *
 * When automatic polarity correction is enabled the live reversal status
 * is read from the extended status register; otherwise the forced
 * polarity bit in the special control register is reported.
 **/
static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, offset, mask;

	/* Polarity is determined based on the reversal feature
	 * being enabled.
	 */
	if (phy->polarity_correction) {
		offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
		mask = IFE_PESC_POLARITY_REVERSED;
	} else {
		offset = IFE_PHY_SPECIAL_CONTROL;
		mask = IFE_PSC_FORCE_POLARITY;
	}

	ret_val = e1e_rphy(hw, offset, &phy_data);

	if (!ret_val)
		phy->cable_polarity = (phy_data & mask)
				      ? e1000_rev_polarity_reversed
				      : e1000_rev_polarity_normal;

	return ret_val;
}
704
705/**
706 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
707 * @hw: pointer to the HW structure
708 * @active: TRUE to enable LPLU, FALSE to disable
709 *
710 * Sets the LPLU D0 state according to the active flag. When
711 * activating LPLU this function also disables smart speed
712 * and vice versa. LPLU will not be activated unless the
713 * device autonegotiation advertisement meets standards of
714 * either 10 or 10/100 or 10/100/1000 at all duplexes.
715 * This is a function pointer entry point only called by
716 * PHY setup routines.
717 **/
718static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
719{
720 struct e1000_phy_info *phy = &hw->phy;
721 u32 phy_ctrl;
722 s32 ret_val = 0;
723 u16 data;
724
725 if (phy->type != e1000_phy_igp_3)
726 return ret_val;
727
728 phy_ctrl = er32(PHY_CTRL);
729
730 if (active) {
731 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
732 ew32(PHY_CTRL, phy_ctrl);
733
734 /* Call gig speed drop workaround on LPLU before accessing
735 * any PHY registers */
736 if ((hw->mac.type == e1000_ich8lan) &&
737 (hw->phy.type == e1000_phy_igp_3))
738 e1000e_gig_downshift_workaround_ich8lan(hw);
739
740 /* When LPLU is enabled, we should disable SmartSpeed */
741 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
742 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
743 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
744 if (ret_val)
745 return ret_val;
746 } else {
747 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
748 ew32(PHY_CTRL, phy_ctrl);
749
750 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
751 * during Dx states where the power conservation is most
752 * important. During driver activity we should enable
753 * SmartSpeed, so performance is maintained. */
754 if (phy->smart_speed == e1000_smart_speed_on) {
755 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
756 &data);
757 if (ret_val)
758 return ret_val;
759
760 data |= IGP01E1000_PSCFR_SMART_SPEED;
761 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
762 data);
763 if (ret_val)
764 return ret_val;
765 } else if (phy->smart_speed == e1000_smart_speed_off) {
766 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
767 &data);
768 if (ret_val)
769 return ret_val;
770
771 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
772 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
773 data);
774 if (ret_val)
775 return ret_val;
776 }
777 }
778
779 return 0;
780}
781
782/**
783 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
784 * @hw: pointer to the HW structure
785 * @active: TRUE to enable LPLU, FALSE to disable
786 *
787 * Sets the LPLU D3 state according to the active flag. When
788 * activating LPLU this function also disables smart speed
789 * and vice versa. LPLU will not be activated unless the
790 * device autonegotiation advertisement meets standards of
791 * either 10 or 10/100 or 10/100/1000 at all duplexes.
792 * This is a function pointer entry point only called by
793 * PHY setup routines.
794 **/
795static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
796{
797 struct e1000_phy_info *phy = &hw->phy;
798 u32 phy_ctrl;
799 s32 ret_val;
800 u16 data;
801
802 phy_ctrl = er32(PHY_CTRL);
803
804 if (!active) {
805 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
806 ew32(PHY_CTRL, phy_ctrl);
807 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
808 * during Dx states where the power conservation is most
809 * important. During driver activity we should enable
810 * SmartSpeed, so performance is maintained. */
811 if (phy->smart_speed == e1000_smart_speed_on) {
812 ret_val = e1e_rphy(hw,
813 IGP01E1000_PHY_PORT_CONFIG,
814 &data);
815 if (ret_val)
816 return ret_val;
817
818 data |= IGP01E1000_PSCFR_SMART_SPEED;
819 ret_val = e1e_wphy(hw,
820 IGP01E1000_PHY_PORT_CONFIG,
821 data);
822 if (ret_val)
823 return ret_val;
824 } else if (phy->smart_speed == e1000_smart_speed_off) {
825 ret_val = e1e_rphy(hw,
826 IGP01E1000_PHY_PORT_CONFIG,
827 &data);
828 if (ret_val)
829 return ret_val;
830
831 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
832 ret_val = e1e_wphy(hw,
833 IGP01E1000_PHY_PORT_CONFIG,
834 data);
835 if (ret_val)
836 return ret_val;
837 }
838 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
839 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
840 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
841 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
842 ew32(PHY_CTRL, phy_ctrl);
843
844 /* Call gig speed drop workaround on LPLU before accessing
845 * any PHY registers */
846 if ((hw->mac.type == e1000_ich8lan) &&
847 (hw->phy.type == e1000_phy_igp_3))
848 e1000e_gig_downshift_workaround_ich8lan(hw);
849
850 /* When LPLU is enabled, we should disable SmartSpeed */
851 ret_val = e1e_rphy(hw,
852 IGP01E1000_PHY_PORT_CONFIG,
853 &data);
854 if (ret_val)
855 return ret_val;
856
857 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
858 ret_val = e1e_wphy(hw,
859 IGP01E1000_PHY_PORT_CONFIG,
860 data);
861 }
862
863 return 0;
864}
865
/**
 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
 * @hw: pointer to the HW structure
 * @offset: The offset (in bytes) of the word(s) to read.
 * @words: Size of data to read in words
 * @data: Pointer to the word(s) to read at offset.
 *
 * Reads a word(s) from the NVM using the flash access registers.
 * Words that have been modified in the shadow RAM are served from the
 * cache; everything else is read from the currently valid flash bank.
 * Holds the software control flag for the duration of the read.
 **/
static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 act_offset;
	s32 ret_val;
	u16 i, word;

	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
	    (words == 0)) {
		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	ret_val = e1000_acquire_swflag_ich8lan(hw);
	if (ret_val)
		return ret_val;

	/* Start with the bank offset, then add the relative offset.
	 * EECD SEC1VAL selects which of the two flash banks is valid. */
	act_offset = (er32(EECD) & E1000_EECD_SEC1VAL)
		     ? nvm->flash_bank_size
		     : 0;
	act_offset += offset;

	for (i = 0; i < words; i++) {
		/* NOTE(review): shadow_ram is an array member, so the first
		 * operand of this && is always true — likely a leftover
		 * pointer check; only the "modified" test matters. */
		if ((dev_spec->shadow_ram) &&
		    (dev_spec->shadow_ram[offset+i].modified)) {
			data[i] = dev_spec->shadow_ram[offset+i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw,
								act_offset + i,
								&word);
			if (ret_val)
				break;
			data[i] = word;
		}
	}

	e1000_release_swflag_ich8lan(hw);

	return ret_val;
}
918
/**
 * e1000_flash_cycle_init_ich8lan - Initialize flash
 * @hw: pointer to the HW structure
 *
 * This function does initial flash setup so that a new read/write/erase
 * cycle can be started: it validates the flash descriptor, clears any
 * stale error bits, and ensures no hardware cycle is still in progress.
 *
 * Returns 0 when a new cycle may start, -E1000_ERR_NVM otherwise.
 **/
static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;
	s32 i = 0;

	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);

	/* Check if the flash descriptor is valid */
	if (hsfsts.hsf_status.fldesvalid == 0) {
		hw_dbg(hw, "Flash descriptor invalid.  "
			 "SW Sequencing must be used.");
		return -E1000_ERR_NVM;
	}

	/* Clear FCERR and DAEL in hw status by writing 1 (write-1-to-clear) */
	hsfsts.hsf_status.flcerr = 1;
	hsfsts.hsf_status.dael = 1;

	ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);

	/* Either we should have a hardware SPI cycle in progress
	 * bit to check against, in order to start a new cycle or
	 * FDONE bit should be changed in the hardware so that it
	 * is 1 after hardware reset, which can then be used as an
	 * indication whether a cycle is in progress or has been
	 * completed.
	 */

	if (hsfsts.hsf_status.flcinprog == 0) {
		/* There is no cycle running at present,
		 * so we can start a cycle */
		/* Begin by setting Flash Cycle Done. */
		hsfsts.hsf_status.flcdone = 1;
		ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		ret_val = 0;
	} else {
		/* otherwise poll for sometime so the current
		 * cycle has a chance to end before giving up. */
		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
			hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcinprog == 0) {
				ret_val = 0;
				break;
			}
			udelay(1);
		}
		if (ret_val == 0) {
			/* Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done. */
			hsfsts.hsf_status.flcdone = 1;
			ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		} else {
			hw_dbg(hw, "Flash controller busy, cannot get access");
		}
	}

	return ret_val;
}
985
/**
 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
 * @hw: pointer to the HW structure
 * @timeout: maximum time to wait for completion, in microsecond polls
 *
 * This function starts a flash cycle and waits for its completion.
 * The caller must already have programmed HSFCTL (cycle type, byte
 * count) and FADDR; this routine only sets FLCGO and polls FLCDONE.
 *
 * Returns 0 when the cycle completed without FLCERR, -E1000_ERR_NVM on
 * timeout or cycle error.
 **/
static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
{
	union ich8_hws_flash_ctrl hsflctl;
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;
	u32 i = 0;

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
	hsflctl.hsf_ctrl.flcgo = 1;
	ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

	/* wait till FDONE bit is set to 1; the do-while guarantees at least
	 * one status read even when timeout is 0 */
	do {
		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcdone == 1)
			break;
		udelay(1);
	} while (i++ < timeout);

	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
		return 0;

	return ret_val;
}
1018
1019/**
1020 * e1000_read_flash_word_ich8lan - Read word from flash
1021 * @hw: pointer to the HW structure
1022 * @offset: offset to data location
1023 * @data: pointer to the location for storing the data
1024 *
1025 * Reads the flash word at offset into data. Offset is converted
1026 * to bytes before read.
1027 **/
1028static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
1029 u16 *data)
1030{
1031 /* Must convert offset into bytes. */
1032 offset <<= 1;
1033
1034 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
1035}
1036
/**
 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
 * @hw: pointer to the HW structure
 * @offset: The offset (in bytes) of the byte or word to read.
 * @size: Size of data to read, 1=byte 2=word
 * @data: Pointer to the word to store the value read.
 *
 * Reads a byte or word from the NVM using the flash access registers.
 * Retries up to ICH_FLASH_CYCLE_REPEAT_COUNT times on a flash cycle
 * error; gives up immediately on a cycle that never completed.
 **/
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
		return -E1000_ERR_NVM;

	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			    hw->nvm.flash_base_addr;

	do {
		udelay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val != 0)
			break;

		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);

		ret_val = e1000_flash_cycle_ich8lan(hw,
						ICH_FLASH_READ_COMMAND_TIMEOUT);

		/* Check if FCERR is set to 1, if set to 1, clear it
		 * and try the whole sequence a few more times, else
		 * read in (shift in) the Flash Data0, the order is
		 * least significant byte first msb to lsb */
		if (ret_val == 0) {
			flash_data = er32flash(ICH_FLASH_FDATA0);
			if (size == 1) {
				*data = (u8)(flash_data & 0x000000FF);
			} else if (size == 2) {
				*data = (u16)(flash_data & 0x0000FFFF);
			}
			break;
		} else {
			/* If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr == 1) {
				/* Repeat for some time before giving up. */
				continue;
			} else if (hsfsts.hsf_status.flcdone == 0) {
				hw_dbg(hw, "Timeout error - flash cycle "
					 "did not complete.");
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
1112
1113/**
1114 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
1115 * @hw: pointer to the HW structure
1116 * @offset: The offset (in bytes) of the word(s) to write.
1117 * @words: Size of data to write in words
1118 * @data: Pointer to the word(s) to write at offset.
1119 *
1120 * Writes a byte or word to the NVM using the flash access registers.
1121 **/
1122static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1123 u16 *data)
1124{
1125 struct e1000_nvm_info *nvm = &hw->nvm;
1126 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
1127 s32 ret_val;
1128 u16 i;
1129
1130 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
1131 (words == 0)) {
1132 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1133 return -E1000_ERR_NVM;
1134 }
1135
1136 ret_val = e1000_acquire_swflag_ich8lan(hw);
1137 if (ret_val)
1138 return ret_val;
1139
1140 for (i = 0; i < words; i++) {
1141 dev_spec->shadow_ram[offset+i].modified = 1;
1142 dev_spec->shadow_ram[offset+i].value = data[i];
1143 }
1144
1145 e1000_release_swflag_ich8lan(hw);
1146
1147 return 0;
1148}
1149
1150/**
1151 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
1152 * @hw: pointer to the HW structure
1153 *
1154 * The NVM checksum is updated by calling the generic update_nvm_checksum,
1155 * which writes the checksum to the shadow ram. The changes in the shadow
1156 * ram are then committed to the EEPROM by processing each bank at a time
1157 * checking for the modified bit and writing only the pending changes.
1158 * After a succesful commit, the shadow ram is cleared and is ready for
1159 * future writes.
1160 **/
1161static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1162{
1163 struct e1000_nvm_info *nvm = &hw->nvm;
1164 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
1165 u32 i, act_offset, new_bank_offset, old_bank_offset;
1166 s32 ret_val;
1167 u16 data;
1168
1169 ret_val = e1000e_update_nvm_checksum_generic(hw);
1170 if (ret_val)
1171 return ret_val;;
1172
1173 if (nvm->type != e1000_nvm_flash_sw)
1174 return ret_val;;
1175
1176 ret_val = e1000_acquire_swflag_ich8lan(hw);
1177 if (ret_val)
1178 return ret_val;;
1179
1180 /* We're writing to the opposite bank so if we're on bank 1,
1181 * write to bank 0 etc. We also need to erase the segment that
1182 * is going to be written */
1183 if (!(er32(EECD) & E1000_EECD_SEC1VAL)) {
1184 new_bank_offset = nvm->flash_bank_size;
1185 old_bank_offset = 0;
1186 e1000_erase_flash_bank_ich8lan(hw, 1);
1187 } else {
1188 old_bank_offset = nvm->flash_bank_size;
1189 new_bank_offset = 0;
1190 e1000_erase_flash_bank_ich8lan(hw, 0);
1191 }
1192
1193 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
1194 /* Determine whether to write the value stored
1195 * in the other NVM bank or a modified value stored
1196 * in the shadow RAM */
1197 if (dev_spec->shadow_ram[i].modified) {
1198 data = dev_spec->shadow_ram[i].value;
1199 } else {
1200 e1000_read_flash_word_ich8lan(hw,
1201 i + old_bank_offset,
1202 &data);
1203 }
1204
1205 /* If the word is 0x13, then make sure the signature bits
1206 * (15:14) are 11b until the commit has completed.
1207 * This will allow us to write 10b which indicates the
1208 * signature is valid. We want to do this after the write
1209 * has completed so that we don't mark the segment valid
1210 * while the write is still in progress */
1211 if (i == E1000_ICH_NVM_SIG_WORD)
1212 data |= E1000_ICH_NVM_SIG_MASK;
1213
1214 /* Convert offset to bytes. */
1215 act_offset = (i + new_bank_offset) << 1;
1216
1217 udelay(100);
1218 /* Write the bytes to the new bank. */
1219 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
1220 act_offset,
1221 (u8)data);
1222 if (ret_val)
1223 break;
1224
1225 udelay(100);
1226 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
1227 act_offset + 1,
1228 (u8)(data >> 8));
1229 if (ret_val)
1230 break;
1231 }
1232
1233 /* Don't bother writing the segment valid bits if sector
1234 * programming failed. */
1235 if (ret_val) {
1236 hw_dbg(hw, "Flash commit failed.\n");
1237 e1000_release_swflag_ich8lan(hw);
1238 return ret_val;
1239 }
1240
1241 /* Finally validate the new segment by setting bit 15:14
1242 * to 10b in word 0x13 , this can be done without an
1243 * erase as well since these bits are 11 to start with
1244 * and we need to change bit 14 to 0b */
1245 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
1246 e1000_read_flash_word_ich8lan(hw, act_offset, &data);
1247 data &= 0xBFFF;
1248 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
1249 act_offset * 2 + 1,
1250 (u8)(data >> 8));
1251 if (ret_val) {
1252 e1000_release_swflag_ich8lan(hw);
1253 return ret_val;
1254 }
1255
1256 /* And invalidate the previously valid segment by setting
1257 * its signature word (0x13) high_byte to 0b. This can be
1258 * done without an erase because flash erase sets all bits
1259 * to 1's. We can write 1's to 0's without an erase */
1260 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
1261 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
1262 if (ret_val) {
1263 e1000_release_swflag_ich8lan(hw);
1264 return ret_val;
1265 }
1266
1267 /* Great! Everything worked, we can now clear the cached entries. */
1268 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
1269 dev_spec->shadow_ram[i].modified = 0;
1270 dev_spec->shadow_ram[i].value = 0xFFFF;
1271 }
1272
1273 e1000_release_swflag_ich8lan(hw);
1274
1275 /* Reload the EEPROM, or else modifications will not appear
1276 * until after the next adapter reset.
1277 */
1278 e1000e_reload_nvm(hw);
1279 msleep(10);
1280
1281 return ret_val;
1282}
1283
1284/**
1285 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
1286 * @hw: pointer to the HW structure
1287 *
1288 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
1289 * If the bit is 0, that the EEPROM had been modified, but the checksum was not
1290 * calculated, in which case we need to calculate the checksum and set bit 6.
1291 **/
1292static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
1293{
1294 s32 ret_val;
1295 u16 data;
1296
1297 /* Read 0x19 and check bit 6. If this bit is 0, the checksum
1298 * needs to be fixed. This bit is an indication that the NVM
1299 * was prepared by OEM software and did not calculate the
1300 * checksum...a likely scenario.
1301 */
1302 ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
1303 if (ret_val)
1304 return ret_val;
1305
1306 if ((data & 0x40) == 0) {
1307 data |= 0x40;
1308 ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
1309 if (ret_val)
1310 return ret_val;
1311 ret_val = e1000e_update_nvm_checksum(hw);
1312 if (ret_val)
1313 return ret_val;
1314 }
1315
1316 return e1000e_validate_nvm_checksum_generic(hw);
1317}
1318
1319/**
1320 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
1321 * @hw: pointer to the HW structure
1322 * @offset: The offset (in bytes) of the byte/word to read.
1323 * @size: Size of data to read, 1=byte 2=word
1324 * @data: The byte(s) to write to the NVM.
1325 *
1326 * Writes one/two bytes to the NVM using the flash access registers.
1327 **/
1328static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
1329 u8 size, u16 data)
1330{
1331 union ich8_hws_flash_status hsfsts;
1332 union ich8_hws_flash_ctrl hsflctl;
1333 u32 flash_linear_addr;
1334 u32 flash_data = 0;
1335 s32 ret_val;
1336 u8 count = 0;
1337
1338 if (size < 1 || size > 2 || data > size * 0xff ||
1339 offset > ICH_FLASH_LINEAR_ADDR_MASK)
1340 return -E1000_ERR_NVM;
1341
1342 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
1343 hw->nvm.flash_base_addr;
1344
1345 do {
1346 udelay(1);
1347 /* Steps */
1348 ret_val = e1000_flash_cycle_init_ich8lan(hw);
1349 if (ret_val)
1350 break;
1351
1352 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
1353 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
1354 hsflctl.hsf_ctrl.fldbcount = size -1;
1355 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
1356 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
1357
1358 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
1359
1360 if (size == 1)
1361 flash_data = (u32)data & 0x00FF;
1362 else
1363 flash_data = (u32)data;
1364
1365 ew32flash(ICH_FLASH_FDATA0, flash_data);
1366
1367 /* check if FCERR is set to 1 , if set to 1, clear it
1368 * and try the whole sequence a few more times else done */
1369 ret_val = e1000_flash_cycle_ich8lan(hw,
1370 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
1371 if (!ret_val)
1372 break;
1373
1374 /* If we're here, then things are most likely
1375 * completely hosed, but if the error condition
1376 * is detected, it won't hurt to give it another
1377 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
1378 */
1379 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
1380 if (hsfsts.hsf_status.flcerr == 1)
1381 /* Repeat for some time before giving up. */
1382 continue;
1383 if (hsfsts.hsf_status.flcdone == 0) {
1384 hw_dbg(hw, "Timeout error - flash cycle "
1385 "did not complete.");
1386 break;
1387 }
1388 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
1389
1390 return ret_val;
1391}
1392
1393/**
1394 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
1395 * @hw: pointer to the HW structure
1396 * @offset: The index of the byte to read.
1397 * @data: The byte to write to the NVM.
1398 *
1399 * Writes a single byte to the NVM using the flash access registers.
1400 **/
1401static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
1402 u8 data)
1403{
1404 u16 word = (u16)data;
1405
1406 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
1407}
1408
1409/**
1410 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
1411 * @hw: pointer to the HW structure
1412 * @offset: The offset of the byte to write.
1413 * @byte: The byte to write to the NVM.
1414 *
1415 * Writes a single byte to the NVM using the flash access registers.
1416 * Goes through a retry algorithm before giving up.
1417 **/
1418static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
1419 u32 offset, u8 byte)
1420{
1421 s32 ret_val;
1422 u16 program_retries;
1423
1424 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
1425 if (!ret_val)
1426 return ret_val;
1427
1428 for (program_retries = 0; program_retries < 100; program_retries++) {
1429 hw_dbg(hw, "Retrying Byte %2.2X at offset %u\n", byte, offset);
1430 udelay(100);
1431 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
1432 if (!ret_val)
1433 break;
1434 }
1435 if (program_retries == 100)
1436 return -E1000_ERR_NVM;
1437
1438 return 0;
1439}
1440
1441/**
1442 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
1443 * @hw: pointer to the HW structure
1444 * @bank: 0 for first bank, 1 for second bank, etc.
1445 *
1446 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
1447 * bank N is 4096 * N + flash_reg_addr.
1448 **/
1449static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
1450{
1451 struct e1000_nvm_info *nvm = &hw->nvm;
1452 union ich8_hws_flash_status hsfsts;
1453 union ich8_hws_flash_ctrl hsflctl;
1454 u32 flash_linear_addr;
1455 /* bank size is in 16bit words - adjust to bytes */
1456 u32 flash_bank_size = nvm->flash_bank_size * 2;
1457 s32 ret_val;
1458 s32 count = 0;
1459 s32 iteration;
1460 s32 sector_size;
1461 s32 j;
1462
1463 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
1464
1465 /* Determine HW Sector size: Read BERASE bits of hw flash status
1466 * register */
1467 /* 00: The Hw sector is 256 bytes, hence we need to erase 16
1468 * consecutive sectors. The start index for the nth Hw sector
1469 * can be calculated as = bank * 4096 + n * 256
1470 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
1471 * The start index for the nth Hw sector can be calculated
1472 * as = bank * 4096
1473 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
1474 * (ich9 only, otherwise error condition)
1475 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
1476 */
1477 switch (hsfsts.hsf_status.berasesz) {
1478 case 0:
1479 /* Hw sector size 256 */
1480 sector_size = ICH_FLASH_SEG_SIZE_256;
1481 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
1482 break;
1483 case 1:
1484 sector_size = ICH_FLASH_SEG_SIZE_4K;
1485 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_4K;
1486 break;
1487 case 2:
1488 if (hw->mac.type == e1000_ich9lan) {
1489 sector_size = ICH_FLASH_SEG_SIZE_8K;
1490 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_8K;
1491 } else {
1492 return -E1000_ERR_NVM;
1493 }
1494 break;
1495 case 3:
1496 sector_size = ICH_FLASH_SEG_SIZE_64K;
1497 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_64K;
1498 break;
1499 default:
1500 return -E1000_ERR_NVM;
1501 }
1502
1503 /* Start with the base address, then add the sector offset. */
1504 flash_linear_addr = hw->nvm.flash_base_addr;
1505 flash_linear_addr += (bank) ? (sector_size * iteration) : 0;
1506
1507 for (j = 0; j < iteration ; j++) {
1508 do {
1509 /* Steps */
1510 ret_val = e1000_flash_cycle_init_ich8lan(hw);
1511 if (ret_val)
1512 return ret_val;
1513
1514 /* Write a value 11 (block Erase) in Flash
1515 * Cycle field in hw flash control */
1516 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
1517 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
1518 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
1519
1520 /* Write the last 24 bits of an index within the
1521 * block into Flash Linear address field in Flash
1522 * Address.
1523 */
1524 flash_linear_addr += (j * sector_size);
1525 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
1526
1527 ret_val = e1000_flash_cycle_ich8lan(hw,
1528 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
1529 if (ret_val == 0)
1530 break;
1531
1532 /* Check if FCERR is set to 1. If 1,
1533 * clear it and try the whole sequence
1534 * a few more times else Done */
1535 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
1536 if (hsfsts.hsf_status.flcerr == 1)
1537 /* repeat for some time before
1538 * giving up */
1539 continue;
1540 else if (hsfsts.hsf_status.flcdone == 0)
1541 return ret_val;
1542 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
1543 }
1544
1545 return 0;
1546}
1547
1548/**
1549 * e1000_valid_led_default_ich8lan - Set the default LED settings
1550 * @hw: pointer to the HW structure
1551 * @data: Pointer to the LED settings
1552 *
1553 * Reads the LED default settings from the NVM to data. If the NVM LED
1554 * settings is all 0's or F's, set the LED default to a valid LED default
1555 * setting.
1556 **/
1557static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
1558{
1559 s32 ret_val;
1560
1561 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1562 if (ret_val) {
1563 hw_dbg(hw, "NVM Read Error\n");
1564 return ret_val;
1565 }
1566
1567 if (*data == ID_LED_RESERVED_0000 ||
1568 *data == ID_LED_RESERVED_FFFF)
1569 *data = ID_LED_DEFAULT_ICH8LAN;
1570
1571 return 0;
1572}
1573
1574/**
1575 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
1576 * @hw: pointer to the HW structure
1577 *
1578 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
1579 * register, so the the bus width is hard coded.
1580 **/
1581static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
1582{
1583 struct e1000_bus_info *bus = &hw->bus;
1584 s32 ret_val;
1585
1586 ret_val = e1000e_get_bus_info_pcie(hw);
1587
1588 /* ICH devices are "PCI Express"-ish. They have
1589 * a configuration space, but do not contain
1590 * PCI Express Capability registers, so bus width
1591 * must be hardcoded.
1592 */
1593 if (bus->width == e1000_bus_width_unknown)
1594 bus->width = e1000_bus_width_pcie_x1;
1595
1596 return ret_val;
1597}
1598
1599/**
1600 * e1000_reset_hw_ich8lan - Reset the hardware
1601 * @hw: pointer to the HW structure
1602 *
1603 * Does a full reset of the hardware which includes a reset of the PHY and
1604 * MAC.
1605 **/
1606static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
1607{
1608 u32 ctrl, icr, kab;
1609 s32 ret_val;
1610
1611 /* Prevent the PCI-E bus from sticking if there is no TLP connection
1612 * on the last TLP read/write transaction when MAC is reset.
1613 */
1614 ret_val = e1000e_disable_pcie_master(hw);
1615 if (ret_val) {
1616 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
1617 }
1618
1619 hw_dbg(hw, "Masking off all interrupts\n");
1620 ew32(IMC, 0xffffffff);
1621
1622 /* Disable the Transmit and Receive units. Then delay to allow
1623 * any pending transactions to complete before we hit the MAC
1624 * with the global reset.
1625 */
1626 ew32(RCTL, 0);
1627 ew32(TCTL, E1000_TCTL_PSP);
1628 e1e_flush();
1629
1630 msleep(10);
1631
1632 /* Workaround for ICH8 bit corruption issue in FIFO memory */
1633 if (hw->mac.type == e1000_ich8lan) {
1634 /* Set Tx and Rx buffer allocation to 8k apiece. */
1635 ew32(PBA, E1000_PBA_8K);
1636 /* Set Packet Buffer Size to 16k. */
1637 ew32(PBS, E1000_PBS_16K);
1638 }
1639
1640 ctrl = er32(CTRL);
1641
1642 if (!e1000_check_reset_block(hw)) {
1643 /* PHY HW reset requires MAC CORE reset at the same
1644 * time to make sure the interface between MAC and the
1645 * external PHY is reset.
1646 */
1647 ctrl |= E1000_CTRL_PHY_RST;
1648 }
1649 ret_val = e1000_acquire_swflag_ich8lan(hw);
1650 hw_dbg(hw, "Issuing a global reset to ich8lan");
1651 ew32(CTRL, (ctrl | E1000_CTRL_RST));
1652 msleep(20);
1653
1654 ret_val = e1000e_get_auto_rd_done(hw);
1655 if (ret_val) {
1656 /*
1657 * When auto config read does not complete, do not
1658 * return with an error. This can happen in situations
1659 * where there is no eeprom and prevents getting link.
1660 */
1661 hw_dbg(hw, "Auto Read Done did not complete\n");
1662 }
1663
1664 ew32(IMC, 0xffffffff);
1665 icr = er32(ICR);
1666
1667 kab = er32(KABGTXD);
1668 kab |= E1000_KABGTXD_BGSQLBIAS;
1669 ew32(KABGTXD, kab);
1670
1671 return ret_val;
1672}
1673
1674/**
1675 * e1000_init_hw_ich8lan - Initialize the hardware
1676 * @hw: pointer to the HW structure
1677 *
1678 * Prepares the hardware for transmit and receive by doing the following:
1679 * - initialize hardware bits
1680 * - initialize LED identification
1681 * - setup receive address registers
1682 * - setup flow control
1683 * - setup transmit discriptors
1684 * - clear statistics
1685 **/
1686static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
1687{
1688 struct e1000_mac_info *mac = &hw->mac;
1689 u32 ctrl_ext, txdctl, snoop;
1690 s32 ret_val;
1691 u16 i;
1692
1693 e1000_initialize_hw_bits_ich8lan(hw);
1694
1695 /* Initialize identification LED */
1696 ret_val = e1000e_id_led_init(hw);
1697 if (ret_val) {
1698 hw_dbg(hw, "Error initializing identification LED\n");
1699 return ret_val;
1700 }
1701
1702 /* Setup the receive address. */
1703 e1000e_init_rx_addrs(hw, mac->rar_entry_count);
1704
1705 /* Zero out the Multicast HASH table */
1706 hw_dbg(hw, "Zeroing the MTA\n");
1707 for (i = 0; i < mac->mta_reg_count; i++)
1708 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
1709
1710 /* Setup link and flow control */
1711 ret_val = e1000_setup_link_ich8lan(hw);
1712
1713 /* Set the transmit descriptor write-back policy for both queues */
1714 txdctl = er32(TXDCTL);
1715 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
1716 E1000_TXDCTL_FULL_TX_DESC_WB;
1717 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
1718 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
1719 ew32(TXDCTL, txdctl);
1720 txdctl = er32(TXDCTL1);
1721 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
1722 E1000_TXDCTL_FULL_TX_DESC_WB;
1723 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
1724 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
1725 ew32(TXDCTL1, txdctl);
1726
1727 /* ICH8 has opposite polarity of no_snoop bits.
1728 * By default, we should use snoop behavior. */
1729 if (mac->type == e1000_ich8lan)
1730 snoop = PCIE_ICH8_SNOOP_ALL;
1731 else
1732 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
1733 e1000e_set_pcie_no_snoop(hw, snoop);
1734
1735 ctrl_ext = er32(CTRL_EXT);
1736 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
1737 ew32(CTRL_EXT, ctrl_ext);
1738
1739 /* Clear all of the statistics registers (clear on read). It is
1740 * important that we do this after we have tried to establish link
1741 * because the symbol error count will increment wildly if there
1742 * is no link.
1743 */
1744 e1000_clear_hw_cntrs_ich8lan(hw);
1745
1746 return 0;
1747}
1748/**
1749 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
1750 * @hw: pointer to the HW structure
1751 *
1752 * Sets/Clears required hardware bits necessary for correctly setting up the
1753 * hardware for transmit and receive.
1754 **/
1755static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
1756{
1757 u32 reg;
1758
1759 /* Extended Device Control */
1760 reg = er32(CTRL_EXT);
1761 reg |= (1 << 22);
1762 ew32(CTRL_EXT, reg);
1763
1764 /* Transmit Descriptor Control 0 */
1765 reg = er32(TXDCTL);
1766 reg |= (1 << 22);
1767 ew32(TXDCTL, reg);
1768
1769 /* Transmit Descriptor Control 1 */
1770 reg = er32(TXDCTL1);
1771 reg |= (1 << 22);
1772 ew32(TXDCTL1, reg);
1773
1774 /* Transmit Arbitration Control 0 */
1775 reg = er32(TARC0);
1776 if (hw->mac.type == e1000_ich8lan)
1777 reg |= (1 << 28) | (1 << 29);
1778 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
1779 ew32(TARC0, reg);
1780
1781 /* Transmit Arbitration Control 1 */
1782 reg = er32(TARC1);
1783 if (er32(TCTL) & E1000_TCTL_MULR)
1784 reg &= ~(1 << 28);
1785 else
1786 reg |= (1 << 28);
1787 reg |= (1 << 24) | (1 << 26) | (1 << 30);
1788 ew32(TARC1, reg);
1789
1790 /* Device Status */
1791 if (hw->mac.type == e1000_ich8lan) {
1792 reg = er32(STATUS);
1793 reg &= ~(1 << 31);
1794 ew32(STATUS, reg);
1795 }
1796}
1797
1798/**
1799 * e1000_setup_link_ich8lan - Setup flow control and link settings
1800 * @hw: pointer to the HW structure
1801 *
1802 * Determines which flow control settings to use, then configures flow
1803 * control. Calls the appropriate media-specific link configuration
1804 * function. Assuming the adapter has a valid link partner, a valid link
1805 * should be established. Assumes the hardware has previously been reset
1806 * and the transmitter and receiver are not enabled.
1807 **/
1808static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
1809{
1810 struct e1000_mac_info *mac = &hw->mac;
1811 s32 ret_val;
1812
1813 if (e1000_check_reset_block(hw))
1814 return 0;
1815
1816 /* ICH parts do not have a word in the NVM to determine
1817 * the default flow control setting, so we explicitly
1818 * set it to full.
1819 */
1820 if (mac->fc == e1000_fc_default)
1821 mac->fc = e1000_fc_full;
1822
1823 mac->original_fc = mac->fc;
1824
1825 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc);
1826
1827 /* Continue to configure the copper link. */
1828 ret_val = e1000_setup_copper_link_ich8lan(hw);
1829 if (ret_val)
1830 return ret_val;
1831
1832 ew32(FCTTV, mac->fc_pause_time);
1833
1834 return e1000e_set_fc_watermarks(hw);
1835}
1836
1837/**
1838 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
1839 * @hw: pointer to the HW structure
1840 *
1841 * Configures the kumeran interface to the PHY to wait the appropriate time
1842 * when polling the PHY, then call the generic setup_copper_link to finish
1843 * configuring the copper link.
1844 **/
1845static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
1846{
1847 u32 ctrl;
1848 s32 ret_val;
1849 u16 reg_data;
1850
1851 ctrl = er32(CTRL);
1852 ctrl |= E1000_CTRL_SLU;
1853 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1854 ew32(CTRL, ctrl);
1855
1856 /* Set the mac to wait the maximum time between each iteration
1857 * and increase the max iterations when polling the phy;
1858 * this fixes erroneous timeouts at 10Mbps. */
1859 ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
1860 if (ret_val)
1861 return ret_val;
1862 ret_val = e1000e_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
1863 if (ret_val)
1864 return ret_val;
1865 reg_data |= 0x3F;
1866 ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
1867 if (ret_val)
1868 return ret_val;
1869
1870 if (hw->phy.type == e1000_phy_igp_3) {
1871 ret_val = e1000e_copper_link_setup_igp(hw);
1872 if (ret_val)
1873 return ret_val;
1874 }
1875
1876 return e1000e_setup_copper_link(hw);
1877}
1878
1879/**
1880 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
1881 * @hw: pointer to the HW structure
1882 * @speed: pointer to store current link speed
1883 * @duplex: pointer to store the current link duplex
1884 *
1885 * Calls the generic get_speed_and_duplex to retreive the current link
1886 * information and then calls the Kumeran lock loss workaround for links at
1887 * gigabit speeds.
1888 **/
1889static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
1890 u16 *duplex)
1891{
1892 s32 ret_val;
1893
1894 ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
1895 if (ret_val)
1896 return ret_val;
1897
1898 if ((hw->mac.type == e1000_ich8lan) &&
1899 (hw->phy.type == e1000_phy_igp_3) &&
1900 (*speed == SPEED_1000)) {
1901 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
1902 }
1903
1904 return ret_val;
1905}
1906
1907/**
1908 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
1909 * @hw: pointer to the HW structure
1910 *
1911 * Work-around for 82566 Kumeran PCS lock loss:
1912 * On link status change (i.e. PCI reset, speed change) and link is up and
1913 * speed is gigabit-
1914 * 0) if workaround is optionally disabled do nothing
1915 * 1) wait 1ms for Kumeran link to come up
1916 * 2) check Kumeran Diagnostic register PCS lock loss bit
1917 * 3) if not set the link is locked (all is good), otherwise...
1918 * 4) reset the PHY
1919 * 5) repeat up to 10 times
1920 * Note: this is only called for IGP3 copper when speed is 1gb.
1921 **/
1922static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
1923{
1924 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
1925 u32 phy_ctrl;
1926 s32 ret_val;
1927 u16 i, data;
1928 bool link;
1929
1930 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
1931 return 0;
1932
1933 /* Make sure link is up before proceeding. If not just return.
1934 * Attempting this while link is negotiating fouled up link
1935 * stability */
1936 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
1937 if (!link)
1938 return 0;
1939
1940 for (i = 0; i < 10; i++) {
1941 /* read once to clear */
1942 ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
1943 if (ret_val)
1944 return ret_val;
1945 /* and again to get new status */
1946 ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
1947 if (ret_val)
1948 return ret_val;
1949
1950 /* check for PCS lock */
1951 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
1952 return 0;
1953
1954 /* Issue PHY reset */
1955 e1000_phy_hw_reset(hw);
1956 mdelay(5);
1957 }
1958 /* Disable GigE link negotiation */
1959 phy_ctrl = er32(PHY_CTRL);
1960 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
1961 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
1962 ew32(PHY_CTRL, phy_ctrl);
1963
1964 /* Call gig speed drop workaround on Giga disable before accessing
1965 * any PHY registers */
1966 e1000e_gig_downshift_workaround_ich8lan(hw);
1967
1968 /* unable to acquire PCS lock */
1969 return -E1000_ERR_PHY;
1970}
1971
1972/**
1973 * e1000_set_kmrn_lock_loss_workaound_ich8lan - Set Kumeran workaround state
1974 * @hw: pointer to the HW structure
1975 * @state: boolean value used to set the current Kumaran workaround state
1976 *
1977 * If ICH8, set the current Kumeran workaround state (enabled - TRUE
1978 * /disabled - FALSE).
1979 **/
1980void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
1981 bool state)
1982{
1983 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
1984
1985 if (hw->mac.type != e1000_ich8lan) {
1986 hw_dbg(hw, "Workaround applies to ICH8 only.\n");
1987 return;
1988 }
1989
1990 dev_spec->kmrn_lock_loss_workaround_enabled = state;
1991}
1992
1993/**
1994 * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
1995 * @hw: pointer to the HW structure
1996 *
1997 * Workaround for 82566 power-down on D3 entry:
1998 * 1) disable gigabit link
1999 * 2) write VR power-down enable
2000 * 3) read it back
2001 * Continue if successful, else issue LCD reset and repeat
2002 **/
2003void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
2004{
2005 u32 reg;
2006 u16 data;
2007 u8 retry = 0;
2008
2009 if (hw->phy.type != e1000_phy_igp_3)
2010 return;
2011
2012 /* Try the workaround twice (if needed) */
2013 do {
2014 /* Disable link */
2015 reg = er32(PHY_CTRL);
2016 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
2017 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
2018 ew32(PHY_CTRL, reg);
2019
2020 /* Call gig speed drop workaround on Giga disable before
2021 * accessing any PHY registers */
2022 if (hw->mac.type == e1000_ich8lan)
2023 e1000e_gig_downshift_workaround_ich8lan(hw);
2024
2025 /* Write VR power-down enable */
2026 e1e_rphy(hw, IGP3_VR_CTRL, &data);
2027 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
2028 e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
2029
2030 /* Read it back and test */
2031 e1e_rphy(hw, IGP3_VR_CTRL, &data);
2032 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
2033 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
2034 break;
2035
2036 /* Issue PHY reset and repeat at most one more time */
2037 reg = er32(CTRL);
2038 ew32(CTRL, reg | E1000_CTRL_PHY_RST);
2039 retry++;
2040 } while (retry);
2041}
2042
2043/**
2044 * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
2045 * @hw: pointer to the HW structure
2046 *
2047 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
2048 * LPLU, Giga disable, MDIC PHY reset):
2049 * 1) Set Kumeran Near-end loopback
2050 * 2) Clear Kumeran Near-end loopback
2051 * Should only be called for ICH8[m] devices with IGP_3 Phy.
2052 **/
2053void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
2054{
2055 s32 ret_val;
2056 u16 reg_data;
2057
2058 if ((hw->mac.type != e1000_ich8lan) ||
2059 (hw->phy.type != e1000_phy_igp_3))
2060 return;
2061
2062 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
2063 &reg_data);
2064 if (ret_val)
2065 return;
2066 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
2067 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
2068 reg_data);
2069 if (ret_val)
2070 return;
2071 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
2072 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
2073 reg_data);
2074}
2075
2076/**
2077 * e1000_cleanup_led_ich8lan - Restore the default LED operation
2078 * @hw: pointer to the HW structure
2079 *
2080 * Return the LED back to the default configuration.
2081 **/
2082static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
2083{
2084 if (hw->phy.type == e1000_phy_ife)
2085 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
2086
2087 ew32(LEDCTL, hw->mac.ledctl_default);
2088 return 0;
2089}
2090
2091/**
2092 * e1000_led_on_ich8lan - Turn LED's on
2093 * @hw: pointer to the HW structure
2094 *
2095 * Turn on the LED's.
2096 **/
2097static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
2098{
2099 if (hw->phy.type == e1000_phy_ife)
2100 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
2101 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
2102
2103 ew32(LEDCTL, hw->mac.ledctl_mode2);
2104 return 0;
2105}
2106
2107/**
2108 * e1000_led_off_ich8lan - Turn LED's off
2109 * @hw: pointer to the HW structure
2110 *
2111 * Turn off the LED's.
2112 **/
2113static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
2114{
2115 if (hw->phy.type == e1000_phy_ife)
2116 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
2117 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
2118
2119 ew32(LEDCTL, hw->mac.ledctl_mode1);
2120 return 0;
2121}
2122
2123/**
2124 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
2125 * @hw: pointer to the HW structure
2126 *
2127 * Clears hardware counters specific to the silicon family and calls
2128 * clear_hw_cntrs_generic to clear all general purpose counters.
2129 **/
2130static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
2131{
2132 u32 temp;
2133
2134 e1000e_clear_hw_cntrs_base(hw);
2135
2136 temp = er32(ALGNERRC);
2137 temp = er32(RXERRC);
2138 temp = er32(TNCRS);
2139 temp = er32(CEXTERR);
2140 temp = er32(TSCTC);
2141 temp = er32(TSCTFC);
2142
2143 temp = er32(MGTPRC);
2144 temp = er32(MGTPDC);
2145 temp = er32(MGTPTC);
2146
2147 temp = er32(IAC);
2148 temp = er32(ICRXOC);
2149
2150}
2151
/* MAC-layer operations shared by the ICH8/ICH9 (ich8lan) parts.  Entries
 * ending in _ich8lan are family-specific; the rest are generic e1000e
 * implementations. */
static struct e1000_mac_operations ich8_mac_ops = {
	.mng_mode_enab		= E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT,
	.check_for_link		= e1000e_check_for_copper_link,
	.cleanup_led		= e1000_cleanup_led_ich8lan,
	.clear_hw_cntrs		= e1000_clear_hw_cntrs_ich8lan,
	.get_bus_info		= e1000_get_bus_info_ich8lan,
	.get_link_up_info	= e1000_get_link_up_info_ich8lan,
	.led_on			= e1000_led_on_ich8lan,
	.led_off		= e1000_led_off_ich8lan,
	.mc_addr_list_update	= e1000e_mc_addr_list_update_generic,
	.reset_hw		= e1000_reset_hw_ich8lan,
	.init_hw		= e1000_init_hw_ich8lan,
	.setup_link		= e1000_setup_link_ich8lan,
	.setup_physical_interface = e1000_setup_copper_link_ich8lan,
};
2167
/* PHY operations for the ich8lan family.  PHY register access goes
 * through the IGP read/write helpers; acquire/release serialize access
 * via the software flag. */
static struct e1000_phy_operations ich8_phy_ops = {
	.acquire_phy		= e1000_acquire_swflag_ich8lan,
	.check_reset_block	= e1000_check_reset_block_ich8lan,
	.commit_phy		= NULL,	/* no commit step needed on this family */
	.force_speed_duplex	= e1000_phy_force_speed_duplex_ich8lan,
	.get_cfg_done		= e1000e_get_cfg_done,
	.get_cable_length	= e1000e_get_cable_length_igp_2,
	.get_phy_info		= e1000_get_phy_info_ich8lan,
	.read_phy_reg		= e1000e_read_phy_reg_igp,
	.release_phy		= e1000_release_swflag_ich8lan,
	.reset_phy		= e1000_phy_hw_reset_ich8lan,
	.set_d0_lplu_state	= e1000_set_d0_lplu_state_ich8lan,
	.set_d3_lplu_state	= e1000_set_d3_lplu_state_ich8lan,
	.write_phy_reg		= e1000e_write_phy_reg_igp,
};
2183
/* NVM operations for the ich8lan family.  NVM access shares the same
 * software-flag acquire/release used for the PHY. */
static struct e1000_nvm_operations ich8_nvm_ops = {
	.acquire_nvm		= e1000_acquire_swflag_ich8lan,
	.read_nvm		= e1000_read_nvm_ich8lan,
	.release_nvm		= e1000_release_swflag_ich8lan,
	.update_nvm		= e1000_update_nvm_checksum_ich8lan,
	.valid_led_default	= e1000_valid_led_default_ich8lan,
	.validate_nvm		= e1000_validate_nvm_checksum_ich8lan,
	.write_nvm		= e1000_write_nvm_ich8lan,
};
2193
/* Board description for ICH8-based adapters: capability flags plus the
 * shared ich8lan operation tables. */
struct e1000_info e1000_ich8_info = {
	.mac			= e1000_ich8lan,
	.flags			= FLAG_HAS_WOL
				  | FLAG_RX_CSUM_ENABLED
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 8,	/* presumably packet buffer allocation (KB) — confirm against hw init */
	.get_invariants		= e1000_get_invariants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
2208
/* Board description for ICH9-based adapters.  Same operation tables as
 * ICH8, but with jumbo frames and ERT support added and a larger pba. */
struct e1000_info e1000_ich9_info = {
	.mac			= e1000_ich9lan,
	.flags			= FLAG_HAS_JUMBO_FRAMES
				  | FLAG_HAS_WOL
				  | FLAG_RX_CSUM_ENABLED
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_ERT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 10,	/* presumably packet buffer allocation (KB) — confirm against hw init */
	.get_invariants		= e1000_get_invariants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
2225
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
new file mode 100644
index 000000000000..3bbfe605e111
--- /dev/null
+++ b/drivers/net/e1000e/lib.c
@@ -0,0 +1,2487 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include <linux/netdevice.h>
30#include <linux/ethtool.h>
31#include <linux/delay.h>
32#include <linux/pci.h>
33
34#include "e1000.h"
35
/* Manageability firmware operating modes.  The active mode is derived
 * from hardware state elsewhere in the driver (e.g. the FWSM register);
 * exact semantics of each mode come from the manageability firmware. */
enum e1000_mng_mode {
	e1000_mng_mode_none = 0,	/* no manageability firmware active */
	e1000_mng_mode_asf,
	e1000_mng_mode_pt,
	e1000_mng_mode_ipmi,
	e1000_mng_mode_host_if_only
};
43
/* FACTPS register bit MNGCG — NOTE(review): presumably manageability
 * clock gating; confirm against the hardware datasheet. */
#define E1000_FACTPS_MNGCG		0x20000000

/* Intel(R) Active Management Technology signature: the bytes spell
 * "IAMT" when stored little-endian. */
#define E1000_IAMT_SIGNATURE		0x544D4149
48
49/**
50 * e1000e_get_bus_info_pcie - Get PCIe bus information
51 * @hw: pointer to the HW structure
52 *
53 * Determines and stores the system bus information for a particular
54 * network interface. The following bus information is determined and stored:
55 * bus speed, bus width, type (PCIe), and PCIe function.
56 **/
57s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
58{
59 struct e1000_bus_info *bus = &hw->bus;
60 struct e1000_adapter *adapter = hw->adapter;
61 u32 status;
62 u16 pcie_link_status, pci_header_type, cap_offset;
63
64 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
65 if (!cap_offset) {
66 bus->width = e1000_bus_width_unknown;
67 } else {
68 pci_read_config_word(adapter->pdev,
69 cap_offset + PCIE_LINK_STATUS,
70 &pcie_link_status);
71 bus->width = (enum e1000_bus_width)((pcie_link_status &
72 PCIE_LINK_WIDTH_MASK) >>
73 PCIE_LINK_WIDTH_SHIFT);
74 }
75
76 pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
77 &pci_header_type);
78 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
79 status = er32(STATUS);
80 bus->func = (status & E1000_STATUS_FUNC_MASK)
81 >> E1000_STATUS_FUNC_SHIFT;
82 } else {
83 bus->func = 0;
84 }
85
86 return 0;
87}
88
/**
 * e1000e_write_vfta - Write value to VLAN filter table
 * @hw: pointer to the HW structure
 * @offset: register offset in VLAN filter table
 * @value: register value written to VLAN filter table
 *
 * Writes value at the given offset in the register array which stores
 * the VLAN filter table.
 **/
void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
	/* Flush so the table write reaches the hardware before we return. */
	e1e_flush();
}
103
104/**
105 * e1000e_init_rx_addrs - Initialize receive address's
106 * @hw: pointer to the HW structure
107 * @rar_count: receive address registers
108 *
109 * Setups the receive address registers by setting the base receive address
110 * register to the devices MAC address and clearing all the other receive
111 * address registers to 0.
112 **/
113void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
114{
115 u32 i;
116
117 /* Setup the receive address */
118 hw_dbg(hw, "Programming MAC Address into RAR[0]\n");
119
120 e1000e_rar_set(hw, hw->mac.addr, 0);
121
122 /* Zero out the other (rar_entry_count - 1) receive addresses */
123 hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1);
124 for (i = 1; i < rar_count; i++) {
125 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
126 e1e_flush();
127 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
128 e1e_flush();
129 }
130}
131
132/**
133 * e1000e_rar_set - Set receive address register
134 * @hw: pointer to the HW structure
135 * @addr: pointer to the receive address
136 * @index: receive address array register
137 *
138 * Sets the receive address array register at index to the address passed
139 * in by addr.
140 **/
141void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
142{
143 u32 rar_low, rar_high;
144
145 /* HW expects these in little endian so we reverse the byte order
146 * from network order (big endian) to little endian
147 */
148 rar_low = ((u32) addr[0] |
149 ((u32) addr[1] << 8) |
150 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
151
152 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
153
154 rar_high |= E1000_RAH_AV;
155
156 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
157 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
158}
159
160/**
161 * e1000_mta_set - Set multicast filter table address
162 * @hw: pointer to the HW structure
163 * @hash_value: determines the MTA register and bit to set
164 *
165 * The multicast table address is a register array of 32-bit registers.
166 * The hash_value is used to determine what register the bit is in, the
167 * current value is read, the new bit is OR'd in and the new value is
168 * written back into the register.
169 **/
170static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
171{
172 u32 hash_bit, hash_reg, mta;
173
174 /* The MTA is a register array of 32-bit registers. It is
175 * treated like an array of (32*mta_reg_count) bits. We want to
176 * set bit BitArray[hash_value]. So we figure out what register
177 * the bit is in, read it, OR in the new bit, then write
178 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
179 * mask to bits 31:5 of the hash value which gives us the
180 * register we're modifying. The hash bit within that register
181 * is determined by the lower 5 bits of the hash value.
182 */
183 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
184 hash_bit = hash_value & 0x1F;
185
186 mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
187
188 mta |= (1 << hash_bit);
189
190 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
191 e1e_flush();
192}
193
194/**
195 * e1000_hash_mc_addr - Generate a multicast hash value
196 * @hw: pointer to the HW structure
197 * @mc_addr: pointer to a multicast address
198 *
199 * Generates a multicast address hash value which is used to determine
200 * the multicast filter table array address and new table value. See
201 * e1000_mta_set_generic()
202 **/
203static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
204{
205 u32 hash_value, hash_mask;
206 u8 bit_shift = 0;
207
208 /* Register count multiplied by bits per register */
209 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
210
211 /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
212 * where 0xFF would still fall within the hash mask. */
213 while (hash_mask >> bit_shift != 0xFF)
214 bit_shift++;
215
216 /* The portion of the address that is used for the hash table
217 * is determined by the mc_filter_type setting.
218 * The algorithm is such that there is a total of 8 bits of shifting.
219 * The bit_shift for a mc_filter_type of 0 represents the number of
220 * left-shifts where the MSB of mc_addr[5] would still fall within
221 * the hash_mask. Case 0 does this exactly. Since there are a total
222 * of 8 bits of shifting, then mc_addr[4] will shift right the
223 * remaining number of bits. Thus 8 - bit_shift. The rest of the
224 * cases are a variation of this algorithm...essentially raising the
225 * number of bits to shift mc_addr[5] left, while still keeping the
226 * 8-bit shifting total.
227 */
228 /* For example, given the following Destination MAC Address and an
229 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
230 * we can see that the bit_shift for case 0 is 4. These are the hash
231 * values resulting from each mc_filter_type...
232 * [0] [1] [2] [3] [4] [5]
233 * 01 AA 00 12 34 56
234 * LSB MSB
235 *
236 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
237 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
238 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
239 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
240 */
241 switch (hw->mac.mc_filter_type) {
242 default:
243 case 0:
244 break;
245 case 1:
246 bit_shift += 1;
247 break;
248 case 2:
249 bit_shift += 2;
250 break;
251 case 3:
252 bit_shift += 4;
253 break;
254 }
255
256 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
257 (((u16) mc_addr[5]) << bit_shift)));
258
259 return hash_value;
260}
261
262/**
263 * e1000e_mc_addr_list_update_generic - Update Multicast addresses
264 * @hw: pointer to the HW structure
265 * @mc_addr_list: array of multicast addresses to program
266 * @mc_addr_count: number of multicast addresses to program
267 * @rar_used_count: the first RAR register free to program
268 * @rar_count: total number of supported Receive Address Registers
269 *
270 * Updates the Receive Address Registers and Multicast Table Array.
271 * The caller must have a packed mc_addr_list of multicast addresses.
272 * The parameter rar_count will usually be hw->mac.rar_entry_count
273 * unless there are workarounds that change this.
274 **/
275void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw,
276 u8 *mc_addr_list, u32 mc_addr_count,
277 u32 rar_used_count, u32 rar_count)
278{
279 u32 hash_value;
280 u32 i;
281
282 /* Load the first set of multicast addresses into the exact
283 * filters (RAR). If there are not enough to fill the RAR
284 * array, clear the filters.
285 */
286 for (i = rar_used_count; i < rar_count; i++) {
287 if (mc_addr_count) {
288 e1000e_rar_set(hw, mc_addr_list, i);
289 mc_addr_count--;
290 mc_addr_list += ETH_ALEN;
291 } else {
292 E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
293 e1e_flush();
294 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
295 e1e_flush();
296 }
297 }
298
299 /* Clear the old settings from the MTA */
300 hw_dbg(hw, "Clearing MTA\n");
301 for (i = 0; i < hw->mac.mta_reg_count; i++) {
302 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
303 e1e_flush();
304 }
305
306 /* Load any remaining multicast addresses into the hash table. */
307 for (; mc_addr_count > 0; mc_addr_count--) {
308 hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
309 hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
310 e1000_mta_set(hw, hash_value);
311 mc_addr_list += ETH_ALEN;
312 }
313}
314
315/**
316 * e1000e_clear_hw_cntrs_base - Clear base hardware counters
317 * @hw: pointer to the HW structure
318 *
319 * Clears the base hardware counters by reading the counter registers.
320 **/
321void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
322{
323 u32 temp;
324
325 temp = er32(CRCERRS);
326 temp = er32(SYMERRS);
327 temp = er32(MPC);
328 temp = er32(SCC);
329 temp = er32(ECOL);
330 temp = er32(MCC);
331 temp = er32(LATECOL);
332 temp = er32(COLC);
333 temp = er32(DC);
334 temp = er32(SEC);
335 temp = er32(RLEC);
336 temp = er32(XONRXC);
337 temp = er32(XONTXC);
338 temp = er32(XOFFRXC);
339 temp = er32(XOFFTXC);
340 temp = er32(FCRUC);
341 temp = er32(GPRC);
342 temp = er32(BPRC);
343 temp = er32(MPRC);
344 temp = er32(GPTC);
345 temp = er32(GORCL);
346 temp = er32(GORCH);
347 temp = er32(GOTCL);
348 temp = er32(GOTCH);
349 temp = er32(RNBC);
350 temp = er32(RUC);
351 temp = er32(RFC);
352 temp = er32(ROC);
353 temp = er32(RJC);
354 temp = er32(TORL);
355 temp = er32(TORH);
356 temp = er32(TOTL);
357 temp = er32(TOTH);
358 temp = er32(TPR);
359 temp = er32(TPT);
360 temp = er32(MPTC);
361 temp = er32(BPTC);
362}
363
364/**
365 * e1000e_check_for_copper_link - Check for link (Copper)
366 * @hw: pointer to the HW structure
367 *
368 * Checks to see of the link status of the hardware has changed. If a
369 * change in link status has been detected, then we read the PHY registers
370 * to get the current speed/duplex if link exists.
371 **/
372s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
373{
374 struct e1000_mac_info *mac = &hw->mac;
375 s32 ret_val;
376 bool link;
377
378 /* We only want to go out to the PHY registers to see if Auto-Neg
379 * has completed and/or if our link status has changed. The
380 * get_link_status flag is set upon receiving a Link Status
381 * Change or Rx Sequence Error interrupt.
382 */
383 if (!mac->get_link_status)
384 return 0;
385
386 /* First we want to see if the MII Status Register reports
387 * link. If so, then we want to get the current speed/duplex
388 * of the PHY.
389 */
390 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
391 if (ret_val)
392 return ret_val;
393
394 if (!link)
395 return ret_val; /* No link detected */
396
397 mac->get_link_status = 0;
398
399 /* Check if there was DownShift, must be checked
400 * immediately after link-up */
401 e1000e_check_downshift(hw);
402
403 /* If we are forcing speed/duplex, then we simply return since
404 * we have already determined whether we have link or not.
405 */
406 if (!mac->autoneg) {
407 ret_val = -E1000_ERR_CONFIG;
408 return ret_val;
409 }
410
411 /* Auto-Neg is enabled. Auto Speed Detection takes care
412 * of MAC speed/duplex configuration. So we only need to
413 * configure Collision Distance in the MAC.
414 */
415 e1000e_config_collision_dist(hw);
416
417 /* Configure Flow Control now that Auto-Neg has completed.
418 * First, we need to restore the desired flow control
419 * settings because we may have had to re-autoneg with a
420 * different link partner.
421 */
422 ret_val = e1000e_config_fc_after_link_up(hw);
423 if (ret_val) {
424 hw_dbg(hw, "Error configuring flow control\n");
425 }
426
427 return ret_val;
428}
429
430/**
431 * e1000e_check_for_fiber_link - Check for link (Fiber)
432 * @hw: pointer to the HW structure
433 *
434 * Checks for link up on the hardware. If link is not up and we have
435 * a signal, then we need to force link up.
436 **/
437s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
438{
439 struct e1000_mac_info *mac = &hw->mac;
440 u32 rxcw;
441 u32 ctrl;
442 u32 status;
443 s32 ret_val;
444
445 ctrl = er32(CTRL);
446 status = er32(STATUS);
447 rxcw = er32(RXCW);
448
449 /* If we don't have link (auto-negotiation failed or link partner
450 * cannot auto-negotiate), the cable is plugged in (we have signal),
451 * and our link partner is not trying to auto-negotiate with us (we
452 * are receiving idles or data), we need to force link up. We also
453 * need to give auto-negotiation time to complete, in case the cable
454 * was just plugged in. The autoneg_failed flag does this.
455 */
456 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
457 if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
458 (!(rxcw & E1000_RXCW_C))) {
459 if (mac->autoneg_failed == 0) {
460 mac->autoneg_failed = 1;
461 return 0;
462 }
463 hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
464
465 /* Disable auto-negotiation in the TXCW register */
466 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
467
468 /* Force link-up and also force full-duplex. */
469 ctrl = er32(CTRL);
470 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
471 ew32(CTRL, ctrl);
472
473 /* Configure Flow Control after forcing link up. */
474 ret_val = e1000e_config_fc_after_link_up(hw);
475 if (ret_val) {
476 hw_dbg(hw, "Error configuring flow control\n");
477 return ret_val;
478 }
479 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
480 /* If we are forcing link and we are receiving /C/ ordered
481 * sets, re-enable auto-negotiation in the TXCW register
482 * and disable forced link in the Device Control register
483 * in an attempt to auto-negotiate with our link partner.
484 */
485 hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
486 ew32(TXCW, mac->txcw);
487 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
488
489 mac->serdes_has_link = 1;
490 }
491
492 return 0;
493}
494
/**
 * e1000e_check_for_serdes_link - Check for link (Serdes)
 * @hw: pointer to the HW structure
 *
 * Checks for link up on the hardware.  If link is not up and the link
 * partner is not sending /C/ ordered sets, force link up (after giving
 * auto-negotiation one poll interval to complete).  For forced links to
 * non-autonegotiating switches, link state is derived from MAC
 * synchronization (RXCW SYNCH/IV bits) instead.
 **/
s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 rxcw;
	u32 ctrl;
	u32 status;
	s32 ret_val;

	ctrl = er32(CTRL);
	status = er32(STATUS);
	rxcw = er32(RXCW);

	/* If we don't have link (auto-negotiation failed or link partner
	 * cannot auto-negotiate), and our link partner is not trying to
	 * auto-negotiate with us (we are receiving idles or data),
	 * we need to force link up. We also need to give auto-negotiation
	 * time to complete (the autoneg_failed flag provides one extra
	 * poll interval before forcing).
	 */
	if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
		if (mac->autoneg_failed == 0) {
			mac->autoneg_failed = 1;
			return 0;
		}
		hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");

		/* Disable auto-negotiation in the TXCW register */
		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));

		/* Force link-up and also force full-duplex. */
		ctrl = er32(CTRL);
		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
		ew32(CTRL, ctrl);

		/* Configure Flow Control after forcing link up. */
		ret_val = e1000e_config_fc_after_link_up(hw);
		if (ret_val) {
			hw_dbg(hw, "Error configuring flow control\n");
			return ret_val;
		}
	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
		/* If we are forcing link and we are receiving /C/ ordered
		 * sets, re-enable auto-negotiation in the TXCW register
		 * and disable forced link in the Device Control register
		 * in an attempt to auto-negotiate with our link partner.
		 */
		hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
		ew32(TXCW, mac->txcw);
		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));

		mac->serdes_has_link = 1;
	} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
		/* If we force link for non-auto-negotiation switch, check
		 * link status based on MAC synchronization for internal
		 * serdes media type.
		 */
		/* SYNCH bit and IV bit are sticky. */
		udelay(10);
		if (E1000_RXCW_SYNCH & er32(RXCW)) {
			/* NOTE(review): the IV test below uses the RXCW
			 * value read at function entry, while SYNCH above
			 * uses a fresh read taken after the 10us delay —
			 * confirm this mix of stale/fresh reads is
			 * intentional. */
			if (!(rxcw & E1000_RXCW_IV)) {
				mac->serdes_has_link = 1;
				hw_dbg(hw, "SERDES: Link is up.\n");
			}
		} else {
			mac->serdes_has_link = 0;
			hw_dbg(hw, "SERDES: Link is down.\n");
		}
	}

	/* When autoneg is enabled in TXCW, report link straight from the
	 * STATUS register's link-up bit. */
	if (E1000_TXCW_ANE & er32(TXCW)) {
		status = er32(STATUS);
		mac->serdes_has_link = (status & E1000_STATUS_LU);
	}

	return 0;
}
578
579/**
580 * e1000_set_default_fc_generic - Set flow control default values
581 * @hw: pointer to the HW structure
582 *
583 * Read the EEPROM for the default values for flow control and store the
584 * values.
585 **/
586static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
587{
588 struct e1000_mac_info *mac = &hw->mac;
589 s32 ret_val;
590 u16 nvm_data;
591
592 if (mac->fc != e1000_fc_default)
593 return 0;
594
595 /* Read and store word 0x0F of the EEPROM. This word contains bits
596 * that determine the hardware's default PAUSE (flow control) mode,
597 * a bit that determines whether the HW defaults to enabling or
598 * disabling auto-negotiation, and the direction of the
599 * SW defined pins. If there is no SW over-ride of the flow
600 * control setting, then the variable hw->fc will
601 * be initialized based on a value in the EEPROM.
602 */
603 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
604
605 if (ret_val) {
606 hw_dbg(hw, "NVM Read Error\n");
607 return ret_val;
608 }
609
610 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
611 mac->fc = e1000_fc_none;
612 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
613 NVM_WORD0F_ASM_DIR)
614 mac->fc = e1000_fc_tx_pause;
615 else
616 mac->fc = e1000_fc_full;
617
618 return 0;
619}
620
621/**
622 * e1000e_setup_link - Setup flow control and link settings
623 * @hw: pointer to the HW structure
624 *
625 * Determines which flow control settings to use, then configures flow
626 * control. Calls the appropriate media-specific link configuration
627 * function. Assuming the adapter has a valid link partner, a valid link
628 * should be established. Assumes the hardware has previously been reset
629 * and the transmitter and receiver are not enabled.
630 **/
631s32 e1000e_setup_link(struct e1000_hw *hw)
632{
633 struct e1000_mac_info *mac = &hw->mac;
634 s32 ret_val;
635
636 /* In the case of the phy reset being blocked, we already have a link.
637 * We do not need to set it up again.
638 */
639 if (e1000_check_reset_block(hw))
640 return 0;
641
642 ret_val = e1000_set_default_fc_generic(hw);
643 if (ret_val)
644 return ret_val;
645
646 /* We want to save off the original Flow Control configuration just
647 * in case we get disconnected and then reconnected into a different
648 * hub or switch with different Flow Control capabilities.
649 */
650 mac->original_fc = mac->fc;
651
652 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc);
653
654 /* Call the necessary media_type subroutine to configure the link. */
655 ret_val = mac->ops.setup_physical_interface(hw);
656 if (ret_val)
657 return ret_val;
658
659 /* Initialize the flow control address, type, and PAUSE timer
660 * registers to their default values. This is done even if flow
661 * control is disabled, because it does not hurt anything to
662 * initialize these registers.
663 */
664 hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n");
665 ew32(FCT, FLOW_CONTROL_TYPE);
666 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
667 ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
668
669 ew32(FCTTV, mac->fc_pause_time);
670
671 return e1000e_set_fc_watermarks(hw);
672}
673
674/**
675 * e1000_commit_fc_settings_generic - Configure flow control
676 * @hw: pointer to the HW structure
677 *
678 * Write the flow control settings to the Transmit Config Word Register (TXCW)
679 * base on the flow control settings in e1000_mac_info.
680 **/
681static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
682{
683 struct e1000_mac_info *mac = &hw->mac;
684 u32 txcw;
685
686 /* Check for a software override of the flow control settings, and
687 * setup the device accordingly. If auto-negotiation is enabled, then
688 * software will have to set the "PAUSE" bits to the correct value in
689 * the Transmit Config Word Register (TXCW) and re-start auto-
690 * negotiation. However, if auto-negotiation is disabled, then
691 * software will have to manually configure the two flow control enable
692 * bits in the CTRL register.
693 *
694 * The possible values of the "fc" parameter are:
695 * 0: Flow control is completely disabled
696 * 1: Rx flow control is enabled (we can receive pause frames,
697 * but not send pause frames).
698 * 2: Tx flow control is enabled (we can send pause frames but we
699 * do not support receiving pause frames).
700 * 3: Both Rx and TX flow control (symmetric) are enabled.
701 */
702 switch (mac->fc) {
703 case e1000_fc_none:
704 /* Flow control completely disabled by a software over-ride. */
705 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
706 break;
707 case e1000_fc_rx_pause:
708 /* RX Flow control is enabled and TX Flow control is disabled
709 * by a software over-ride. Since there really isn't a way to
710 * advertise that we are capable of RX Pause ONLY, we will
711 * advertise that we support both symmetric and asymmetric RX
712 * PAUSE. Later, we will disable the adapter's ability to send
713 * PAUSE frames.
714 */
715 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
716 break;
717 case e1000_fc_tx_pause:
718 /* TX Flow control is enabled, and RX Flow control is disabled,
719 * by a software over-ride.
720 */
721 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
722 break;
723 case e1000_fc_full:
724 /* Flow control (both RX and TX) is enabled by a software
725 * over-ride.
726 */
727 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
728 break;
729 default:
730 hw_dbg(hw, "Flow control param set incorrectly\n");
731 return -E1000_ERR_CONFIG;
732 break;
733 }
734
735 ew32(TXCW, txcw);
736 mac->txcw = txcw;
737
738 return 0;
739}
740
741/**
742 * e1000_poll_fiber_serdes_link_generic - Poll for link up
743 * @hw: pointer to the HW structure
744 *
745 * Polls for link up by reading the status register, if link fails to come
746 * up with auto-negotiation, then the link is forced if a signal is detected.
747 **/
748static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
749{
750 struct e1000_mac_info *mac = &hw->mac;
751 u32 i, status;
752 s32 ret_val;
753
754 /* If we have a signal (the cable is plugged in, or assumed true for
755 * serdes media) then poll for a "Link-Up" indication in the Device
756 * Status Register. Time-out if a link isn't seen in 500 milliseconds
757 * seconds (Auto-negotiation should complete in less than 500
758 * milliseconds even if the other end is doing it in SW).
759 */
760 for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
761 msleep(10);
762 status = er32(STATUS);
763 if (status & E1000_STATUS_LU)
764 break;
765 }
766 if (i == FIBER_LINK_UP_LIMIT) {
767 hw_dbg(hw, "Never got a valid link from auto-neg!!!\n");
768 mac->autoneg_failed = 1;
769 /* AutoNeg failed to achieve a link, so we'll call
770 * mac->check_for_link. This routine will force the
771 * link up if we detect a signal. This will allow us to
772 * communicate with non-autonegotiating link partners.
773 */
774 ret_val = mac->ops.check_for_link(hw);
775 if (ret_val) {
776 hw_dbg(hw, "Error while checking for link\n");
777 return ret_val;
778 }
779 mac->autoneg_failed = 0;
780 } else {
781 mac->autoneg_failed = 0;
782 hw_dbg(hw, "Valid Link Found\n");
783 }
784
785 return 0;
786}
787
788/**
789 * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
790 * @hw: pointer to the HW structure
791 *
792 * Configures collision distance and flow control for fiber and serdes
793 * links. Upon successful setup, poll for link.
794 **/
795s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
796{
797 u32 ctrl;
798 s32 ret_val;
799
800 ctrl = er32(CTRL);
801
802 /* Take the link out of reset */
803 ctrl &= ~E1000_CTRL_LRST;
804
805 e1000e_config_collision_dist(hw);
806
807 ret_val = e1000_commit_fc_settings_generic(hw);
808 if (ret_val)
809 return ret_val;
810
811 /* Since auto-negotiation is enabled, take the link out of reset (the
812 * link will be in reset, because we previously reset the chip). This
813 * will restart auto-negotiation. If auto-negotiation is successful
814 * then the link-up status bit will be set and the flow control enable
815 * bits (RFCE and TFCE) will be set according to their negotiated value.
816 */
817 hw_dbg(hw, "Auto-negotiation enabled\n");
818
819 ew32(CTRL, ctrl);
820 e1e_flush();
821 msleep(1);
822
823 /* For these adapters, the SW defineable pin 1 is set when the optics
824 * detect a signal. If we have a signal, then poll for a "Link-Up"
825 * indication.
826 */
827 if (hw->media_type == e1000_media_type_internal_serdes ||
828 (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
829 ret_val = e1000_poll_fiber_serdes_link_generic(hw);
830 } else {
831 hw_dbg(hw, "No signal detected\n");
832 }
833
834 return 0;
835}
836
837/**
838 * e1000e_config_collision_dist - Configure collision distance
839 * @hw: pointer to the HW structure
840 *
841 * Configures the collision distance to the default value and is used
842 * during link setup. Currently no func pointer exists and all
843 * implementations are handled in the generic version of this function.
844 **/
845void e1000e_config_collision_dist(struct e1000_hw *hw)
846{
847 u32 tctl;
848
849 tctl = er32(TCTL);
850
851 tctl &= ~E1000_TCTL_COLD;
852 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
853
854 ew32(TCTL, tctl);
855 e1e_flush();
856}
857
858/**
859 * e1000e_set_fc_watermarks - Set flow control high/low watermarks
860 * @hw: pointer to the HW structure
861 *
862 * Sets the flow control high/low threshold (watermark) registers. If
863 * flow control XON frame transmission is enabled, then set XON frame
864 * tansmission as well.
865 **/
866s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
867{
868 struct e1000_mac_info *mac = &hw->mac;
869 u32 fcrtl = 0, fcrth = 0;
870
871 /* Set the flow control receive threshold registers. Normally,
872 * these registers will be set to a default threshold that may be
873 * adjusted later by the driver's runtime code. However, if the
874 * ability to transmit pause frames is not enabled, then these
875 * registers will be set to 0.
876 */
877 if (mac->fc & e1000_fc_tx_pause) {
878 /* We need to set up the Receive Threshold high and low water
879 * marks as well as (optionally) enabling the transmission of
880 * XON frames.
881 */
882 fcrtl = mac->fc_low_water;
883 fcrtl |= E1000_FCRTL_XONE;
884 fcrth = mac->fc_high_water;
885 }
886 ew32(FCRTL, fcrtl);
887 ew32(FCRTH, fcrth);
888
889 return 0;
890}
891
892/**
893 * e1000e_force_mac_fc - Force the MAC's flow control settings
894 * @hw: pointer to the HW structure
895 *
896 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
897 * device control register to reflect the adapter settings. TFCE and RFCE
898 * need to be explicitly set by software when a copper PHY is used because
899 * autonegotiation is managed by the PHY rather than the MAC. Software must
900 * also configure these bits when link is forced on a fiber connection.
901 **/
902s32 e1000e_force_mac_fc(struct e1000_hw *hw)
903{
904 struct e1000_mac_info *mac = &hw->mac;
905 u32 ctrl;
906
907 ctrl = er32(CTRL);
908
909 /* Because we didn't get link via the internal auto-negotiation
910 * mechanism (we either forced link or we got link via PHY
911 * auto-neg), we have to manually enable/disable transmit an
912 * receive flow control.
913 *
914 * The "Case" statement below enables/disable flow control
915 * according to the "mac->fc" parameter.
916 *
917 * The possible values of the "fc" parameter are:
918 * 0: Flow control is completely disabled
919 * 1: Rx flow control is enabled (we can receive pause
920 * frames but not send pause frames).
921 * 2: Tx flow control is enabled (we can send pause frames
922 * frames but we do not receive pause frames).
923 * 3: Both Rx and TX flow control (symmetric) is enabled.
924 * other: No other values should be possible at this point.
925 */
926 hw_dbg(hw, "mac->fc = %u\n", mac->fc);
927
928 switch (mac->fc) {
929 case e1000_fc_none:
930 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
931 break;
932 case e1000_fc_rx_pause:
933 ctrl &= (~E1000_CTRL_TFCE);
934 ctrl |= E1000_CTRL_RFCE;
935 break;
936 case e1000_fc_tx_pause:
937 ctrl &= (~E1000_CTRL_RFCE);
938 ctrl |= E1000_CTRL_TFCE;
939 break;
940 case e1000_fc_full:
941 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
942 break;
943 default:
944 hw_dbg(hw, "Flow control param set incorrectly\n");
945 return -E1000_ERR_CONFIG;
946 }
947
948 ew32(CTRL, ctrl);
949
950 return 0;
951}
952
953/**
954 * e1000e_config_fc_after_link_up - Configures flow control after link
955 * @hw: pointer to the HW structure
956 *
957 * Checks the status of auto-negotiation after link up to ensure that the
958 * speed and duplex were not forced. If the link needed to be forced, then
959 * flow control needs to be forced also. If auto-negotiation is enabled
960 * and did not fail, then we configure flow control based on our link
961 * partner.
962 **/
963s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
964{
965 struct e1000_mac_info *mac = &hw->mac;
966 s32 ret_val = 0;
967 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
968 u16 speed, duplex;
969
970 /* Check for the case where we have fiber media and auto-neg failed
971 * so we had to force link. In this case, we need to force the
972 * configuration of the MAC to match the "fc" parameter.
973 */
974 if (mac->autoneg_failed) {
975 if (hw->media_type == e1000_media_type_fiber ||
976 hw->media_type == e1000_media_type_internal_serdes)
977 ret_val = e1000e_force_mac_fc(hw);
978 } else {
979 if (hw->media_type == e1000_media_type_copper)
980 ret_val = e1000e_force_mac_fc(hw);
981 }
982
983 if (ret_val) {
984 hw_dbg(hw, "Error forcing flow control settings\n");
985 return ret_val;
986 }
987
988 /* Check for the case where we have copper media and auto-neg is
989 * enabled. In this case, we need to check and see if Auto-Neg
990 * has completed, and if so, how the PHY and link partner has
991 * flow control configured.
992 */
993 if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) {
994 /* Read the MII Status Register and check to see if AutoNeg
995 * has completed. We read this twice because this reg has
996 * some "sticky" (latched) bits.
997 */
998 ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
999 if (ret_val)
1000 return ret_val;
1001 ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
1002 if (ret_val)
1003 return ret_val;
1004
1005 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
1006 hw_dbg(hw, "Copper PHY and Auto Neg "
1007 "has not completed.\n");
1008 return ret_val;
1009 }
1010
1011 /* The AutoNeg process has completed, so we now need to
1012 * read both the Auto Negotiation Advertisement
1013 * Register (Address 4) and the Auto_Negotiation Base
1014 * Page Ability Register (Address 5) to determine how
1015 * flow control was negotiated.
1016 */
1017 ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
1018 if (ret_val)
1019 return ret_val;
1020 ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
1021 if (ret_val)
1022 return ret_val;
1023
1024 /* Two bits in the Auto Negotiation Advertisement Register
1025 * (Address 4) and two bits in the Auto Negotiation Base
1026 * Page Ability Register (Address 5) determine flow control
1027 * for both the PHY and the link partner. The following
1028 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1029 * 1999, describes these PAUSE resolution bits and how flow
1030 * control is determined based upon these settings.
1031 * NOTE: DC = Don't Care
1032 *
1033 * LOCAL DEVICE | LINK PARTNER
1034 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1035 *-------|---------|-------|---------|--------------------
1036 * 0 | 0 | DC | DC | e1000_fc_none
1037 * 0 | 1 | 0 | DC | e1000_fc_none
1038 * 0 | 1 | 1 | 0 | e1000_fc_none
1039 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1040 * 1 | 0 | 0 | DC | e1000_fc_none
1041 * 1 | DC | 1 | DC | e1000_fc_full
1042 * 1 | 1 | 0 | 0 | e1000_fc_none
1043 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1044 *
1045 */
1046 /* Are both PAUSE bits set to 1? If so, this implies
1047 * Symmetric Flow Control is enabled at both ends. The
1048 * ASM_DIR bits are irrelevant per the spec.
1049 *
1050 * For Symmetric Flow Control:
1051 *
1052 * LOCAL DEVICE | LINK PARTNER
1053 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1054 *-------|---------|-------|---------|--------------------
1055 * 1 | DC | 1 | DC | E1000_fc_full
1056 *
1057 */
1058 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1059 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
1060 /* Now we need to check if the user selected RX ONLY
1061 * of pause frames. In this case, we had to advertise
1062 * FULL flow control because we could not advertise RX
1063 * ONLY. Hence, we must now check to see if we need to
1064 * turn OFF the TRANSMISSION of PAUSE frames.
1065 */
1066 if (mac->original_fc == e1000_fc_full) {
1067 mac->fc = e1000_fc_full;
1068 hw_dbg(hw, "Flow Control = FULL.\r\n");
1069 } else {
1070 mac->fc = e1000_fc_rx_pause;
1071 hw_dbg(hw, "Flow Control = "
1072 "RX PAUSE frames only.\r\n");
1073 }
1074 }
1075 /* For receiving PAUSE frames ONLY.
1076 *
1077 * LOCAL DEVICE | LINK PARTNER
1078 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1079 *-------|---------|-------|---------|--------------------
1080 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1081 *
1082 */
1083 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1084 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1085 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1086 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1087 mac->fc = e1000_fc_tx_pause;
1088 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n");
1089 }
1090 /* For transmitting PAUSE frames ONLY.
1091 *
1092 * LOCAL DEVICE | LINK PARTNER
1093 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1094 *-------|---------|-------|---------|--------------------
1095 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1096 *
1097 */
1098 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1099 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1100 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1101 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1102 mac->fc = e1000_fc_rx_pause;
1103 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
1104 }
1105 /* Per the IEEE spec, at this point flow control should be
1106 * disabled. However, we want to consider that we could
1107 * be connected to a legacy switch that doesn't advertise
1108 * desired flow control, but can be forced on the link
1109 * partner. So if we advertised no flow control, that is
1110 * what we will resolve to. If we advertised some kind of
1111 * receive capability (Rx Pause Only or Full Flow Control)
1112 * and the link partner advertised none, we will configure
1113 * ourselves to enable Rx Flow Control only. We can do
1114 * this safely for two reasons: If the link partner really
1115 * didn't want flow control enabled, and we enable Rx, no
1116 * harm done since we won't be receiving any PAUSE frames
1117 * anyway. If the intent on the link partner was to have
1118 * flow control enabled, then by us enabling RX only, we
1119 * can at least receive pause frames and process them.
1120 * This is a good idea because in most cases, since we are
1121 * predominantly a server NIC, more times than not we will
1122 * be asked to delay transmission of packets than asking
1123 * our link partner to pause transmission of frames.
1124 */
1125 else if ((mac->original_fc == e1000_fc_none) ||
1126 (mac->original_fc == e1000_fc_tx_pause)) {
1127 mac->fc = e1000_fc_none;
1128 hw_dbg(hw, "Flow Control = NONE.\r\n");
1129 } else {
1130 mac->fc = e1000_fc_rx_pause;
1131 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
1132 }
1133
1134 /* Now we need to do one last check... If we auto-
1135 * negotiated to HALF DUPLEX, flow control should not be
1136 * enabled per IEEE 802.3 spec.
1137 */
1138 ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
1139 if (ret_val) {
1140 hw_dbg(hw, "Error getting link speed and duplex\n");
1141 return ret_val;
1142 }
1143
1144 if (duplex == HALF_DUPLEX)
1145 mac->fc = e1000_fc_none;
1146
1147 /* Now we call a subroutine to actually force the MAC
1148 * controller to use the correct flow control settings.
1149 */
1150 ret_val = e1000e_force_mac_fc(hw);
1151 if (ret_val) {
1152 hw_dbg(hw, "Error forcing flow control settings\n");
1153 return ret_val;
1154 }
1155 }
1156
1157 return 0;
1158}
1159
1160/**
1161 * e1000e_get_speed_and_duplex_copper - Retreive current speed/duplex
1162 * @hw: pointer to the HW structure
1163 * @speed: stores the current speed
1164 * @duplex: stores the current duplex
1165 *
1166 * Read the status register for the current speed/duplex and store the current
1167 * speed and duplex for copper connections.
1168 **/
1169s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex)
1170{
1171 u32 status;
1172
1173 status = er32(STATUS);
1174 if (status & E1000_STATUS_SPEED_1000) {
1175 *speed = SPEED_1000;
1176 hw_dbg(hw, "1000 Mbs, ");
1177 } else if (status & E1000_STATUS_SPEED_100) {
1178 *speed = SPEED_100;
1179 hw_dbg(hw, "100 Mbs, ");
1180 } else {
1181 *speed = SPEED_10;
1182 hw_dbg(hw, "10 Mbs, ");
1183 }
1184
1185 if (status & E1000_STATUS_FD) {
1186 *duplex = FULL_DUPLEX;
1187 hw_dbg(hw, "Full Duplex\n");
1188 } else {
1189 *duplex = HALF_DUPLEX;
1190 hw_dbg(hw, "Half Duplex\n");
1191 }
1192
1193 return 0;
1194}
1195
1196/**
1197 * e1000e_get_speed_and_duplex_fiber_serdes - Retreive current speed/duplex
1198 * @hw: pointer to the HW structure
1199 * @speed: stores the current speed
1200 * @duplex: stores the current duplex
1201 *
1202 * Sets the speed and duplex to gigabit full duplex (the only possible option)
1203 * for fiber/serdes links.
1204 **/
1205s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex)
1206{
1207 *speed = SPEED_1000;
1208 *duplex = FULL_DUPLEX;
1209
1210 return 0;
1211}
1212
1213/**
1214 * e1000e_get_hw_semaphore - Acquire hardware semaphore
1215 * @hw: pointer to the HW structure
1216 *
1217 * Acquire the HW semaphore to access the PHY or NVM
1218 **/
1219s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
1220{
1221 u32 swsm;
1222 s32 timeout = hw->nvm.word_size + 1;
1223 s32 i = 0;
1224
1225 /* Get the SW semaphore */
1226 while (i < timeout) {
1227 swsm = er32(SWSM);
1228 if (!(swsm & E1000_SWSM_SMBI))
1229 break;
1230
1231 udelay(50);
1232 i++;
1233 }
1234
1235 if (i == timeout) {
1236 hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
1237 return -E1000_ERR_NVM;
1238 }
1239
1240 /* Get the FW semaphore. */
1241 for (i = 0; i < timeout; i++) {
1242 swsm = er32(SWSM);
1243 ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
1244
1245 /* Semaphore acquired if bit latched */
1246 if (er32(SWSM) & E1000_SWSM_SWESMBI)
1247 break;
1248
1249 udelay(50);
1250 }
1251
1252 if (i == timeout) {
1253 /* Release semaphores */
1254 e1000e_put_hw_semaphore(hw);
1255 hw_dbg(hw, "Driver can't access the NVM\n");
1256 return -E1000_ERR_NVM;
1257 }
1258
1259 return 0;
1260}
1261
1262/**
1263 * e1000e_put_hw_semaphore - Release hardware semaphore
1264 * @hw: pointer to the HW structure
1265 *
1266 * Release hardware semaphore used to access the PHY or NVM
1267 **/
1268void e1000e_put_hw_semaphore(struct e1000_hw *hw)
1269{
1270 u32 swsm;
1271
1272 swsm = er32(SWSM);
1273 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1274 ew32(SWSM, swsm);
1275}
1276
1277/**
1278 * e1000e_get_auto_rd_done - Check for auto read completion
1279 * @hw: pointer to the HW structure
1280 *
1281 * Check EEPROM for Auto Read done bit.
1282 **/
1283s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
1284{
1285 s32 i = 0;
1286
1287 while (i < AUTO_READ_DONE_TIMEOUT) {
1288 if (er32(EECD) & E1000_EECD_AUTO_RD)
1289 break;
1290 msleep(1);
1291 i++;
1292 }
1293
1294 if (i == AUTO_READ_DONE_TIMEOUT) {
1295 hw_dbg(hw, "Auto read by HW from NVM has not completed.\n");
1296 return -E1000_ERR_RESET;
1297 }
1298
1299 return 0;
1300}
1301
1302/**
1303 * e1000e_valid_led_default - Verify a valid default LED config
1304 * @hw: pointer to the HW structure
1305 * @data: pointer to the NVM (EEPROM)
1306 *
1307 * Read the EEPROM for the current default LED configuration. If the
1308 * LED configuration is not valid, set to a valid LED configuration.
1309 **/
1310s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
1311{
1312 s32 ret_val;
1313
1314 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1315 if (ret_val) {
1316 hw_dbg(hw, "NVM Read Error\n");
1317 return ret_val;
1318 }
1319
1320 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1321 *data = ID_LED_DEFAULT;
1322
1323 return 0;
1324}
1325
1326/**
1327 * e1000e_id_led_init -
1328 * @hw: pointer to the HW structure
1329 *
1330 **/
1331s32 e1000e_id_led_init(struct e1000_hw *hw)
1332{
1333 struct e1000_mac_info *mac = &hw->mac;
1334 s32 ret_val;
1335 const u32 ledctl_mask = 0x000000FF;
1336 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1337 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1338 u16 data, i, temp;
1339 const u16 led_mask = 0x0F;
1340
1341 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
1342 if (ret_val)
1343 return ret_val;
1344
1345 mac->ledctl_default = er32(LEDCTL);
1346 mac->ledctl_mode1 = mac->ledctl_default;
1347 mac->ledctl_mode2 = mac->ledctl_default;
1348
1349 for (i = 0; i < 4; i++) {
1350 temp = (data >> (i << 2)) & led_mask;
1351 switch (temp) {
1352 case ID_LED_ON1_DEF2:
1353 case ID_LED_ON1_ON2:
1354 case ID_LED_ON1_OFF2:
1355 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1356 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1357 break;
1358 case ID_LED_OFF1_DEF2:
1359 case ID_LED_OFF1_ON2:
1360 case ID_LED_OFF1_OFF2:
1361 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1362 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1363 break;
1364 default:
1365 /* Do nothing */
1366 break;
1367 }
1368 switch (temp) {
1369 case ID_LED_DEF1_ON2:
1370 case ID_LED_ON1_ON2:
1371 case ID_LED_OFF1_ON2:
1372 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1373 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1374 break;
1375 case ID_LED_DEF1_OFF2:
1376 case ID_LED_ON1_OFF2:
1377 case ID_LED_OFF1_OFF2:
1378 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1379 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1380 break;
1381 default:
1382 /* Do nothing */
1383 break;
1384 }
1385 }
1386
1387 return 0;
1388}
1389
1390/**
1391 * e1000e_cleanup_led_generic - Set LED config to default operation
1392 * @hw: pointer to the HW structure
1393 *
1394 * Remove the current LED configuration and set the LED configuration
1395 * to the default value, saved from the EEPROM.
1396 **/
1397s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
1398{
1399 ew32(LEDCTL, hw->mac.ledctl_default);
1400 return 0;
1401}
1402
1403/**
1404 * e1000e_blink_led - Blink LED
1405 * @hw: pointer to the HW structure
1406 *
1407 * Blink the led's which are set to be on.
1408 **/
1409s32 e1000e_blink_led(struct e1000_hw *hw)
1410{
1411 u32 ledctl_blink = 0;
1412 u32 i;
1413
1414 if (hw->media_type == e1000_media_type_fiber) {
1415 /* always blink LED0 for PCI-E fiber */
1416 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1417 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1418 } else {
1419 /* set the blink bit for each LED that's "on" (0x0E)
1420 * in ledctl_mode2 */
1421 ledctl_blink = hw->mac.ledctl_mode2;
1422 for (i = 0; i < 4; i++)
1423 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1424 E1000_LEDCTL_MODE_LED_ON)
1425 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
1426 (i * 8));
1427 }
1428
1429 ew32(LEDCTL, ledctl_blink);
1430
1431 return 0;
1432}
1433
1434/**
1435 * e1000e_led_on_generic - Turn LED on
1436 * @hw: pointer to the HW structure
1437 *
1438 * Turn LED on.
1439 **/
1440s32 e1000e_led_on_generic(struct e1000_hw *hw)
1441{
1442 u32 ctrl;
1443
1444 switch (hw->media_type) {
1445 case e1000_media_type_fiber:
1446 ctrl = er32(CTRL);
1447 ctrl &= ~E1000_CTRL_SWDPIN0;
1448 ctrl |= E1000_CTRL_SWDPIO0;
1449 ew32(CTRL, ctrl);
1450 break;
1451 case e1000_media_type_copper:
1452 ew32(LEDCTL, hw->mac.ledctl_mode2);
1453 break;
1454 default:
1455 break;
1456 }
1457
1458 return 0;
1459}
1460
1461/**
1462 * e1000e_led_off_generic - Turn LED off
1463 * @hw: pointer to the HW structure
1464 *
1465 * Turn LED off.
1466 **/
1467s32 e1000e_led_off_generic(struct e1000_hw *hw)
1468{
1469 u32 ctrl;
1470
1471 switch (hw->media_type) {
1472 case e1000_media_type_fiber:
1473 ctrl = er32(CTRL);
1474 ctrl |= E1000_CTRL_SWDPIN0;
1475 ctrl |= E1000_CTRL_SWDPIO0;
1476 ew32(CTRL, ctrl);
1477 break;
1478 case e1000_media_type_copper:
1479 ew32(LEDCTL, hw->mac.ledctl_mode1);
1480 break;
1481 default:
1482 break;
1483 }
1484
1485 return 0;
1486}
1487
1488/**
1489 * e1000e_set_pcie_no_snoop - Set PCI-express capabilities
1490 * @hw: pointer to the HW structure
1491 * @no_snoop: bitmap of snoop events
1492 *
1493 * Set the PCI-express register to snoop for events enabled in 'no_snoop'.
1494 **/
1495void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
1496{
1497 u32 gcr;
1498
1499 if (no_snoop) {
1500 gcr = er32(GCR);
1501 gcr &= ~(PCIE_NO_SNOOP_ALL);
1502 gcr |= no_snoop;
1503 ew32(GCR, gcr);
1504 }
1505}
1506
1507/**
1508 * e1000e_disable_pcie_master - Disables PCI-express master access
1509 * @hw: pointer to the HW structure
1510 *
1511 * Returns 0 if successful, else returns -10
1512 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not casued
1513 * the master requests to be disabled.
1514 *
1515 * Disables PCI-Express master access and verifies there are no pending
1516 * requests.
1517 **/
1518s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
1519{
1520 u32 ctrl;
1521 s32 timeout = MASTER_DISABLE_TIMEOUT;
1522
1523 ctrl = er32(CTRL);
1524 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1525 ew32(CTRL, ctrl);
1526
1527 while (timeout) {
1528 if (!(er32(STATUS) &
1529 E1000_STATUS_GIO_MASTER_ENABLE))
1530 break;
1531 udelay(100);
1532 timeout--;
1533 }
1534
1535 if (!timeout) {
1536 hw_dbg(hw, "Master requests are pending.\n");
1537 return -E1000_ERR_MASTER_REQUESTS_PENDING;
1538 }
1539
1540 return 0;
1541}
1542
1543/**
1544 * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
1545 * @hw: pointer to the HW structure
1546 *
1547 * Reset the Adaptive Interframe Spacing throttle to default values.
1548 **/
1549void e1000e_reset_adaptive(struct e1000_hw *hw)
1550{
1551 struct e1000_mac_info *mac = &hw->mac;
1552
1553 mac->current_ifs_val = 0;
1554 mac->ifs_min_val = IFS_MIN;
1555 mac->ifs_max_val = IFS_MAX;
1556 mac->ifs_step_size = IFS_STEP;
1557 mac->ifs_ratio = IFS_RATIO;
1558
1559 mac->in_ifs_mode = 0;
1560 ew32(AIT, 0);
1561}
1562
1563/**
1564 * e1000e_update_adaptive - Update Adaptive Interframe Spacing
1565 * @hw: pointer to the HW structure
1566 *
1567 * Update the Adaptive Interframe Spacing Throttle value based on the
1568 * time between transmitted packets and time between collisions.
1569 **/
1570void e1000e_update_adaptive(struct e1000_hw *hw)
1571{
1572 struct e1000_mac_info *mac = &hw->mac;
1573
1574 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
1575 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
1576 mac->in_ifs_mode = 1;
1577 if (mac->current_ifs_val < mac->ifs_max_val) {
1578 if (!mac->current_ifs_val)
1579 mac->current_ifs_val = mac->ifs_min_val;
1580 else
1581 mac->current_ifs_val +=
1582 mac->ifs_step_size;
1583 ew32(AIT,
1584 mac->current_ifs_val);
1585 }
1586 }
1587 } else {
1588 if (mac->in_ifs_mode &&
1589 (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
1590 mac->current_ifs_val = 0;
1591 mac->in_ifs_mode = 0;
1592 ew32(AIT, 0);
1593 }
1594 }
1595}
1596
1597/**
1598 * e1000_raise_eec_clk - Raise EEPROM clock
1599 * @hw: pointer to the HW structure
1600 * @eecd: pointer to the EEPROM
1601 *
1602 * Enable/Raise the EEPROM clock bit.
1603 **/
1604static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
1605{
1606 *eecd = *eecd | E1000_EECD_SK;
1607 ew32(EECD, *eecd);
1608 e1e_flush();
1609 udelay(hw->nvm.delay_usec);
1610}
1611
1612/**
1613 * e1000_lower_eec_clk - Lower EEPROM clock
1614 * @hw: pointer to the HW structure
1615 * @eecd: pointer to the EEPROM
1616 *
1617 * Clear/Lower the EEPROM clock bit.
1618 **/
1619static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
1620{
1621 *eecd = *eecd & ~E1000_EECD_SK;
1622 ew32(EECD, *eecd);
1623 e1e_flush();
1624 udelay(hw->nvm.delay_usec);
1625}
1626
1627/**
1628 * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM
1629 * @hw: pointer to the HW structure
1630 * @data: data to send to the EEPROM
1631 * @count: number of bits to shift out
1632 *
1633 * We need to shift 'count' bits out to the EEPROM. So, the value in the
1634 * "data" parameter will be shifted out to the EEPROM one bit at a time.
1635 * In order to do this, "data" must be broken down into bits.
1636 **/
1637static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
1638{
1639 struct e1000_nvm_info *nvm = &hw->nvm;
1640 u32 eecd = er32(EECD);
1641 u32 mask;
1642
1643 mask = 0x01 << (count - 1);
1644 if (nvm->type == e1000_nvm_eeprom_spi)
1645 eecd |= E1000_EECD_DO;
1646
1647 do {
1648 eecd &= ~E1000_EECD_DI;
1649
1650 if (data & mask)
1651 eecd |= E1000_EECD_DI;
1652
1653 ew32(EECD, eecd);
1654 e1e_flush();
1655
1656 udelay(nvm->delay_usec);
1657
1658 e1000_raise_eec_clk(hw, &eecd);
1659 e1000_lower_eec_clk(hw, &eecd);
1660
1661 mask >>= 1;
1662 } while (mask);
1663
1664 eecd &= ~E1000_EECD_DI;
1665 ew32(EECD, eecd);
1666}
1667
1668/**
1669 * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
1670 * @hw: pointer to the HW structure
1671 * @count: number of bits to shift in
1672 *
1673 * In order to read a register from the EEPROM, we need to shift 'count' bits
1674 * in from the EEPROM. Bits are "shifted in" by raising the clock input to
1675 * the EEPROM (setting the SK bit), and then reading the value of the data out
1676 * "DO" bit. During this "shifting in" process the data in "DI" bit should
1677 * always be clear.
1678 **/
1679static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
1680{
1681 u32 eecd;
1682 u32 i;
1683 u16 data;
1684
1685 eecd = er32(EECD);
1686
1687 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
1688 data = 0;
1689
1690 for (i = 0; i < count; i++) {
1691 data <<= 1;
1692 e1000_raise_eec_clk(hw, &eecd);
1693
1694 eecd = er32(EECD);
1695
1696 eecd &= ~E1000_EECD_DI;
1697 if (eecd & E1000_EECD_DO)
1698 data |= 1;
1699
1700 e1000_lower_eec_clk(hw, &eecd);
1701 }
1702
1703 return data;
1704}
1705
1706/**
1707 * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
1708 * @hw: pointer to the HW structure
1709 * @ee_reg: EEPROM flag for polling
1710 *
1711 * Polls the EEPROM status bit for either read or write completion based
1712 * upon the value of 'ee_reg'.
1713 **/
1714s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
1715{
1716 u32 attempts = 100000;
1717 u32 i, reg = 0;
1718
1719 for (i = 0; i < attempts; i++) {
1720 if (ee_reg == E1000_NVM_POLL_READ)
1721 reg = er32(EERD);
1722 else
1723 reg = er32(EEWR);
1724
1725 if (reg & E1000_NVM_RW_REG_DONE)
1726 return 0;
1727
1728 udelay(5);
1729 }
1730
1731 return -E1000_ERR_NVM;
1732}
1733
1734/**
1735 * e1000e_acquire_nvm - Generic request for access to EEPROM
1736 * @hw: pointer to the HW structure
1737 *
1738 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
1739 * Return successful if access grant bit set, else clear the request for
1740 * EEPROM access and return -E1000_ERR_NVM (-1).
1741 **/
1742s32 e1000e_acquire_nvm(struct e1000_hw *hw)
1743{
1744 u32 eecd = er32(EECD);
1745 s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
1746
1747 ew32(EECD, eecd | E1000_EECD_REQ);
1748 eecd = er32(EECD);
1749
1750 while (timeout) {
1751 if (eecd & E1000_EECD_GNT)
1752 break;
1753 udelay(5);
1754 eecd = er32(EECD);
1755 timeout--;
1756 }
1757
1758 if (!timeout) {
1759 eecd &= ~E1000_EECD_REQ;
1760 ew32(EECD, eecd);
1761 hw_dbg(hw, "Could not acquire NVM grant\n");
1762 return -E1000_ERR_NVM;
1763 }
1764
1765 return 0;
1766}
1767
1768/**
1769 * e1000_standby_nvm - Return EEPROM to standby state
1770 * @hw: pointer to the HW structure
1771 *
1772 * Return the EEPROM to a standby state.
1773 **/
1774static void e1000_standby_nvm(struct e1000_hw *hw)
1775{
1776 struct e1000_nvm_info *nvm = &hw->nvm;
1777 u32 eecd = er32(EECD);
1778
1779 if (nvm->type == e1000_nvm_eeprom_spi) {
1780 /* Toggle CS to flush commands */
1781 eecd |= E1000_EECD_CS;
1782 ew32(EECD, eecd);
1783 e1e_flush();
1784 udelay(nvm->delay_usec);
1785 eecd &= ~E1000_EECD_CS;
1786 ew32(EECD, eecd);
1787 e1e_flush();
1788 udelay(nvm->delay_usec);
1789 }
1790}
1791
1792/**
1793 * e1000_stop_nvm - Terminate EEPROM command
1794 * @hw: pointer to the HW structure
1795 *
1796 * Terminates the current command by inverting the EEPROM's chip select pin.
1797 **/
1798static void e1000_stop_nvm(struct e1000_hw *hw)
1799{
1800 u32 eecd;
1801
1802 eecd = er32(EECD);
1803 if (hw->nvm.type == e1000_nvm_eeprom_spi) {
1804 /* Pull CS high */
1805 eecd |= E1000_EECD_CS;
1806 e1000_lower_eec_clk(hw, &eecd);
1807 }
1808}
1809
1810/**
1811 * e1000e_release_nvm - Release exclusive access to EEPROM
1812 * @hw: pointer to the HW structure
1813 *
1814 * Stop any current commands to the EEPROM and clear the EEPROM request bit.
1815 **/
1816void e1000e_release_nvm(struct e1000_hw *hw)
1817{
1818 u32 eecd;
1819
1820 e1000_stop_nvm(hw);
1821
1822 eecd = er32(EECD);
1823 eecd &= ~E1000_EECD_REQ;
1824 ew32(EECD, eecd);
1825}
1826
1827/**
1828 * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
1829 * @hw: pointer to the HW structure
1830 *
1831 * Setups the EEPROM for reading and writing.
1832 **/
1833static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1834{
1835 struct e1000_nvm_info *nvm = &hw->nvm;
1836 u32 eecd = er32(EECD);
1837 u16 timeout = 0;
1838 u8 spi_stat_reg;
1839
1840 if (nvm->type == e1000_nvm_eeprom_spi) {
1841 /* Clear SK and CS */
1842 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
1843 ew32(EECD, eecd);
1844 udelay(1);
1845 timeout = NVM_MAX_RETRY_SPI;
1846
1847 /* Read "Status Register" repeatedly until the LSB is cleared.
1848 * The EEPROM will signal that the command has been completed
1849 * by clearing bit 0 of the internal status register. If it's
1850 * not cleared within 'timeout', then error out. */
1851 while (timeout) {
1852 e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
1853 hw->nvm.opcode_bits);
1854 spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
1855 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
1856 break;
1857
1858 udelay(5);
1859 e1000_standby_nvm(hw);
1860 timeout--;
1861 }
1862
1863 if (!timeout) {
1864 hw_dbg(hw, "SPI NVM Status error\n");
1865 return -E1000_ERR_NVM;
1866 }
1867 }
1868
1869 return 0;
1870}
1871
1872/**
1873 * e1000e_read_nvm_spi - Read EEPROM's using SPI
1874 * @hw: pointer to the HW structure
1875 * @offset: offset of word in the EEPROM to read
1876 * @words: number of words to read
1877 * @data: word read from the EEPROM
1878 *
1879 * Reads a 16 bit word from the EEPROM.
1880 **/
1881s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1882{
1883 struct e1000_nvm_info *nvm = &hw->nvm;
1884 u32 i = 0;
1885 s32 ret_val;
1886 u16 word_in;
1887 u8 read_opcode = NVM_READ_OPCODE_SPI;
1888
1889 /* A check for invalid values: offset too large, too many words,
1890 * and not enough words. */
1891 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1892 (words == 0)) {
1893 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1894 return -E1000_ERR_NVM;
1895 }
1896
1897 ret_val = nvm->ops.acquire_nvm(hw);
1898 if (ret_val)
1899 return ret_val;
1900
1901 ret_val = e1000_ready_nvm_eeprom(hw);
1902 if (ret_val) {
1903 nvm->ops.release_nvm(hw);
1904 return ret_val;
1905 }
1906
1907 e1000_standby_nvm(hw);
1908
1909 if ((nvm->address_bits == 8) && (offset >= 128))
1910 read_opcode |= NVM_A8_OPCODE_SPI;
1911
1912 /* Send the READ command (opcode + addr) */
1913 e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
1914 e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
1915
1916 /* Read the data. SPI NVMs increment the address with each byte
1917 * read and will roll over if reading beyond the end. This allows
1918 * us to read the whole NVM from any offset */
1919 for (i = 0; i < words; i++) {
1920 word_in = e1000_shift_in_eec_bits(hw, 16);
1921 data[i] = (word_in >> 8) | (word_in << 8);
1922 }
1923
1924 nvm->ops.release_nvm(hw);
1925 return 0;
1926}
1927
/**
 * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
 * @hw: pointer to the HW structure
 * @offset: offset of word in the EEPROM to read
 * @words: number of words to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the EERD register.
 *
 * Returns 0 on success, -E1000_ERR_NVM for bad parameters, or the error
 * from e1000e_poll_eerd_eewr_done if a read does not complete.
 **/
s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, eerd = 0;
	s32 ret_val = 0;

	/* A check for invalid values: offset too large, too many words,
	 * and not enough words. */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	for (i = 0; i < words; i++) {
		/* Kick off one word read: program address + START, then
		 * poll for DONE before pulling the data field out. */
		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
		       E1000_NVM_RW_REG_START;

		ew32(EERD, eerd);
		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
		if (ret_val)
			break;

		data[i] = (er32(EERD) >>
			   E1000_NVM_RW_REG_DATA);
	}

	return ret_val;
}
1966
1967/**
1968 * e1000e_write_nvm_spi - Write to EEPROM using SPI
1969 * @hw: pointer to the HW structure
1970 * @offset: offset within the EEPROM to be written to
1971 * @words: number of words to write
1972 * @data: 16 bit word(s) to be written to the EEPROM
1973 *
1974 * Writes data to EEPROM at offset using SPI interface.
1975 *
1976 * If e1000e_update_nvm_checksum is not called after this function , the
1977 * EEPROM will most likley contain an invalid checksum.
1978 **/
1979s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1980{
1981 struct e1000_nvm_info *nvm = &hw->nvm;
1982 s32 ret_val;
1983 u16 widx = 0;
1984
1985 /* A check for invalid values: offset too large, too many words,
1986 * and not enough words. */
1987 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1988 (words == 0)) {
1989 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1990 return -E1000_ERR_NVM;
1991 }
1992
1993 ret_val = nvm->ops.acquire_nvm(hw);
1994 if (ret_val)
1995 return ret_val;
1996
1997 msleep(10);
1998
1999 while (widx < words) {
2000 u8 write_opcode = NVM_WRITE_OPCODE_SPI;
2001
2002 ret_val = e1000_ready_nvm_eeprom(hw);
2003 if (ret_val) {
2004 nvm->ops.release_nvm(hw);
2005 return ret_val;
2006 }
2007
2008 e1000_standby_nvm(hw);
2009
2010 /* Send the WRITE ENABLE command (8 bit opcode) */
2011 e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
2012 nvm->opcode_bits);
2013
2014 e1000_standby_nvm(hw);
2015
2016 /* Some SPI eeproms use the 8th address bit embedded in the
2017 * opcode */
2018 if ((nvm->address_bits == 8) && (offset >= 128))
2019 write_opcode |= NVM_A8_OPCODE_SPI;
2020
2021 /* Send the Write command (8-bit opcode + addr) */
2022 e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
2023 e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
2024 nvm->address_bits);
2025
2026 /* Loop to allow for up to whole page write of eeprom */
2027 while (widx < words) {
2028 u16 word_out = data[widx];
2029 word_out = (word_out >> 8) | (word_out << 8);
2030 e1000_shift_out_eec_bits(hw, word_out, 16);
2031 widx++;
2032
2033 if ((((offset + widx) * 2) % nvm->page_size) == 0) {
2034 e1000_standby_nvm(hw);
2035 break;
2036 }
2037 }
2038 }
2039
2040 msleep(10);
2041 return 0;
2042}
2043
2044/**
2045 * e1000e_read_mac_addr - Read device MAC address
2046 * @hw: pointer to the HW structure
2047 *
2048 * Reads the device MAC address from the EEPROM and stores the value.
2049 * Since devices with two ports use the same EEPROM, we increment the
2050 * last bit in the MAC address for the second port.
2051 **/
2052s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2053{
2054 s32 ret_val;
2055 u16 offset, nvm_data, i;
2056
2057 for (i = 0; i < ETH_ALEN; i += 2) {
2058 offset = i >> 1;
2059 ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
2060 if (ret_val) {
2061 hw_dbg(hw, "NVM Read Error\n");
2062 return ret_val;
2063 }
2064 hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
2065 hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
2066 }
2067
2068 /* Flip last bit of mac address if we're on second port */
2069 if (hw->bus.func == E1000_FUNC_1)
2070 hw->mac.perm_addr[5] ^= 1;
2071
2072 for (i = 0; i < ETH_ALEN; i++)
2073 hw->mac.addr[i] = hw->mac.perm_addr[i];
2074
2075 return 0;
2076}
2077
2078/**
2079 * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
2080 * @hw: pointer to the HW structure
2081 *
2082 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2083 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
2084 **/
2085s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
2086{
2087 s32 ret_val;
2088 u16 checksum = 0;
2089 u16 i, nvm_data;
2090
2091 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
2092 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2093 if (ret_val) {
2094 hw_dbg(hw, "NVM Read Error\n");
2095 return ret_val;
2096 }
2097 checksum += nvm_data;
2098 }
2099
2100 if (checksum != (u16) NVM_SUM) {
2101 hw_dbg(hw, "NVM Checksum Invalid\n");
2102 return -E1000_ERR_NVM;
2103 }
2104
2105 return 0;
2106}
2107
2108/**
2109 * e1000e_update_nvm_checksum_generic - Update EEPROM checksum
2110 * @hw: pointer to the HW structure
2111 *
2112 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
2113 * up to the checksum. Then calculates the EEPROM checksum and writes the
2114 * value to the EEPROM.
2115 **/
2116s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
2117{
2118 s32 ret_val;
2119 u16 checksum = 0;
2120 u16 i, nvm_data;
2121
2122 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
2123 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2124 if (ret_val) {
2125 hw_dbg(hw, "NVM Read Error while updating checksum.\n");
2126 return ret_val;
2127 }
2128 checksum += nvm_data;
2129 }
2130 checksum = (u16) NVM_SUM - checksum;
2131 ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
2132 if (ret_val)
2133 hw_dbg(hw, "NVM Write Error while updating checksum.\n");
2134
2135 return ret_val;
2136}
2137
/**
 * e1000e_reload_nvm - Reloads EEPROM
 * @hw: pointer to the HW structure
 *
 * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
 * extended control register.
 **/
void e1000e_reload_nvm(struct e1000_hw *hw)
{
	u32 ctrl_ext;

	/* brief settle delay before triggering the reload */
	udelay(10);
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_EE_RST;
	ew32(CTRL_EXT, ctrl_ext);
	/* flush forces the posted register write to reach the device */
	e1e_flush();
}
2155
2156/**
2157 * e1000_calculate_checksum - Calculate checksum for buffer
2158 * @buffer: pointer to EEPROM
2159 * @length: size of EEPROM to calculate a checksum for
2160 *
2161 * Calculates the checksum for some buffer on a specified length. The
2162 * checksum calculated is returned.
2163 **/
2164static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
2165{
2166 u32 i;
2167 u8 sum = 0;
2168
2169 if (!buffer)
2170 return 0;
2171
2172 for (i = 0; i < length; i++)
2173 sum += buffer[i];
2174
2175 return (u8) (0 - sum);
2176}
2177
/**
 * e1000_mng_enable_host_if - Checks host interface is enabled
 * @hw: pointer to the HW structure
 *
 * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
 *
 * This function checks whether the HOST IF is enabled for command operaton
 * and also checks whether the previous command is completed. It busy waits
 * in case of previous command is not completed.
 **/
static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
{
	u32 hicr;
	u8 i;

	/* Check that the host interface is enabled. */
	hicr = er32(HICR);
	if ((hicr & E1000_HICR_EN) == 0) {
		hw_dbg(hw, "E1000_HOST_EN bit disabled.\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}
	/* check the previous command is completed */
	/* firmware clears E1000_HICR_C when it has consumed the command;
	 * poll up to E1000_MNG_DHCP_COMMAND_TIMEOUT ms for that to happen */
	for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
		hicr = er32(HICR);
		if (!(hicr & E1000_HICR_C))
			break;
		mdelay(1);
	}

	if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
		hw_dbg(hw, "Previous command timeout failed .\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}

	return 0;
}
2214
2215/**
2216 * e1000e_check_mng_mode - check managament mode
2217 * @hw: pointer to the HW structure
2218 *
2219 * Reads the firmware semaphore register and returns true (>0) if
2220 * manageability is enabled, else false (0).
2221 **/
2222bool e1000e_check_mng_mode(struct e1000_hw *hw)
2223{
2224 u32 fwsm = er32(FWSM);
2225
2226 return (fwsm & E1000_FWSM_MODE_MASK) == hw->mac.ops.mng_mode_enab;
2227}
2228
/**
 * e1000e_enable_tx_pkt_filtering - Enable packet filtering on TX
 * @hw: pointer to the HW structure
 *
 * Enables packet filtering on transmit packets if manageability is enabled
 * and host interface is enabled.
 *
 * Also updates hw->mac.tx_pkt_filtering to reflect the decision.
 **/
bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
{
	struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
	u32 *buffer = (u32 *)&hw->mng_cookie;
	u32 offset;
	s32 ret_val, hdr_csum, csum;
	u8 i, len;

	/* No manageability, no filtering */
	if (!e1000e_check_mng_mode(hw)) {
		hw->mac.tx_pkt_filtering = 0;
		return 0;
	}

	/* If we can't read from the host interface for whatever
	 * reason, disable filtering.
	 */
	ret_val = e1000_mng_enable_host_if(hw);
	if (ret_val != 0) {
		hw->mac.tx_pkt_filtering = 0;
		/* NOTE(review): ret_val is a negative s32 here but the
		 * return type is bool; callers see it truncated. Confirm
		 * callers only test truthiness. */
		return ret_val;
	}

	/* Read in the header.  Length and offset are in dwords. */
	len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
	offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
	for (i = 0; i < len; i++)
		*(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
	/* Zero the stored checksum before recomputing so the checksum
	 * field itself is excluded from the calculation. */
	hdr_csum = hdr->checksum;
	hdr->checksum = 0;
	csum = e1000_calculate_checksum((u8 *)hdr,
					E1000_MNG_DHCP_COOKIE_LENGTH);
	/* If either the checksums or signature don't match, then
	 * the cookie area isn't considered valid, in which case we
	 * take the safe route of assuming Tx filtering is enabled.
	 */
	if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
		hw->mac.tx_pkt_filtering = 1;
		return 1;
	}

	/* Cookie area is valid, make the final check for filtering. */
	if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
		hw->mac.tx_pkt_filtering = 0;
		return 0;
	}

	hw->mac.tx_pkt_filtering = 1;
	return 1;
}
2286
2287/**
2288 * e1000_mng_write_cmd_header - Writes manageability command header
2289 * @hw: pointer to the HW structure
2290 * @hdr: pointer to the host interface command header
2291 *
2292 * Writes the command header after does the checksum calculation.
2293 **/
2294static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
2295 struct e1000_host_mng_command_header *hdr)
2296{
2297 u16 i, length = sizeof(struct e1000_host_mng_command_header);
2298
2299 /* Write the whole command header structure with new checksum. */
2300
2301 hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
2302
2303 length >>= 2;
2304 /* Write the relevant command block into the ram area. */
2305 for (i = 0; i < length; i++) {
2306 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
2307 *((u32 *) hdr + i));
2308 e1e_flush();
2309 }
2310
2311 return 0;
2312}
2313
/**
 * e1000_mng_host_if_write - Writes to the manageability host interface
 * @hw: pointer to the HW structure
 * @buffer: pointer to the host interface buffer
 * @length: size of the buffer
 * @offset: location in the buffer to write to
 * @sum: sum of the data (not checksum)
 *
 * This function writes the buffer content at the offset given on the host if.
 * It also does alignment considerations to do the writes in most efficient
 * way. Also fills up the sum of the buffer in *buffer parameter.
 **/
static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
				   u16 length, u16 offset, u8 *sum)
{
	u8 *tmp;
	u8 *bufptr = buffer;
	u32 data = 0;
	u16 remaining, i, j, prev_bytes;

	/* sum = only sum of the data and it is not checksum */

	if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
		return -E1000_ERR_PARAM;

	/* 'data' is the dword staging area; 'tmp' lets us fill it a byte
	 * at a time */
	tmp = (u8 *)&data;
	prev_bytes = offset & 0x3;
	offset >>= 2;

	if (prev_bytes) {
		/* Unaligned start: read-modify-write the first dword so the
		 * bytes below the starting offset are preserved. */
		data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
		for (j = prev_bytes; j < sizeof(u32); j++) {
			*(tmp + j) = *bufptr++;
			*sum += *(tmp + j);
		}
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
		/* j == sizeof(u32) here, so this subtracts exactly the
		 * number of buffer bytes consumed into the partial dword */
		length -= j - prev_bytes;
		offset++;
	}

	remaining = length & 0x3;
	length -= remaining;

	/* Calculate length in DWORDs */
	length >>= 2;

	/* The device driver writes the relevant command block into the
	 * ram area. */
	for (i = 0; i < length; i++) {
		for (j = 0; j < sizeof(u32); j++) {
			*(tmp + j) = *bufptr++;
			*sum += *(tmp + j);
		}

		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
	}
	if (remaining) {
		/* Final partial dword: zero-pad the tail bytes; padding is
		 * included in *sum. */
		for (j = 0; j < sizeof(u32); j++) {
			if (j < remaining)
				*(tmp + j) = *bufptr++;
			else
				*(tmp + j) = 0;

			*sum += *(tmp + j);
		}
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
	}

	return 0;
}
2384
/**
 * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
 * @hw: pointer to the HW structure
 * @buffer: pointer to the host interface
 * @length: size of the buffer
 *
 * Writes the DHCP information to the host interface.
 *
 * Sequence: wait for the interface to be free, write the payload after
 * the header location, write the command header (with checksum), then
 * set HICR_C to signal the firmware (ARC) that a command is pending.
 **/
s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
{
	struct e1000_host_mng_command_header hdr;
	s32 ret_val;
	u32 hicr;

	hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
	hdr.command_length = length;
	hdr.reserved1 = 0;
	hdr.reserved2 = 0;
	/* checksum starts at 0; e1000_mng_host_if_write accumulates the
	 * payload byte sum into it before the header is written */
	hdr.checksum = 0;

	/* Enable the host interface */
	ret_val = e1000_mng_enable_host_if(hw);
	if (ret_val)
		return ret_val;

	/* Populate the host interface with the contents of "buffer". */
	ret_val = e1000_mng_host_if_write(hw, buffer, length,
					  sizeof(hdr), &(hdr.checksum));
	if (ret_val)
		return ret_val;

	/* Write the manageability command header */
	ret_val = e1000_mng_write_cmd_header(hw, &hdr);
	if (ret_val)
		return ret_val;

	/* Tell the ARC a new command is pending. */
	hicr = er32(HICR);
	ew32(HICR, hicr | E1000_HICR_C);

	return 0;
}
2427
2428/**
2429 * e1000e_enable_mng_pass_thru - Enable processing of ARP's
2430 * @hw: pointer to the HW structure
2431 *
2432 * Verifies the hardware needs to allow ARPs to be processed by the host.
2433 **/
2434bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2435{
2436 u32 manc;
2437 u32 fwsm, factps;
2438 bool ret_val = 0;
2439
2440 manc = er32(MANC);
2441
2442 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
2443 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
2444 return ret_val;
2445
2446 if (hw->mac.arc_subsystem_valid) {
2447 fwsm = er32(FWSM);
2448 factps = er32(FACTPS);
2449
2450 if (!(factps & E1000_FACTPS_MNGCG) &&
2451 ((fwsm & E1000_FWSM_MODE_MASK) ==
2452 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
2453 ret_val = 1;
2454 return ret_val;
2455 }
2456 } else {
2457 if ((manc & E1000_MANC_SMBUS_EN) &&
2458 !(manc & E1000_MANC_ASF_EN)) {
2459 ret_val = 1;
2460 return ret_val;
2461 }
2462 }
2463
2464 return ret_val;
2465}
2466
/**
 * e1000e_read_part_num - Read device part number
 * @hw: pointer to the HW structure
 * @part_num: pointer to storage for the part number
 *
 * Reads the product board assembly (PBA) number from two NVM words and
 * combines them: word 0 in the high 16 bits, word 1 in the low 16 bits.
 * Returns 0 on success or the NVM read error code.
 **/
s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num)
{
	s32 ret_val;
	u16 nvm_data;

	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}
	*part_num = (u32)(nvm_data << 16);

	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}
	*part_num |= nvm_data;

	return 0;
}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
new file mode 100644
index 000000000000..eeb40ccbcb22
--- /dev/null
+++ b/drivers/net/e1000e/netdev.c
@@ -0,0 +1,4441 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include <linux/module.h>
30#include <linux/types.h>
31#include <linux/init.h>
32#include <linux/pci.h>
33#include <linux/vmalloc.h>
34#include <linux/pagemap.h>
35#include <linux/delay.h>
36#include <linux/netdevice.h>
37#include <linux/tcp.h>
38#include <linux/ipv6.h>
39#include <net/checksum.h>
40#include <net/ip6_checksum.h>
41#include <linux/mii.h>
42#include <linux/ethtool.h>
43#include <linux/if_vlan.h>
44#include <linux/cpu.h>
45#include <linux/smp.h>
46
47#include "e1000.h"
48
#define DRV_VERSION "0.2.0"
/* exported so other translation units in the driver can reference them */
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

/* Per-board feature/ops tables, indexed by the board_* enum associated
 * with each supported device. */
static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571] = &e1000_82571_info,
	[board_82572] = &e1000_82572_info,
	[board_82573] = &e1000_82573_info,
	[board_80003es2lan] = &e1000_es2_info,
	[board_ich8lan] = &e1000_ich8_info,
	[board_ich9lan] = &e1000_ich9_info,
};
61
#ifdef DEBUG
/**
 * e1000e_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *e1000e_get_hw_dev_name(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	return adapter->netdev->name;
}
#endif
74
75/**
76 * e1000_desc_unused - calculate if we have unused descriptors
77 **/
78static int e1000_desc_unused(struct e1000_ring *ring)
79{
80 if (ring->next_to_clean > ring->next_to_use)
81 return ring->next_to_clean - ring->next_to_use - 1;
82
83 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
84}
85
86/**
87 * e1000_receive_skb - helper function to handle rx indications
88 * @adapter: board private structure
89 * @status: descriptor status field as written by hardware
90 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
91 * @skb: pointer to sk_buff to be indicated to stack
92 **/
93static void e1000_receive_skb(struct e1000_adapter *adapter,
94 struct net_device *netdev,
95 struct sk_buff *skb,
96 u8 status, u16 vlan)
97{
98 skb->protocol = eth_type_trans(skb, netdev);
99
100 if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
101 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
102 le16_to_cpu(vlan) &
103 E1000_RXD_SPC_VLAN_MASK);
104 else
105 netif_receive_skb(skb);
106
107 netdev->last_rx = jiffies;
108}
109
/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @sk_buff: socket buffer with received data
 *
 * Sets skb->ip_summed based on what the hardware verified; increments
 * the adapter's hw_csum_err/hw_csum_good counters accordingly.
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	/* status is in the low 16 bits, errors in bits 24+ as packed by
	 * the caller */
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;
	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/* IP fragment with UDP payload */
		/* Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		csum = ntohl(csum ^ 0xFFFF);
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}
153
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @cleaned_count: number of descriptors to refill with fresh buffers
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* reuse a recycled skb left behind by the cleanup path */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_buffer_len,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		/* tail is written as the last filled index, one behind
		 * next_to_use (with wrap) */
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
226
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @cleaned_count: number of descriptors to refill with fresh buffers
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		/* each descriptor gets PS_PAGE_BUFFERS page slots; only the
		 * first rx_ps_pages of them are backed by real pages */
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
						     + j];
			if (j < adapter->rx_ps_pages) {
				if (!ps_page->page) {
					ps_page->page = alloc_page(GFP_ATOMIC);
					if (!ps_page->page) {
						adapter->alloc_rx_buff_failed++;
						goto no_buffers;
					}
					ps_page->dma = pci_map_page(pdev,
							   ps_page->page,
							   0, PAGE_SIZE,
							   PCI_DMA_FROMDEVICE);
					if (pci_dma_mapping_error(
							ps_page->dma)) {
						dev_err(&adapter->pdev->dev,
						  "RX DMA page map failed\n");
						adapter->rx_dma_failed++;
						goto no_buffers;
					}
				}
				/*
				 * Refresh the desc even if buffer_addrs
				 * didn't change because each write-back
				 * erases this info.
				 */
				rx_desc->read.buffer_addr[j+1] =
				     cpu_to_le64(ps_page->dma);
			} else {
				/* unused slot: all-ones marks it invalid */
				rx_desc->read.buffer_addr[j+1] = ~0;
			}
		}

		/* buffer 0 holds the header portion of the split packet */
		skb = netdev_alloc_skb(netdev,
				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_ps_bsize0,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* tail is written as the last filled index (with wrap) */
		if (!(i--))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		/* Hardware increments by 16 bytes, but packet split
		 * descriptors are 32 bytes...so we increment tail
		 * twice as much.
		 */
		writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
	}
}
337
/**
 * e1000_alloc_rx_buffers_jumbo - Replace used jumbo receive buffers
 *
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_rx_buffers_jumbo(struct e1000_adapter *adapter,
					 int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	/* small skb for the header only; payload lands in the page */
	unsigned int bufsz = 256 -
			     16 /*for skb_reserve */ -
			     NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* reuse a recycled skb left behind by the cleanup path */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (!buffer_info->page) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = pci_map_page(pdev,
						buffer_info->page, 0,
						PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
		/* NOTE(review): this error check also runs when dma was
		 * already set and pci_map_page was skipped — confirm a
		 * previously-valid handle never looks like an error. */
		if (pci_dma_mapping_error(buffer_info->dma)) {
			dev_err(&adapter->pdev->dev, "RX DMA page map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		/* tail is written as the last filled index (with wrap) */
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
425
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @work_done: running count of packets processed, shared with NAPI caller
 * @work_to_do: NAPI budget; processing stops once *work_done reaches it
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	/* DD set by hardware means the descriptor has been written back */
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		/* advance and prefetch the next descriptor now; the current
		 * slot may be recycled below */
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_single(pdev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* !EOP means multiple descriptors were used to store a single
		 * packet, also make sure the frame isn't just CRC only */
		if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
			/* All receives must fit into a single buffer */
			ndev_dbg(netdev, "%s: Receive packet consumed "
				 "multiple buffers\n", netdev->name);
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		length -= 4;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += length;
		total_rx_packets++;

		/* code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				memcpy(new_skb->data - NET_IP_ALIGN,
				       skb->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special);

next_desc:
		/* clear DD so this descriptor is not seen as done again */
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	return cleaned;
}
558
559static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
560 u16 length)
561{
562 bi->page = NULL;
563 skb->len += length;
564 skb->data_len += length;
565 skb->truesize += length;
566}
567
568static void e1000_put_txbuf(struct e1000_adapter *adapter,
569 struct e1000_buffer *buffer_info)
570{
571 if (buffer_info->dma) {
572 pci_unmap_page(adapter->pdev, buffer_info->dma,
573 buffer_info->length, PCI_DMA_TODEVICE);
574 buffer_info->dma = 0;
575 }
576 if (buffer_info->skb) {
577 dev_kfree_skb_any(buffer_info->skb);
578 buffer_info->skb = NULL;
579 }
580}
581
/**
 * e1000_print_tx_hang - log Tx ring state when a transmit hang is detected
 * @adapter: board private structure
 *
 * Dumps the hardware head/tail registers and the software ring indices so
 * the descriptor the cleanup path is stuck waiting on (next_to_watch with
 * no DD status) can be diagnosed from the log.
 */
static void e1000_print_tx_hang(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	/* the end-of-packet descriptor cleanup is waiting on */
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct net_device *netdev = adapter->netdev;

	/* detected Tx unit hang */
	ndev_err(netdev,
		 "Detected Tx Unit Hang:\n"
		 "  TDH                  <%x>\n"
		 "  TDT                  <%x>\n"
		 "  next_to_use          <%x>\n"
		 "  next_to_clean        <%x>\n"
		 "buffer_info[next_to_clean]:\n"
		 "  time_stamp           <%lx>\n"
		 "  next_to_watch        <%x>\n"
		 "  jiffies              <%lx>\n"
		 "  next_to_watch.status <%x>\n",
		 readl(adapter->hw.hw_addr + tx_ring->head),
		 readl(adapter->hw.hw_addr + tx_ring->tail),
		 tx_ring->next_to_use,
		 tx_ring->next_to_clean,
		 tx_ring->buffer_info[eop].time_stamp,
		 eop,
		 jiffies,
		 eop_desc->upper.fields.status);
}
611
612/**
613 * e1000_clean_tx_irq - Reclaim resources after transmit completes
614 * @adapter: board private structure
615 *
616 * the return value indicates whether actual cleaning was done, there
617 * is no guarantee that everything was cleaned
618 **/
619static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
620{
621 struct net_device *netdev = adapter->netdev;
622 struct e1000_hw *hw = &adapter->hw;
623 struct e1000_ring *tx_ring = adapter->tx_ring;
624 struct e1000_tx_desc *tx_desc, *eop_desc;
625 struct e1000_buffer *buffer_info;
626 unsigned int i, eop;
627 unsigned int count = 0;
628 bool cleaned = 0;
629 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
630
631 i = tx_ring->next_to_clean;
632 eop = tx_ring->buffer_info[i].next_to_watch;
633 eop_desc = E1000_TX_DESC(*tx_ring, eop);
634
635 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
636 for (cleaned = 0; !cleaned; ) {
637 tx_desc = E1000_TX_DESC(*tx_ring, i);
638 buffer_info = &tx_ring->buffer_info[i];
639 cleaned = (i == eop);
640
641 if (cleaned) {
642 struct sk_buff *skb = buffer_info->skb;
643 unsigned int segs, bytecount;
644 segs = skb_shinfo(skb)->gso_segs ?: 1;
645 /* multiply data chunks by size of headers */
646 bytecount = ((segs - 1) * skb_headlen(skb)) +
647 skb->len;
648 total_tx_packets += segs;
649 total_tx_bytes += bytecount;
650 }
651
652 e1000_put_txbuf(adapter, buffer_info);
653 tx_desc->upper.data = 0;
654
655 i++;
656 if (i == tx_ring->count)
657 i = 0;
658 }
659
660 eop = tx_ring->buffer_info[i].next_to_watch;
661 eop_desc = E1000_TX_DESC(*tx_ring, eop);
662#define E1000_TX_WEIGHT 64
663 /* weight of a sort for tx, to avoid endless transmit cleanup */
664 if (count++ == E1000_TX_WEIGHT)
665 break;
666 }
667
668 tx_ring->next_to_clean = i;
669
670#define TX_WAKE_THRESHOLD 32
671 if (cleaned && netif_carrier_ok(netdev) &&
672 e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
673 /* Make sure that anybody stopping the queue after this
674 * sees the new next_to_clean.
675 */
676 smp_mb();
677
678 if (netif_queue_stopped(netdev) &&
679 !(test_bit(__E1000_DOWN, &adapter->state))) {
680 netif_wake_queue(netdev);
681 ++adapter->restart_queue;
682 }
683 }
684
685 if (adapter->detect_tx_hung) {
686 /* Detect a transmit hang in hardware, this serializes the
687 * check with the clearing of time_stamp and movement of i */
688 adapter->detect_tx_hung = 0;
689 if (tx_ring->buffer_info[eop].dma &&
690 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
691 + (adapter->tx_timeout_factor * HZ))
692 && !(er32(STATUS) &
693 E1000_STATUS_TXOFF)) {
694 e1000_print_tx_hang(adapter);
695 netif_stop_queue(netdev);
696 }
697 }
698 adapter->total_tx_bytes += total_tx_bytes;
699 adapter->total_tx_packets += total_tx_packets;
700 return cleaned;
701}
702
703/**
704 * e1000_clean_rx_irq_jumbo - Send received data up the network stack; legacy
705 * @adapter: board private structure
706 *
707 * the return value indicates whether actual cleaning was done, there
708 * is no guarantee that everything was cleaned
709 **/
710static bool e1000_clean_rx_irq_jumbo(struct e1000_adapter *adapter,
711 int *work_done, int work_to_do)
712{
713 struct net_device *netdev = adapter->netdev;
714 struct pci_dev *pdev = adapter->pdev;
715 struct e1000_ring *rx_ring = adapter->rx_ring;
716 struct e1000_rx_desc *rx_desc, *next_rxd;
717 struct e1000_buffer *buffer_info, *next_buffer;
718 u32 length;
719 unsigned int i;
720 int cleaned_count = 0;
721 bool cleaned = 0;
722 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
723
724 i = rx_ring->next_to_clean;
725 rx_desc = E1000_RX_DESC(*rx_ring, i);
726 buffer_info = &rx_ring->buffer_info[i];
727
728 while (rx_desc->status & E1000_RXD_STAT_DD) {
729 struct sk_buff *skb;
730 u8 status;
731
732 if (*work_done >= work_to_do)
733 break;
734 (*work_done)++;
735
736 status = rx_desc->status;
737 skb = buffer_info->skb;
738 buffer_info->skb = NULL;
739
740 i++;
741 if (i == rx_ring->count)
742 i = 0;
743 next_rxd = E1000_RX_DESC(*rx_ring, i);
744 prefetch(next_rxd);
745
746 next_buffer = &rx_ring->buffer_info[i];
747
748 cleaned = 1;
749 cleaned_count++;
750 pci_unmap_page(pdev,
751 buffer_info->dma,
752 PAGE_SIZE,
753 PCI_DMA_FROMDEVICE);
754 buffer_info->dma = 0;
755
756 length = le16_to_cpu(rx_desc->length);
757
758 /* errors is only valid for DD + EOP descriptors */
759 if ((status & E1000_RXD_STAT_EOP) &&
760 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
761 /* recycle both page and skb */
762 buffer_info->skb = skb;
763 /* an error means any chain goes out the window too */
764 if (rx_ring->rx_skb_top)
765 dev_kfree_skb(rx_ring->rx_skb_top);
766 rx_ring->rx_skb_top = NULL;
767 goto next_desc;
768 }
769
770#define rxtop rx_ring->rx_skb_top
771 if (!(status & E1000_RXD_STAT_EOP)) {
772 /* this descriptor is only the beginning (or middle) */
773 if (!rxtop) {
774 /* this is the beginning of a chain */
775 rxtop = skb;
776 skb_fill_page_desc(rxtop, 0, buffer_info->page,
777 0, length);
778 } else {
779 /* this is the middle of a chain */
780 skb_fill_page_desc(rxtop,
781 skb_shinfo(rxtop)->nr_frags,
782 buffer_info->page, 0,
783 length);
784 /* re-use the skb, only consumed the page */
785 buffer_info->skb = skb;
786 }
787 e1000_consume_page(buffer_info, rxtop, length);
788 goto next_desc;
789 } else {
790 if (rxtop) {
791 /* end of the chain */
792 skb_fill_page_desc(rxtop,
793 skb_shinfo(rxtop)->nr_frags,
794 buffer_info->page, 0, length);
795 /* re-use the current skb, we only consumed the
796 * page */
797 buffer_info->skb = skb;
798 skb = rxtop;
799 rxtop = NULL;
800 e1000_consume_page(buffer_info, skb, length);
801 } else {
802 /* no chain, got EOP, this buf is the packet
803 * copybreak to save the put_page/alloc_page */
804 if (length <= copybreak &&
805 skb_tailroom(skb) >= length) {
806 u8 *vaddr;
807 vaddr = kmap_atomic(buffer_info->page,
808 KM_SKB_DATA_SOFTIRQ);
809 memcpy(skb_tail_pointer(skb),
810 vaddr, length);
811 kunmap_atomic(vaddr,
812 KM_SKB_DATA_SOFTIRQ);
813 /* re-use the page, so don't erase
814 * buffer_info->page */
815 skb_put(skb, length);
816 } else {
817 skb_fill_page_desc(skb, 0,
818 buffer_info->page, 0,
819 length);
820 e1000_consume_page(buffer_info, skb,
821 length);
822 }
823 }
824 }
825
826 /* Receive Checksum Offload XXX recompute due to CRC strip? */
827 e1000_rx_checksum(adapter,
828 (u32)(status) |
829 ((u32)(rx_desc->errors) << 24),
830 le16_to_cpu(rx_desc->csum), skb);
831
832 pskb_trim(skb, skb->len - 4);
833
834 /* probably a little skewed due to removing CRC */
835 total_rx_bytes += skb->len;
836 total_rx_packets++;
837
838 /* eth type trans needs skb->data to point to something */
839 if (!pskb_may_pull(skb, ETH_HLEN)) {
840 ndev_err(netdev, "__pskb_pull_tail failed.\n");
841 dev_kfree_skb(skb);
842 goto next_desc;
843 }
844
845 e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special);
846
847next_desc:
848 rx_desc->status = 0;
849
850 /* return some buffers to hardware, one at a time is too slow */
851 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
852 adapter->alloc_rx_buf(adapter, cleaned_count);
853 cleaned_count = 0;
854 }
855
856 /* use prefetched values */
857 rx_desc = next_rxd;
858 buffer_info = next_buffer;
859 }
860 rx_ring->next_to_clean = i;
861
862 cleaned_count = e1000_desc_unused(rx_ring);
863 if (cleaned_count)
864 adapter->alloc_rx_buf(adapter, cleaned_count);
865
866 adapter->total_rx_packets += total_rx_packets;
867 adapter->total_rx_bytes += total_rx_bytes;
868 return cleaned;
869}
870
871/**
872 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
873 * @adapter: board private structure
874 *
875 * the return value indicates whether actual cleaning was done, there
876 * is no guarantee that everything was cleaned
877 **/
878static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
879 int *work_done, int work_to_do)
880{
881 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
882 struct net_device *netdev = adapter->netdev;
883 struct pci_dev *pdev = adapter->pdev;
884 struct e1000_ring *rx_ring = adapter->rx_ring;
885 struct e1000_buffer *buffer_info, *next_buffer;
886 struct e1000_ps_page *ps_page;
887 struct sk_buff *skb;
888 unsigned int i, j;
889 u32 length, staterr;
890 int cleaned_count = 0;
891 bool cleaned = 0;
892 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
893
894 i = rx_ring->next_to_clean;
895 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
896 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
897 buffer_info = &rx_ring->buffer_info[i];
898
899 while (staterr & E1000_RXD_STAT_DD) {
900 if (*work_done >= work_to_do)
901 break;
902 (*work_done)++;
903 skb = buffer_info->skb;
904
905 /* in the packet split case this is header only */
906 prefetch(skb->data - NET_IP_ALIGN);
907
908 i++;
909 if (i == rx_ring->count)
910 i = 0;
911 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
912 prefetch(next_rxd);
913
914 next_buffer = &rx_ring->buffer_info[i];
915
916 cleaned = 1;
917 cleaned_count++;
918 pci_unmap_single(pdev, buffer_info->dma,
919 adapter->rx_ps_bsize0,
920 PCI_DMA_FROMDEVICE);
921 buffer_info->dma = 0;
922
923 if (!(staterr & E1000_RXD_STAT_EOP)) {
924 ndev_dbg(netdev, "%s: Packet Split buffers didn't pick "
925 "up the full packet\n", netdev->name);
926 dev_kfree_skb_irq(skb);
927 goto next_desc;
928 }
929
930 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
931 dev_kfree_skb_irq(skb);
932 goto next_desc;
933 }
934
935 length = le16_to_cpu(rx_desc->wb.middle.length0);
936
937 if (!length) {
938 ndev_dbg(netdev, "%s: Last part of the packet spanning"
939 " multiple descriptors\n", netdev->name);
940 dev_kfree_skb_irq(skb);
941 goto next_desc;
942 }
943
944 /* Good Receive */
945 skb_put(skb, length);
946
947 {
948 /* this looks ugly, but it seems compiler issues make it
949 more efficient than reusing j */
950 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
951
952 /* page alloc/put takes too long and effects small packet
953 * throughput, so unsplit small packets and save the alloc/put*/
954 if (l1 && (l1 <= copybreak) &&
955 ((length + l1) <= adapter->rx_ps_bsize0)) {
956 u8 *vaddr;
957
958 ps_page = &rx_ring->ps_pages[i * PS_PAGE_BUFFERS];
959
960 /* there is no documentation about how to call
961 * kmap_atomic, so we can't hold the mapping
962 * very long */
963 pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
964 PAGE_SIZE, PCI_DMA_FROMDEVICE);
965 vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
966 memcpy(skb_tail_pointer(skb), vaddr, l1);
967 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
968 pci_dma_sync_single_for_device(pdev, ps_page->dma,
969 PAGE_SIZE, PCI_DMA_FROMDEVICE);
970 /* remove the CRC */
971 l1 -= 4;
972 skb_put(skb, l1);
973 goto copydone;
974 } /* if */
975 }
976
977 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
978 length = le16_to_cpu(rx_desc->wb.upper.length[j]);
979 if (!length)
980 break;
981
982 ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + j];
983 pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
984 PCI_DMA_FROMDEVICE);
985 ps_page->dma = 0;
986 skb_fill_page_desc(skb, j, ps_page->page, 0, length);
987 ps_page->page = NULL;
988 skb->len += length;
989 skb->data_len += length;
990 skb->truesize += length;
991 }
992
993 /* strip the ethernet crc, problem is we're using pages now so
994 * this whole operation can get a little cpu intensive */
995 pskb_trim(skb, skb->len - 4);
996
997copydone:
998 total_rx_bytes += skb->len;
999 total_rx_packets++;
1000
1001 e1000_rx_checksum(adapter, staterr, le16_to_cpu(
1002 rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
1003
1004 if (rx_desc->wb.upper.header_status &
1005 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
1006 adapter->rx_hdr_split++;
1007
1008 e1000_receive_skb(adapter, netdev, skb,
1009 staterr, rx_desc->wb.middle.vlan);
1010
1011next_desc:
1012 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
1013 buffer_info->skb = NULL;
1014
1015 /* return some buffers to hardware, one at a time is too slow */
1016 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
1017 adapter->alloc_rx_buf(adapter, cleaned_count);
1018 cleaned_count = 0;
1019 }
1020
1021 /* use prefetched values */
1022 rx_desc = next_rxd;
1023 buffer_info = next_buffer;
1024
1025 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1026 }
1027 rx_ring->next_to_clean = i;
1028
1029 cleaned_count = e1000_desc_unused(rx_ring);
1030 if (cleaned_count)
1031 adapter->alloc_rx_buf(adapter, cleaned_count);
1032
1033 adapter->total_rx_packets += total_rx_packets;
1034 adapter->total_rx_bytes += total_rx_bytes;
1035 return cleaned;
1036}
1037
1038/**
1039 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1040 * @adapter: board private structure
1041 **/
1042static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1043{
1044 struct e1000_ring *rx_ring = adapter->rx_ring;
1045 struct e1000_buffer *buffer_info;
1046 struct e1000_ps_page *ps_page;
1047 struct pci_dev *pdev = adapter->pdev;
1048 unsigned long size;
1049 unsigned int i, j;
1050
1051 /* Free all the Rx ring sk_buffs */
1052 for (i = 0; i < rx_ring->count; i++) {
1053 buffer_info = &rx_ring->buffer_info[i];
1054 if (buffer_info->dma) {
1055 if (adapter->clean_rx == e1000_clean_rx_irq)
1056 pci_unmap_single(pdev, buffer_info->dma,
1057 adapter->rx_buffer_len,
1058 PCI_DMA_FROMDEVICE);
1059 else if (adapter->clean_rx == e1000_clean_rx_irq_jumbo)
1060 pci_unmap_page(pdev, buffer_info->dma,
1061 PAGE_SIZE, PCI_DMA_FROMDEVICE);
1062 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1063 pci_unmap_single(pdev, buffer_info->dma,
1064 adapter->rx_ps_bsize0,
1065 PCI_DMA_FROMDEVICE);
1066 buffer_info->dma = 0;
1067 }
1068
1069 if (buffer_info->page) {
1070 put_page(buffer_info->page);
1071 buffer_info->page = NULL;
1072 }
1073
1074 if (buffer_info->skb) {
1075 dev_kfree_skb(buffer_info->skb);
1076 buffer_info->skb = NULL;
1077 }
1078
1079 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1080 ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
1081 + j];
1082 if (!ps_page->page)
1083 break;
1084 pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
1085 PCI_DMA_FROMDEVICE);
1086 ps_page->dma = 0;
1087 put_page(ps_page->page);
1088 ps_page->page = NULL;
1089 }
1090 }
1091
1092 /* there also may be some cached data from a chained receive */
1093 if (rx_ring->rx_skb_top) {
1094 dev_kfree_skb(rx_ring->rx_skb_top);
1095 rx_ring->rx_skb_top = NULL;
1096 }
1097
1098 size = sizeof(struct e1000_buffer) * rx_ring->count;
1099 memset(rx_ring->buffer_info, 0, size);
1100 size = sizeof(struct e1000_ps_page)
1101 * (rx_ring->count * PS_PAGE_BUFFERS);
1102 memset(rx_ring->ps_pages, 0, size);
1103
1104 /* Zero out the descriptor ring */
1105 memset(rx_ring->desc, 0, rx_ring->size);
1106
1107 rx_ring->next_to_clean = 0;
1108 rx_ring->next_to_use = 0;
1109
1110 writel(0, adapter->hw.hw_addr + rx_ring->head);
1111 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1112}
1113
1114/**
1115 * e1000_intr_msi - Interrupt Handler
1116 * @irq: interrupt number
1117 * @data: pointer to a network interface device structure
1118 **/
1119static irqreturn_t e1000_intr_msi(int irq, void *data)
1120{
1121 struct net_device *netdev = data;
1122 struct e1000_adapter *adapter = netdev_priv(netdev);
1123 struct e1000_hw *hw = &adapter->hw;
1124 u32 icr = er32(ICR);
1125
1126 /* read ICR disables interrupts using IAM, so keep up with our
1127 * enable/disable accounting */
1128 atomic_inc(&adapter->irq_sem);
1129
1130 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1131 hw->mac.get_link_status = 1;
1132 /* ICH8 workaround-- Call gig speed drop workaround on cable
1133 * disconnect (LSC) before accessing any PHY registers */
1134 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1135 (!(er32(STATUS) & E1000_STATUS_LU)))
1136 e1000e_gig_downshift_workaround_ich8lan(hw);
1137
1138 /* 80003ES2LAN workaround-- For packet buffer work-around on
1139 * link down event; disable receives here in the ISR and reset
1140 * adapter in watchdog */
1141 if (netif_carrier_ok(netdev) &&
1142 adapter->flags & FLAG_RX_NEEDS_RESTART) {
1143 /* disable receives */
1144 u32 rctl = er32(RCTL);
1145 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1146 }
1147 /* guard against interrupt when we're going down */
1148 if (!test_bit(__E1000_DOWN, &adapter->state))
1149 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1150 }
1151
1152 if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
1153 adapter->total_tx_bytes = 0;
1154 adapter->total_tx_packets = 0;
1155 adapter->total_rx_bytes = 0;
1156 adapter->total_rx_packets = 0;
1157 __netif_rx_schedule(netdev, &adapter->napi);
1158 } else {
1159 atomic_dec(&adapter->irq_sem);
1160 }
1161
1162 return IRQ_HANDLED;
1163}
1164
1165/**
1166 * e1000_intr - Interrupt Handler
1167 * @irq: interrupt number
1168 * @data: pointer to a network interface device structure
1169 **/
1170static irqreturn_t e1000_intr(int irq, void *data)
1171{
1172 struct net_device *netdev = data;
1173 struct e1000_adapter *adapter = netdev_priv(netdev);
1174 struct e1000_hw *hw = &adapter->hw;
1175
1176 u32 rctl, icr = er32(ICR);
1177 if (!icr)
1178 return IRQ_NONE; /* Not our interrupt */
1179
1180 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1181 * not set, then the adapter didn't send an interrupt */
1182 if (!(icr & E1000_ICR_INT_ASSERTED))
1183 return IRQ_NONE;
1184
1185 /* Interrupt Auto-Mask...upon reading ICR,
1186 * interrupts are masked. No need for the
1187 * IMC write, but it does mean we should
1188 * account for it ASAP. */
1189 atomic_inc(&adapter->irq_sem);
1190
1191 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1192 hw->mac.get_link_status = 1;
1193 /* ICH8 workaround-- Call gig speed drop workaround on cable
1194 * disconnect (LSC) before accessing any PHY registers */
1195 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1196 (!(er32(STATUS) & E1000_STATUS_LU)))
1197 e1000e_gig_downshift_workaround_ich8lan(hw);
1198
1199 /* 80003ES2LAN workaround--
1200 * For packet buffer work-around on link down event;
1201 * disable receives here in the ISR and
1202 * reset adapter in watchdog
1203 */
1204 if (netif_carrier_ok(netdev) &&
1205 (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
1206 /* disable receives */
1207 rctl = er32(RCTL);
1208 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1209 }
1210 /* guard against interrupt when we're going down */
1211 if (!test_bit(__E1000_DOWN, &adapter->state))
1212 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1213 }
1214
1215 if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
1216 adapter->total_tx_bytes = 0;
1217 adapter->total_tx_packets = 0;
1218 adapter->total_rx_bytes = 0;
1219 adapter->total_rx_packets = 0;
1220 __netif_rx_schedule(netdev, &adapter->napi);
1221 } else {
1222 atomic_dec(&adapter->irq_sem);
1223 }
1224
1225 return IRQ_HANDLED;
1226}
1227
1228static int e1000_request_irq(struct e1000_adapter *adapter)
1229{
1230 struct net_device *netdev = adapter->netdev;
1231 void (*handler) = &e1000_intr;
1232 int irq_flags = IRQF_SHARED;
1233 int err;
1234
1235 err = pci_enable_msi(adapter->pdev);
1236 if (err) {
1237 ndev_warn(netdev,
1238 "Unable to allocate MSI interrupt Error: %d\n", err);
1239 } else {
1240 adapter->flags |= FLAG_MSI_ENABLED;
1241 handler = &e1000_intr_msi;
1242 irq_flags = 0;
1243 }
1244
1245 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
1246 netdev);
1247 if (err) {
1248 if (adapter->flags & FLAG_MSI_ENABLED)
1249 pci_disable_msi(adapter->pdev);
1250 ndev_err(netdev,
1251 "Unable to allocate interrupt Error: %d\n", err);
1252 }
1253
1254 return err;
1255}
1256
1257static void e1000_free_irq(struct e1000_adapter *adapter)
1258{
1259 struct net_device *netdev = adapter->netdev;
1260
1261 free_irq(adapter->pdev->irq, netdev);
1262 if (adapter->flags & FLAG_MSI_ENABLED) {
1263 pci_disable_msi(adapter->pdev);
1264 adapter->flags &= ~FLAG_MSI_ENABLED;
1265 }
1266}
1267
1268/**
1269 * e1000_irq_disable - Mask off interrupt generation on the NIC
1270 **/
1271static void e1000_irq_disable(struct e1000_adapter *adapter)
1272{
1273 struct e1000_hw *hw = &adapter->hw;
1274
1275 atomic_inc(&adapter->irq_sem);
1276 ew32(IMC, ~0);
1277 e1e_flush();
1278 synchronize_irq(adapter->pdev->irq);
1279}
1280
1281/**
1282 * e1000_irq_enable - Enable default interrupt generation settings
1283 **/
1284static void e1000_irq_enable(struct e1000_adapter *adapter)
1285{
1286 struct e1000_hw *hw = &adapter->hw;
1287
1288 if (atomic_dec_and_test(&adapter->irq_sem)) {
1289 ew32(IMS, IMS_ENABLE_MASK);
1290 e1e_flush();
1291 }
1292}
1293
1294/**
1295 * e1000_get_hw_control - get control of the h/w from f/w
1296 * @adapter: address of board private structure
1297 *
1298 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
1299 * For ASF and Pass Through versions of f/w this means that
1300 * the driver is loaded. For AMT version (only with 82573)
1301 * of the f/w this means that the network i/f is open.
1302 **/
1303static void e1000_get_hw_control(struct e1000_adapter *adapter)
1304{
1305 struct e1000_hw *hw = &adapter->hw;
1306 u32 ctrl_ext;
1307 u32 swsm;
1308
1309 /* Let firmware know the driver has taken over */
1310 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
1311 swsm = er32(SWSM);
1312 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
1313 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
1314 ctrl_ext = er32(CTRL_EXT);
1315 ew32(CTRL_EXT,
1316 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1317 }
1318}
1319
1320/**
1321 * e1000_release_hw_control - release control of the h/w to f/w
1322 * @adapter: address of board private structure
1323 *
1324 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
1325 * For ASF and Pass Through versions of f/w this means that the
1326 * driver is no longer loaded. For AMT version (only with 82573) i
1327 * of the f/w this means that the network i/f is closed.
1328 *
1329 **/
1330static void e1000_release_hw_control(struct e1000_adapter *adapter)
1331{
1332 struct e1000_hw *hw = &adapter->hw;
1333 u32 ctrl_ext;
1334 u32 swsm;
1335
1336 /* Let firmware taken over control of h/w */
1337 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
1338 swsm = er32(SWSM);
1339 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
1340 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
1341 ctrl_ext = er32(CTRL_EXT);
1342 ew32(CTRL_EXT,
1343 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1344 }
1345}
1346
1347static void e1000_release_manageability(struct e1000_adapter *adapter)
1348{
1349 if (adapter->flags & FLAG_MNG_PT_ENABLED) {
1350 struct e1000_hw *hw = &adapter->hw;
1351
1352 u32 manc = er32(MANC);
1353
1354 /* re-enable hardware interception of ARP */
1355 manc |= E1000_MANC_ARP_EN;
1356 manc &= ~E1000_MANC_EN_MNG2HOST;
1357
1358 /* don't explicitly have to mess with MANC2H since
1359 * MANC has an enable disable that gates MANC2H */
1360 ew32(MANC, manc);
1361 }
1362}
1363
1364/**
1365 * @e1000_alloc_ring - allocate memory for a ring structure
1366 **/
1367static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
1368 struct e1000_ring *ring)
1369{
1370 struct pci_dev *pdev = adapter->pdev;
1371
1372 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
1373 GFP_KERNEL);
1374 if (!ring->desc)
1375 return -ENOMEM;
1376
1377 return 0;
1378}
1379
1380/**
1381 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
1382 * @adapter: board private structure
1383 *
1384 * Return 0 on success, negative on failure
1385 **/
1386int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
1387{
1388 struct e1000_ring *tx_ring = adapter->tx_ring;
1389 int err = -ENOMEM, size;
1390
1391 size = sizeof(struct e1000_buffer) * tx_ring->count;
1392 tx_ring->buffer_info = vmalloc(size);
1393 if (!tx_ring->buffer_info)
1394 goto err;
1395 memset(tx_ring->buffer_info, 0, size);
1396
1397 /* round up to nearest 4K */
1398 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
1399 tx_ring->size = ALIGN(tx_ring->size, 4096);
1400
1401 err = e1000_alloc_ring_dma(adapter, tx_ring);
1402 if (err)
1403 goto err;
1404
1405 tx_ring->next_to_use = 0;
1406 tx_ring->next_to_clean = 0;
1407 spin_lock_init(&adapter->tx_queue_lock);
1408
1409 return 0;
1410err:
1411 vfree(tx_ring->buffer_info);
1412 ndev_err(adapter->netdev,
1413 "Unable to allocate memory for the transmit descriptor ring\n");
1414 return err;
1415}
1416
1417/**
1418 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
1419 * @adapter: board private structure
1420 *
1421 * Returns 0 on success, negative on failure
1422 **/
1423int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
1424{
1425 struct e1000_ring *rx_ring = adapter->rx_ring;
1426 int size, desc_len, err = -ENOMEM;
1427
1428 size = sizeof(struct e1000_buffer) * rx_ring->count;
1429 rx_ring->buffer_info = vmalloc(size);
1430 if (!rx_ring->buffer_info)
1431 goto err;
1432 memset(rx_ring->buffer_info, 0, size);
1433
1434 rx_ring->ps_pages = kcalloc(rx_ring->count * PS_PAGE_BUFFERS,
1435 sizeof(struct e1000_ps_page),
1436 GFP_KERNEL);
1437 if (!rx_ring->ps_pages)
1438 goto err;
1439
1440 desc_len = sizeof(union e1000_rx_desc_packet_split);
1441
1442 /* Round up to nearest 4K */
1443 rx_ring->size = rx_ring->count * desc_len;
1444 rx_ring->size = ALIGN(rx_ring->size, 4096);
1445
1446 err = e1000_alloc_ring_dma(adapter, rx_ring);
1447 if (err)
1448 goto err;
1449
1450 rx_ring->next_to_clean = 0;
1451 rx_ring->next_to_use = 0;
1452 rx_ring->rx_skb_top = NULL;
1453
1454 return 0;
1455err:
1456 vfree(rx_ring->buffer_info);
1457 kfree(rx_ring->ps_pages);
1458 ndev_err(adapter->netdev,
1459 "Unable to allocate memory for the transmit descriptor ring\n");
1460 return err;
1461}
1462
1463/**
1464 * e1000_clean_tx_ring - Free Tx Buffers
1465 * @adapter: board private structure
1466 **/
1467static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
1468{
1469 struct e1000_ring *tx_ring = adapter->tx_ring;
1470 struct e1000_buffer *buffer_info;
1471 unsigned long size;
1472 unsigned int i;
1473
1474 for (i = 0; i < tx_ring->count; i++) {
1475 buffer_info = &tx_ring->buffer_info[i];
1476 e1000_put_txbuf(adapter, buffer_info);
1477 }
1478
1479 size = sizeof(struct e1000_buffer) * tx_ring->count;
1480 memset(tx_ring->buffer_info, 0, size);
1481
1482 memset(tx_ring->desc, 0, tx_ring->size);
1483
1484 tx_ring->next_to_use = 0;
1485 tx_ring->next_to_clean = 0;
1486
1487 writel(0, adapter->hw.hw_addr + tx_ring->head);
1488 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1489}
1490
1491/**
1492 * e1000e_free_tx_resources - Free Tx Resources per Queue
1493 * @adapter: board private structure
1494 *
1495 * Free all transmit software resources
1496 **/
1497void e1000e_free_tx_resources(struct e1000_adapter *adapter)
1498{
1499 struct pci_dev *pdev = adapter->pdev;
1500 struct e1000_ring *tx_ring = adapter->tx_ring;
1501
1502 e1000_clean_tx_ring(adapter);
1503
1504 vfree(tx_ring->buffer_info);
1505 tx_ring->buffer_info = NULL;
1506
1507 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1508 tx_ring->dma);
1509 tx_ring->desc = NULL;
1510}
1511
1512/**
1513 * e1000e_free_rx_resources - Free Rx Resources
1514 * @adapter: board private structure
1515 *
1516 * Free all receive software resources
1517 **/
1518
1519void e1000e_free_rx_resources(struct e1000_adapter *adapter)
1520{
1521 struct pci_dev *pdev = adapter->pdev;
1522 struct e1000_ring *rx_ring = adapter->rx_ring;
1523
1524 e1000_clean_rx_ring(adapter);
1525
1526 vfree(rx_ring->buffer_info);
1527 rx_ring->buffer_info = NULL;
1528
1529 kfree(rx_ring->ps_pages);
1530 rx_ring->ps_pages = NULL;
1531
1532 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1533 rx_ring->dma);
1534 rx_ring->desc = NULL;
1535}
1536
1537/**
1538 * e1000_update_itr - update the dynamic ITR value based on statistics
1539 * Stores a new ITR value based on packets and byte
1540 * counts during the last interrupt. The advantage of per interrupt
1541 * computation is faster updates and more accurate ITR for the current
1542 * traffic pattern. Constants in this function were computed
1543 * based on theoretical maximum wire speed and thresholds were set based
1544 * on testing data as well as attempting to minimize response time
1545 * while increasing bulk throughput.
1546 * this functionality is controlled by the InterruptThrottleRate module
1547 * parameter (see e1000_param.c)
1548 * @adapter: pointer to adapter
1549 * @itr_setting: current adapter->itr
1550 * @packets: the number of packets during this measurement interval
1551 * @bytes: the number of bytes during this measurement interval
1552 **/
1553static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
1554 u16 itr_setting, int packets,
1555 int bytes)
1556{
1557 unsigned int retval = itr_setting;
1558
1559 if (packets == 0)
1560 goto update_itr_done;
1561
1562 switch (itr_setting) {
1563 case lowest_latency:
1564 /* handle TSO and jumbo frames */
1565 if (bytes/packets > 8000)
1566 retval = bulk_latency;
1567 else if ((packets < 5) && (bytes > 512)) {
1568 retval = low_latency;
1569 }
1570 break;
1571 case low_latency: /* 50 usec aka 20000 ints/s */
1572 if (bytes > 10000) {
1573 /* this if handles the TSO accounting */
1574 if (bytes/packets > 8000) {
1575 retval = bulk_latency;
1576 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
1577 retval = bulk_latency;
1578 } else if ((packets > 35)) {
1579 retval = lowest_latency;
1580 }
1581 } else if (bytes/packets > 2000) {
1582 retval = bulk_latency;
1583 } else if (packets <= 2 && bytes < 512) {
1584 retval = lowest_latency;
1585 }
1586 break;
1587 case bulk_latency: /* 250 usec aka 4000 ints/s */
1588 if (bytes > 25000) {
1589 if (packets > 35) {
1590 retval = low_latency;
1591 }
1592 } else if (bytes < 6000) {
1593 retval = low_latency;
1594 }
1595 break;
1596 }
1597
1598update_itr_done:
1599 return retval;
1600}
1601
/*
 * e1000_set_itr - recompute and program the interrupt throttle rate
 *
 * Combines the Tx and Rx ITR classifications from e1000_update_itr()
 * (taking the more latency-sensitive of the two), maps the class to an
 * interrupts-per-second target, and writes the hardware ITR register.
 */
static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter,
				    adapter->tx_itr,
				    adapter->total_tx_packets,
				    adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter,
				    adapter->rx_itr,
				    adapter->total_rx_packets,
				    adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	/* take the more latency-sensitive of the Tx and Rx classes */
	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > adapter->itr ?
			     min(adapter->itr + (new_itr >> 2), new_itr) :
			     new_itr;
		adapter->itr = new_itr;
		/* convert ints/sec to the 256ns units the ITR register uses */
		ew32(ITR, 1000000000 / (new_itr * 256));
	}
}
1660
1661/**
1662 * e1000_clean - NAPI Rx polling callback
1663 * @adapter: board private structure
1664 **/
1665static int e1000_clean(struct napi_struct *napi, int budget)
1666{
1667 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
1668 struct net_device *poll_dev = adapter->netdev;
1669 int tx_cleaned = 0, work_done = 0;
1670
1671 /* Must NOT use netdev_priv macro here. */
1672 adapter = poll_dev->priv;
1673
1674 /* Keep link state information with original netdev */
1675 if (!netif_carrier_ok(poll_dev))
1676 goto quit_polling;
1677
1678 /* e1000_clean is called per-cpu. This lock protects
1679 * tx_ring from being cleaned by multiple cpus
1680 * simultaneously. A failure obtaining the lock means
1681 * tx_ring is currently being cleaned anyway. */
1682 if (spin_trylock(&adapter->tx_queue_lock)) {
1683 tx_cleaned = e1000_clean_tx_irq(adapter);
1684 spin_unlock(&adapter->tx_queue_lock);
1685 }
1686
1687 adapter->clean_rx(adapter, &work_done, budget);
1688
1689 /* If no Tx and not enough Rx work done, exit the polling mode */
1690 if ((!tx_cleaned && (work_done < budget)) ||
1691 !netif_running(poll_dev)) {
1692quit_polling:
1693 if (adapter->itr_setting & 3)
1694 e1000_set_itr(adapter);
1695 netif_rx_complete(poll_dev, napi);
1696 e1000_irq_enable(adapter);
1697 }
1698
1699 return work_done;
1700}
1701
1702static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1703{
1704 struct e1000_adapter *adapter = netdev_priv(netdev);
1705 struct e1000_hw *hw = &adapter->hw;
1706 u32 vfta, index;
1707
1708 /* don't update vlan cookie if already programmed */
1709 if ((adapter->hw.mng_cookie.status &
1710 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
1711 (vid == adapter->mng_vlan_id))
1712 return;
1713 /* add VID to filter table */
1714 index = (vid >> 5) & 0x7F;
1715 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
1716 vfta |= (1 << (vid & 0x1F));
1717 e1000e_write_vfta(hw, index, vfta);
1718}
1719
1720static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1721{
1722 struct e1000_adapter *adapter = netdev_priv(netdev);
1723 struct e1000_hw *hw = &adapter->hw;
1724 u32 vfta, index;
1725
1726 e1000_irq_disable(adapter);
1727 vlan_group_set_device(adapter->vlgrp, vid, NULL);
1728 e1000_irq_enable(adapter);
1729
1730 if ((adapter->hw.mng_cookie.status &
1731 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
1732 (vid == adapter->mng_vlan_id)) {
1733 /* release control to f/w */
1734 e1000_release_hw_control(adapter);
1735 return;
1736 }
1737
1738 /* remove VID from filter table */
1739 index = (vid >> 5) & 0x7F;
1740 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
1741 vfta &= ~(1 << (vid & 0x1F));
1742 e1000e_write_vfta(hw, index, vfta);
1743}
1744
1745static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
1746{
1747 struct net_device *netdev = adapter->netdev;
1748 u16 vid = adapter->hw.mng_cookie.vlan_id;
1749 u16 old_vid = adapter->mng_vlan_id;
1750
1751 if (!adapter->vlgrp)
1752 return;
1753
1754 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
1755 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1756 if (adapter->hw.mng_cookie.status &
1757 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1758 e1000_vlan_rx_add_vid(netdev, vid);
1759 adapter->mng_vlan_id = vid;
1760 }
1761
1762 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
1763 (vid != old_vid) &&
1764 !vlan_group_get_device(adapter->vlgrp, old_vid))
1765 e1000_vlan_rx_kill_vid(netdev, old_vid);
1766 } else {
1767 adapter->mng_vlan_id = vid;
1768 }
1769}
1770
1771
1772static void e1000_vlan_rx_register(struct net_device *netdev,
1773 struct vlan_group *grp)
1774{
1775 struct e1000_adapter *adapter = netdev_priv(netdev);
1776 struct e1000_hw *hw = &adapter->hw;
1777 u32 ctrl, rctl;
1778
1779 e1000_irq_disable(adapter);
1780 adapter->vlgrp = grp;
1781
1782 if (grp) {
1783 /* enable VLAN tag insert/strip */
1784 ctrl = er32(CTRL);
1785 ctrl |= E1000_CTRL_VME;
1786 ew32(CTRL, ctrl);
1787
1788 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
1789 /* enable VLAN receive filtering */
1790 rctl = er32(RCTL);
1791 rctl |= E1000_RCTL_VFE;
1792 rctl &= ~E1000_RCTL_CFIEN;
1793 ew32(RCTL, rctl);
1794 e1000_update_mng_vlan(adapter);
1795 }
1796 } else {
1797 /* disable VLAN tag insert/strip */
1798 ctrl = er32(CTRL);
1799 ctrl &= ~E1000_CTRL_VME;
1800 ew32(CTRL, ctrl);
1801
1802 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
1803 /* disable VLAN filtering */
1804 rctl = er32(RCTL);
1805 rctl &= ~E1000_RCTL_VFE;
1806 ew32(RCTL, rctl);
1807 if (adapter->mng_vlan_id !=
1808 (u16)E1000_MNG_VLAN_NONE) {
1809 e1000_vlan_rx_kill_vid(netdev,
1810 adapter->mng_vlan_id);
1811 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1812 }
1813 }
1814 }
1815
1816 e1000_irq_enable(adapter);
1817}
1818
1819static void e1000_restore_vlan(struct e1000_adapter *adapter)
1820{
1821 u16 vid;
1822
1823 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
1824
1825 if (!adapter->vlgrp)
1826 return;
1827
1828 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1829 if (!vlan_group_get_device(adapter->vlgrp, vid))
1830 continue;
1831 e1000_vlan_rx_add_vid(adapter->netdev, vid);
1832 }
1833}
1834
1835static void e1000_init_manageability(struct e1000_adapter *adapter)
1836{
1837 struct e1000_hw *hw = &adapter->hw;
1838 u32 manc, manc2h;
1839
1840 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
1841 return;
1842
1843 manc = er32(MANC);
1844
1845 /* disable hardware interception of ARP */
1846 manc &= ~(E1000_MANC_ARP_EN);
1847
1848 /* enable receiving management packets to the host. this will probably
1849 * generate destination unreachable messages from the host OS, but
1850 * the packets will be handled on SMBUS */
1851 manc |= E1000_MANC_EN_MNG2HOST;
1852 manc2h = er32(MANC2H);
1853#define E1000_MNG2HOST_PORT_623 (1 << 5)
1854#define E1000_MNG2HOST_PORT_664 (1 << 6)
1855 manc2h |= E1000_MNG2HOST_PORT_623;
1856 manc2h |= E1000_MNG2HOST_PORT_664;
1857 ew32(MANC2H, manc2h);
1858 ew32(MANC, manc);
1859}
1860
1861/**
1862 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1863 * @adapter: board private structure
1864 *
1865 * Configure the Tx unit of the MAC after a reset.
1866 **/
1867static void e1000_configure_tx(struct e1000_adapter *adapter)
1868{
1869 struct e1000_hw *hw = &adapter->hw;
1870 struct e1000_ring *tx_ring = adapter->tx_ring;
1871 u64 tdba;
1872 u32 tdlen, tctl, tipg, tarc;
1873 u32 ipgr1, ipgr2;
1874
1875 /* Setup the HW Tx Head and Tail descriptor pointers */
1876 tdba = tx_ring->dma;
1877 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
1878 ew32(TDBAL, (tdba & DMA_32BIT_MASK));
1879 ew32(TDBAH, (tdba >> 32));
1880 ew32(TDLEN, tdlen);
1881 ew32(TDH, 0);
1882 ew32(TDT, 0);
1883 tx_ring->head = E1000_TDH;
1884 tx_ring->tail = E1000_TDT;
1885
1886 /* Set the default values for the Tx Inter Packet Gap timer */
1887 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
1888 ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
1889 ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
1890
1891 if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
1892 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
1893
1894 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1895 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1896 ew32(TIPG, tipg);
1897
1898 /* Set the Tx Interrupt Delay register */
1899 ew32(TIDV, adapter->tx_int_delay);
1900 /* tx irq moderation */
1901 ew32(TADV, adapter->tx_abs_int_delay);
1902
1903 /* Program the Transmit Control Register */
1904 tctl = er32(TCTL);
1905 tctl &= ~E1000_TCTL_CT;
1906 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1907 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1908
1909 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
1910 tarc = er32(TARC0);
1911 /* set the speed mode bit, we'll clear it if we're not at
1912 * gigabit link later */
1913#define SPEED_MODE_BIT (1 << 21)
1914 tarc |= SPEED_MODE_BIT;
1915 ew32(TARC0, tarc);
1916 }
1917
1918 /* errata: program both queues to unweighted RR */
1919 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
1920 tarc = er32(TARC0);
1921 tarc |= 1;
1922 ew32(TARC0, tarc);
1923 tarc = er32(TARC1);
1924 tarc |= 1;
1925 ew32(TARC1, tarc);
1926 }
1927
1928 e1000e_config_collision_dist(hw);
1929
1930 /* Setup Transmit Descriptor Settings for eop descriptor */
1931 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1932
1933 /* only set IDE if we are delaying interrupts using the timers */
1934 if (adapter->tx_int_delay)
1935 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1936
1937 /* enable Report Status bit */
1938 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1939
1940 ew32(TCTL, tctl);
1941
1942 adapter->tx_queue_len = adapter->netdev->tx_queue_len;
1943}
1944
1945/**
1946 * e1000_setup_rctl - configure the receive control registers
1947 * @adapter: Board private structure
1948 **/
1949#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1950 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
1951static void e1000_setup_rctl(struct e1000_adapter *adapter)
1952{
1953 struct e1000_hw *hw = &adapter->hw;
1954 u32 rctl, rfctl;
1955 u32 psrctl = 0;
1956 u32 pages = 0;
1957
1958 /* Program MC offset vector base */
1959 rctl = er32(RCTL);
1960 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1961 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1962 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1963 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1964
1965 /* Do not Store bad packets */
1966 rctl &= ~E1000_RCTL_SBP;
1967
1968 /* Enable Long Packet receive */
1969 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1970 rctl &= ~E1000_RCTL_LPE;
1971 else
1972 rctl |= E1000_RCTL_LPE;
1973
1974 /* Setup buffer sizes */
1975 rctl &= ~E1000_RCTL_SZ_4096;
1976 rctl |= E1000_RCTL_BSEX;
1977 switch (adapter->rx_buffer_len) {
1978 case 256:
1979 rctl |= E1000_RCTL_SZ_256;
1980 rctl &= ~E1000_RCTL_BSEX;
1981 break;
1982 case 512:
1983 rctl |= E1000_RCTL_SZ_512;
1984 rctl &= ~E1000_RCTL_BSEX;
1985 break;
1986 case 1024:
1987 rctl |= E1000_RCTL_SZ_1024;
1988 rctl &= ~E1000_RCTL_BSEX;
1989 break;
1990 case 2048:
1991 default:
1992 rctl |= E1000_RCTL_SZ_2048;
1993 rctl &= ~E1000_RCTL_BSEX;
1994 break;
1995 case 4096:
1996 rctl |= E1000_RCTL_SZ_4096;
1997 break;
1998 case 8192:
1999 rctl |= E1000_RCTL_SZ_8192;
2000 break;
2001 case 16384:
2002 rctl |= E1000_RCTL_SZ_16384;
2003 break;
2004 }
2005
2006 /*
2007 * 82571 and greater support packet-split where the protocol
2008 * header is placed in skb->data and the packet data is
2009 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2010 * In the case of a non-split, skb->data is linearly filled,
2011 * followed by the page buffers. Therefore, skb->data is
2012 * sized to hold the largest protocol header.
2013 *
2014 * allocations using alloc_page take too long for regular MTU
2015 * so only enable packet split for jumbo frames
2016 *
2017 * Using pages when the page size is greater than 16k wastes
2018 * a lot of memory, since we allocate 3 pages at all times
2019 * per packet.
2020 */
2021 adapter->rx_ps_pages = 0;
2022 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2023 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2024 adapter->rx_ps_pages = pages;
2025
2026 if (adapter->rx_ps_pages) {
2027 /* Configure extra packet-split registers */
2028 rfctl = er32(RFCTL);
2029 rfctl |= E1000_RFCTL_EXTEN;
2030 /* disable packet split support for IPv6 extension headers,
2031 * because some malformed IPv6 headers can hang the RX */
2032 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2033 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2034
2035 ew32(RFCTL, rfctl);
2036
2037 /* disable the stripping of CRC because it breaks
2038 * BMC firmware connected over SMBUS */
2039 rctl |= E1000_RCTL_DTYP_PS /* | E1000_RCTL_SECRC */;
2040
2041 psrctl |= adapter->rx_ps_bsize0 >>
2042 E1000_PSRCTL_BSIZE0_SHIFT;
2043
2044 switch (adapter->rx_ps_pages) {
2045 case 3:
2046 psrctl |= PAGE_SIZE <<
2047 E1000_PSRCTL_BSIZE3_SHIFT;
2048 case 2:
2049 psrctl |= PAGE_SIZE <<
2050 E1000_PSRCTL_BSIZE2_SHIFT;
2051 case 1:
2052 psrctl |= PAGE_SIZE >>
2053 E1000_PSRCTL_BSIZE1_SHIFT;
2054 break;
2055 }
2056
2057 ew32(PSRCTL, psrctl);
2058 }
2059
2060 ew32(RCTL, rctl);
2061}
2062
2063/**
2064 * e1000_configure_rx - Configure Receive Unit after Reset
2065 * @adapter: board private structure
2066 *
2067 * Configure the Rx unit of the MAC after a reset.
2068 **/
2069static void e1000_configure_rx(struct e1000_adapter *adapter)
2070{
2071 struct e1000_hw *hw = &adapter->hw;
2072 struct e1000_ring *rx_ring = adapter->rx_ring;
2073 u64 rdba;
2074 u32 rdlen, rctl, rxcsum, ctrl_ext;
2075
2076 if (adapter->rx_ps_pages) {
2077 /* this is a 32 byte descriptor */
2078 rdlen = rx_ring->count *
2079 sizeof(union e1000_rx_desc_packet_split);
2080 adapter->clean_rx = e1000_clean_rx_irq_ps;
2081 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2082 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + VLAN_HLEN + 4) {
2083 rdlen = rx_ring->count *
2084 sizeof(struct e1000_rx_desc);
2085 adapter->clean_rx = e1000_clean_rx_irq_jumbo;
2086 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_jumbo;
2087 } else {
2088 rdlen = rx_ring->count *
2089 sizeof(struct e1000_rx_desc);
2090 adapter->clean_rx = e1000_clean_rx_irq;
2091 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2092 }
2093
2094 /* disable receives while setting up the descriptors */
2095 rctl = er32(RCTL);
2096 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2097 e1e_flush();
2098 msleep(10);
2099
2100 /* set the Receive Delay Timer Register */
2101 ew32(RDTR, adapter->rx_int_delay);
2102
2103 /* irq moderation */
2104 ew32(RADV, adapter->rx_abs_int_delay);
2105 if (adapter->itr_setting != 0)
2106 ew32(ITR,
2107 1000000000 / (adapter->itr * 256));
2108
2109 ctrl_ext = er32(CTRL_EXT);
2110 /* Reset delay timers after every interrupt */
2111 ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
2112 /* Auto-Mask interrupts upon ICR access */
2113 ctrl_ext |= E1000_CTRL_EXT_IAME;
2114 ew32(IAM, 0xffffffff);
2115 ew32(CTRL_EXT, ctrl_ext);
2116 e1e_flush();
2117
2118 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2119 * the Base and Length of the Rx Descriptor Ring */
2120 rdba = rx_ring->dma;
2121 ew32(RDBAL, (rdba & DMA_32BIT_MASK));
2122 ew32(RDBAH, (rdba >> 32));
2123 ew32(RDLEN, rdlen);
2124 ew32(RDH, 0);
2125 ew32(RDT, 0);
2126 rx_ring->head = E1000_RDH;
2127 rx_ring->tail = E1000_RDT;
2128
2129 /* Enable Receive Checksum Offload for TCP and UDP */
2130 rxcsum = er32(RXCSUM);
2131 if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
2132 rxcsum |= E1000_RXCSUM_TUOFL;
2133
2134 /* IPv4 payload checksum for UDP fragments must be
2135 * used in conjunction with packet-split. */
2136 if (adapter->rx_ps_pages)
2137 rxcsum |= E1000_RXCSUM_IPPCSE;
2138 } else {
2139 rxcsum &= ~E1000_RXCSUM_TUOFL;
2140 /* no need to clear IPPCSE as it defaults to 0 */
2141 }
2142 ew32(RXCSUM, rxcsum);
2143
2144 /* Enable early receives on supported devices, only takes effect when
2145 * packet size is equal or larger than the specified value (in 8 byte
2146 * units), e.g. using jumbo frames when setting to E1000_ERT_2048 */
2147 if ((adapter->flags & FLAG_HAS_ERT) &&
2148 (adapter->netdev->mtu > ETH_DATA_LEN))
2149 ew32(ERT, E1000_ERT_2048);
2150
2151 /* Enable Receives */
2152 ew32(RCTL, rctl);
2153}
2154
2155/**
2156 * e1000_mc_addr_list_update - Update Multicast addresses
2157 * @hw: pointer to the HW structure
2158 * @mc_addr_list: array of multicast addresses to program
2159 * @mc_addr_count: number of multicast addresses to program
2160 * @rar_used_count: the first RAR register free to program
2161 * @rar_count: total number of supported Receive Address Registers
2162 *
2163 * Updates the Receive Address Registers and Multicast Table Array.
2164 * The caller must have a packed mc_addr_list of multicast addresses.
2165 * The parameter rar_count will usually be hw->mac.rar_entry_count
2166 * unless there are workarounds that change this. Currently no func pointer
2167 * exists and all implementations are handled in the generic version of this
2168 * function.
2169 **/
2170static void e1000_mc_addr_list_update(struct e1000_hw *hw, u8 *mc_addr_list,
2171 u32 mc_addr_count, u32 rar_used_count,
2172 u32 rar_count)
2173{
2174 hw->mac.ops.mc_addr_list_update(hw, mc_addr_list, mc_addr_count,
2175 rar_used_count, rar_count);
2176}
2177
2178/**
2179 * e1000_set_multi - Multicast and Promiscuous mode set
2180 * @netdev: network interface device structure
2181 *
2182 * The set_multi entry point is called whenever the multicast address
2183 * list or the network interface flags are updated. This routine is
2184 * responsible for configuring the hardware for proper multicast,
2185 * promiscuous mode, and all-multi behavior.
2186 **/
2187static void e1000_set_multi(struct net_device *netdev)
2188{
2189 struct e1000_adapter *adapter = netdev_priv(netdev);
2190 struct e1000_hw *hw = &adapter->hw;
2191 struct e1000_mac_info *mac = &hw->mac;
2192 struct dev_mc_list *mc_ptr;
2193 u8 *mta_list;
2194 u32 rctl;
2195 int i;
2196
2197 /* Check for Promiscuous and All Multicast modes */
2198
2199 rctl = er32(RCTL);
2200
2201 if (netdev->flags & IFF_PROMISC) {
2202 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2203 } else if (netdev->flags & IFF_ALLMULTI) {
2204 rctl |= E1000_RCTL_MPE;
2205 rctl &= ~E1000_RCTL_UPE;
2206 } else {
2207 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2208 }
2209
2210 ew32(RCTL, rctl);
2211
2212 if (netdev->mc_count) {
2213 mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
2214 if (!mta_list)
2215 return;
2216
2217 /* prepare a packed array of only addresses. */
2218 mc_ptr = netdev->mc_list;
2219
2220 for (i = 0; i < netdev->mc_count; i++) {
2221 if (!mc_ptr)
2222 break;
2223 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
2224 ETH_ALEN);
2225 mc_ptr = mc_ptr->next;
2226 }
2227
2228 e1000_mc_addr_list_update(hw, mta_list, i, 1,
2229 mac->rar_entry_count);
2230 kfree(mta_list);
2231 } else {
2232 /*
2233 * if we're called from probe, we might not have
2234 * anything to do here, so clear out the list
2235 */
2236 e1000_mc_addr_list_update(hw, NULL, 0, 1,
2237 mac->rar_entry_count);
2238 }
2239}
2240
2241/**
2242 * e1000_configure - configure the hardware for RX and TX
2243 * @adapter: private board structure
2244 **/
2245static void e1000_configure(struct e1000_adapter *adapter)
2246{
2247 e1000_set_multi(adapter->netdev);
2248
2249 e1000_restore_vlan(adapter);
2250 e1000_init_manageability(adapter);
2251
2252 e1000_configure_tx(adapter);
2253 e1000_setup_rctl(adapter);
2254 e1000_configure_rx(adapter);
2255 adapter->alloc_rx_buf(adapter,
2256 e1000_desc_unused(adapter->rx_ring));
2257}
2258
2259/**
2260 * e1000e_power_up_phy - restore link in case the phy was powered down
2261 * @adapter: address of board private structure
2262 *
2263 * The phy may be powered down to save power and turn off link when the
2264 * driver is unloaded and wake on lan is not enabled (among others)
2265 * *** this routine MUST be followed by a call to e1000e_reset ***
2266 **/
2267void e1000e_power_up_phy(struct e1000_adapter *adapter)
2268{
2269 u16 mii_reg = 0;
2270
2271 /* Just clear the power down bit to wake the phy back up */
2272 if (adapter->hw.media_type == e1000_media_type_copper) {
2273 /* according to the manual, the phy will retain its
2274 * settings across a power-down/up cycle */
2275 e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg);
2276 mii_reg &= ~MII_CR_POWER_DOWN;
2277 e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg);
2278 }
2279
2280 adapter->hw.mac.ops.setup_link(&adapter->hw);
2281}
2282
2283/**
2284 * e1000_power_down_phy - Power down the PHY
2285 *
2286 * Power down the PHY so no link is implied when interface is down
2287 * The PHY cannot be powered down is management or WoL is active
2288 */
2289static void e1000_power_down_phy(struct e1000_adapter *adapter)
2290{
2291 struct e1000_hw *hw = &adapter->hw;
2292 u16 mii_reg;
2293
2294 /* WoL is enabled */
2295 if (!adapter->wol)
2296 return;
2297
2298 /* non-copper PHY? */
2299 if (adapter->hw.media_type != e1000_media_type_copper)
2300 return;
2301
2302 /* reset is blocked because of a SoL/IDER session */
2303 if (e1000e_check_mng_mode(hw) ||
2304 e1000_check_reset_block(hw))
2305 return;
2306
2307 /* managebility (AMT) is enabled */
2308 if (er32(MANC) & E1000_MANC_SMBUS_EN)
2309 return;
2310
2311 /* power down the PHY */
2312 e1e_rphy(hw, PHY_CONTROL, &mii_reg);
2313 mii_reg |= MII_CR_POWER_DOWN;
2314 e1e_wphy(hw, PHY_CONTROL, mii_reg);
2315 mdelay(1);
2316}
2317
2318/**
2319 * e1000e_reset - bring the hardware into a known good state
2320 *
2321 * This function boots the hardware and enables some settings that
2322 * require a configuration cycle of the hardware - those cannot be
2323 * set/changed during runtime. After reset the device needs to be
2324 * properly configured for rx, tx etc.
2325 */
2326void e1000e_reset(struct e1000_adapter *adapter)
2327{
2328 struct e1000_mac_info *mac = &adapter->hw.mac;
2329 struct e1000_hw *hw = &adapter->hw;
2330 u32 tx_space, min_tx_space, min_rx_space;
2331 u16 hwm;
2332
2333 if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN ) {
2334 /* To maintain wire speed transmits, the Tx FIFO should be
2335 * large enough to accommodate two full transmit packets,
2336 * rounded up to the next 1KB and expressed in KB. Likewise,
2337 * the Rx FIFO should be large enough to accommodate at least
2338 * one full receive packet and is similarly rounded up and
2339 * expressed in KB. */
2340 adapter->pba = er32(PBA);
2341 /* upper 16 bits has Tx packet buffer allocation size in KB */
2342 tx_space = adapter->pba >> 16;
2343 /* lower 16 bits has Rx packet buffer allocation size in KB */
2344 adapter->pba &= 0xffff;
2345 /* the tx fifo also stores 16 bytes of information about the tx
2346 * but don't include ethernet FCS because hardware appends it */
2347 min_tx_space = (mac->max_frame_size +
2348 sizeof(struct e1000_tx_desc) -
2349 ETH_FCS_LEN) * 2;
2350 min_tx_space = ALIGN(min_tx_space, 1024);
2351 min_tx_space >>= 10;
2352 /* software strips receive CRC, so leave room for it */
2353 min_rx_space = mac->max_frame_size;
2354 min_rx_space = ALIGN(min_rx_space, 1024);
2355 min_rx_space >>= 10;
2356
2357 /* If current Tx allocation is less than the min Tx FIFO size,
2358 * and the min Tx FIFO size is less than the current Rx FIFO
2359 * allocation, take space away from current Rx allocation */
2360 if (tx_space < min_tx_space &&
2361 ((min_tx_space - tx_space) < adapter->pba)) {
2362 adapter->pba -= - (min_tx_space - tx_space);
2363
2364 /* if short on rx space, rx wins and must trump tx
2365 * adjustment or use Early Receive if available */
2366 if ((adapter->pba < min_rx_space) &&
2367 (!(adapter->flags & FLAG_HAS_ERT)))
2368 /* ERT enabled in e1000_configure_rx */
2369 adapter->pba = min_rx_space;
2370 }
2371 }
2372
2373 ew32(PBA, adapter->pba);
2374
2375 /* flow control settings */
2376 /* The high water mark must be low enough to fit one full frame
2377 * (or the size used for early receive) above it in the Rx FIFO.
2378 * Set it to the lower of:
2379 * - 90% of the Rx FIFO size, and
2380 * - the full Rx FIFO size minus the early receive size (for parts
2381 * with ERT support assuming ERT set to E1000_ERT_2048), or
2382 * - the full Rx FIFO size minus one full frame */
2383 if (adapter->flags & FLAG_HAS_ERT)
2384 hwm = min(((adapter->pba << 10) * 9 / 10),
2385 ((adapter->pba << 10) - (E1000_ERT_2048 << 3)));
2386 else
2387 hwm = min(((adapter->pba << 10) * 9 / 10),
2388 ((adapter->pba << 10) - mac->max_frame_size));
2389
2390 mac->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
2391 mac->fc_low_water = mac->fc_high_water - 8;
2392
2393 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
2394 mac->fc_pause_time = 0xFFFF;
2395 else
2396 mac->fc_pause_time = E1000_FC_PAUSE_TIME;
2397 mac->fc = mac->original_fc;
2398
2399 /* Allow time for pending master requests to run */
2400 mac->ops.reset_hw(hw);
2401 ew32(WUC, 0);
2402
2403 if (mac->ops.init_hw(hw))
2404 ndev_err(adapter->netdev, "Hardware Error\n");
2405
2406 e1000_update_mng_vlan(adapter);
2407
2408 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2409 ew32(VET, ETH_P_8021Q);
2410
2411 e1000e_reset_adaptive(hw);
2412 e1000_get_phy_info(hw);
2413
2414 if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
2415 u16 phy_data = 0;
2416 /* speed up time to link by disabling smart power down, ignore
2417 * the return value of this function because there is nothing
2418 * different we would do if it failed */
2419 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
2420 phy_data &= ~IGP02E1000_PM_SPD;
2421 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
2422 }
2423
2424 e1000_release_manageability(adapter);
2425}
2426
2427int e1000e_up(struct e1000_adapter *adapter)
2428{
2429 struct e1000_hw *hw = &adapter->hw;
2430
2431 /* hardware has been reset, we need to reload some things */
2432 e1000_configure(adapter);
2433
2434 clear_bit(__E1000_DOWN, &adapter->state);
2435
2436 napi_enable(&adapter->napi);
2437 e1000_irq_enable(adapter);
2438
2439 /* fire a link change interrupt to start the watchdog */
2440 ew32(ICS, E1000_ICS_LSC);
2441 return 0;
2442}
2443
/**
 * e1000e_down - quiesce the device
 * @adapter: board private structure
 *
 * Stops Rx and Tx in hardware, disables NAPI/interrupts/timers, resets
 * the hardware and drains both descriptor rings.  Ordering matters:
 * receives are stopped before the queue, and both disables are flushed
 * before interrupts are torn down.
 */
void e1000e_down(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__E1000_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	e1e_flush();
	msleep(10);

	napi_disable(&adapter->napi);
	e1000_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	/* restore the queue length saved in e1000_configure_tx() */
	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	e1000e_reset(adapter);
	e1000_clean_tx_ring(adapter);
	e1000_clean_rx_ring(adapter);

	/*
	 * TODO: for power management, we could drop the link and
	 * pci_disable_device here.
	 */
}
2489
2490void e1000e_reinit_locked(struct e1000_adapter *adapter)
2491{
2492 might_sleep();
2493 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
2494 msleep(1);
2495 e1000e_down(adapter);
2496 e1000e_up(adapter);
2497 clear_bit(__E1000_RESETTING, &adapter->state);
2498}
2499
2500/**
2501 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
2502 * @adapter: board private structure to initialize
2503 *
2504 * e1000_sw_init initializes the Adapter private data structure.
2505 * Fields are initialized based on PCI device information and
2506 * OS network device settings (MTU size).
2507 **/
2508static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2509{
2510 struct e1000_hw *hw = &adapter->hw;
2511 struct net_device *netdev = adapter->netdev;
2512
2513 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
2514 adapter->rx_ps_bsize0 = 128;
2515 hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2516 hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2517
2518 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2519 if (!adapter->tx_ring)
2520 goto err;
2521
2522 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2523 if (!adapter->rx_ring)
2524 goto err;
2525
2526 spin_lock_init(&adapter->tx_queue_lock);
2527
2528 /* Explicitly disable IRQ since the NIC can be in any state. */
2529 atomic_set(&adapter->irq_sem, 0);
2530 e1000_irq_disable(adapter);
2531
2532 spin_lock_init(&adapter->stats_lock);
2533
2534 set_bit(__E1000_DOWN, &adapter->state);
2535 return 0;
2536
2537err:
2538 ndev_err(netdev, "Unable to allocate memory for queues\n");
2539 kfree(adapter->rx_ring);
2540 kfree(adapter->tx_ring);
2541 return -ENOMEM;
2542}
2543
2544/**
2545 * e1000_open - Called when a network interface is made active
2546 * @netdev: network interface device structure
2547 *
2548 * Returns 0 on success, negative value on failure
2549 *
2550 * The open entry point is called when a network interface is made
2551 * active by the system (IFF_UP). At this point all resources needed
2552 * for transmit and receive operations are allocated, the interrupt
2553 * handler is registered with the OS, the watchdog timer is started,
2554 * and the stack is notified that the interface is ready.
2555 **/
2556static int e1000_open(struct net_device *netdev)
2557{
2558 struct e1000_adapter *adapter = netdev_priv(netdev);
2559 struct e1000_hw *hw = &adapter->hw;
2560 int err;
2561
2562 /* disallow open during test */
2563 if (test_bit(__E1000_TESTING, &adapter->state))
2564 return -EBUSY;
2565
2566 /* allocate transmit descriptors */
2567 err = e1000e_setup_tx_resources(adapter);
2568 if (err)
2569 goto err_setup_tx;
2570
2571 /* allocate receive descriptors */
2572 err = e1000e_setup_rx_resources(adapter);
2573 if (err)
2574 goto err_setup_rx;
2575
2576 e1000e_power_up_phy(adapter);
2577
2578 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2579 if ((adapter->hw.mng_cookie.status &
2580 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
2581 e1000_update_mng_vlan(adapter);
2582
2583 /* If AMT is enabled, let the firmware know that the network
2584 * interface is now open */
2585 if ((adapter->flags & FLAG_HAS_AMT) &&
2586 e1000e_check_mng_mode(&adapter->hw))
2587 e1000_get_hw_control(adapter);
2588
2589 /* before we allocate an interrupt, we must be ready to handle it.
2590 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2591 * as soon as we call pci_request_irq, so we have to setup our
2592 * clean_rx handler before we do so. */
2593 e1000_configure(adapter);
2594
2595 err = e1000_request_irq(adapter);
2596 if (err)
2597 goto err_req_irq;
2598
2599 /* From here on the code is the same as e1000e_up() */
2600 clear_bit(__E1000_DOWN, &adapter->state);
2601
2602 napi_enable(&adapter->napi);
2603
2604 e1000_irq_enable(adapter);
2605
2606 /* fire a link status change interrupt to start the watchdog */
2607 ew32(ICS, E1000_ICS_LSC);
2608
2609 return 0;
2610
2611err_req_irq:
2612 e1000_release_hw_control(adapter);
2613 e1000_power_down_phy(adapter);
2614 e1000e_free_rx_resources(adapter);
2615err_setup_rx:
2616 e1000e_free_tx_resources(adapter);
2617err_setup_tx:
2618 e1000e_reset(adapter);
2619
2620 return err;
2621}
2622
2623/**
2624 * e1000_close - Disables a network interface
2625 * @netdev: network interface device structure
2626 *
2627 * Returns 0, this is not allowed to fail
2628 *
2629 * The close entry point is called when an interface is de-activated
2630 * by the OS. The hardware is still under the drivers control, but
2631 * needs to be disabled. A global MAC reset is issued to stop the
2632 * hardware, and all transmit and receive resources are freed.
2633 **/
2634static int e1000_close(struct net_device *netdev)
2635{
2636 struct e1000_adapter *adapter = netdev_priv(netdev);
2637
2638 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
2639 e1000e_down(adapter);
2640 e1000_power_down_phy(adapter);
2641 e1000_free_irq(adapter);
2642
2643 e1000e_free_tx_resources(adapter);
2644 e1000e_free_rx_resources(adapter);
2645
2646 /* kill manageability vlan ID if supported, but not if a vlan with
2647 * the same ID is registered on the host OS (let 8021q kill it) */
2648 if ((adapter->hw.mng_cookie.status &
2649 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2650 !(adapter->vlgrp &&
2651 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
2652 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2653
2654 /* If AMT is enabled, let the firmware know that the network
2655 * interface is now closed */
2656 if ((adapter->flags & FLAG_HAS_AMT) &&
2657 e1000e_check_mng_mode(&adapter->hw))
2658 e1000_release_hw_control(adapter);
2659
2660 return 0;
2661}
2662/**
2663 * e1000_set_mac - Change the Ethernet Address of the NIC
2664 * @netdev: network interface device structure
2665 * @p: pointer to an address structure
2666 *
2667 * Returns 0 on success, negative on failure
2668 **/
2669static int e1000_set_mac(struct net_device *netdev, void *p)
2670{
2671 struct e1000_adapter *adapter = netdev_priv(netdev);
2672 struct sockaddr *addr = p;
2673
2674 if (!is_valid_ether_addr(addr->sa_data))
2675 return -EADDRNOTAVAIL;
2676
2677 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2678 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
2679
2680 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2681
2682 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
2683 /* activate the work around */
2684 e1000e_set_laa_state_82571(&adapter->hw, 1);
2685
2686 /* Hold a copy of the LAA in RAR[14] This is done so that
2687 * between the time RAR[0] gets clobbered and the time it
2688 * gets fixed (in e1000_watchdog), the actual LAA is in one
2689 * of the RARs and no incoming packets directed to this port
2690 * are dropped. Eventually the LAA will be in RAR[0] and
2691 * RAR[14] */
2692 e1000e_rar_set(&adapter->hw,
2693 adapter->hw.mac.addr,
2694 adapter->hw.mac.rar_entry_count - 1);
2695 }
2696
2697 return 0;
2698}
2699
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
/* Timer callback (armed from the watchdog); @data is the adapter pointer
 * cast to unsigned long, as required by the timer API. */
static void e1000_update_phy_info(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
	e1000_get_phy_info(&adapter->hw);
}
2707
2708/**
2709 * e1000e_update_stats - Update the board statistics counters
2710 * @adapter: board private structure
2711 **/
2712void e1000e_update_stats(struct e1000_adapter *adapter)
2713{
2714 struct e1000_hw *hw = &adapter->hw;
2715 struct pci_dev *pdev = adapter->pdev;
2716 unsigned long irq_flags;
2717 u16 phy_tmp;
2718
2719#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
2720
2721 /*
2722 * Prevent stats update while adapter is being reset, or if the pci
2723 * connection is down.
2724 */
2725 if (adapter->link_speed == 0)
2726 return;
2727 if (pci_channel_offline(pdev))
2728 return;
2729
2730 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
2731
2732 /* these counters are modified from e1000_adjust_tbi_stats,
2733 * called from the interrupt context, so they must only
2734 * be written while holding adapter->stats_lock
2735 */
2736
2737 adapter->stats.crcerrs += er32(CRCERRS);
2738 adapter->stats.gprc += er32(GPRC);
2739 adapter->stats.gorcl += er32(GORCL);
2740 adapter->stats.gorch += er32(GORCH);
2741 adapter->stats.bprc += er32(BPRC);
2742 adapter->stats.mprc += er32(MPRC);
2743 adapter->stats.roc += er32(ROC);
2744
2745 if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) {
2746 adapter->stats.prc64 += er32(PRC64);
2747 adapter->stats.prc127 += er32(PRC127);
2748 adapter->stats.prc255 += er32(PRC255);
2749 adapter->stats.prc511 += er32(PRC511);
2750 adapter->stats.prc1023 += er32(PRC1023);
2751 adapter->stats.prc1522 += er32(PRC1522);
2752 adapter->stats.symerrs += er32(SYMERRS);
2753 adapter->stats.sec += er32(SEC);
2754 }
2755
2756 adapter->stats.mpc += er32(MPC);
2757 adapter->stats.scc += er32(SCC);
2758 adapter->stats.ecol += er32(ECOL);
2759 adapter->stats.mcc += er32(MCC);
2760 adapter->stats.latecol += er32(LATECOL);
2761 adapter->stats.dc += er32(DC);
2762 adapter->stats.rlec += er32(RLEC);
2763 adapter->stats.xonrxc += er32(XONRXC);
2764 adapter->stats.xontxc += er32(XONTXC);
2765 adapter->stats.xoffrxc += er32(XOFFRXC);
2766 adapter->stats.xofftxc += er32(XOFFTXC);
2767 adapter->stats.fcruc += er32(FCRUC);
2768 adapter->stats.gptc += er32(GPTC);
2769 adapter->stats.gotcl += er32(GOTCL);
2770 adapter->stats.gotch += er32(GOTCH);
2771 adapter->stats.rnbc += er32(RNBC);
2772 adapter->stats.ruc += er32(RUC);
2773 adapter->stats.rfc += er32(RFC);
2774 adapter->stats.rjc += er32(RJC);
2775 adapter->stats.torl += er32(TORL);
2776 adapter->stats.torh += er32(TORH);
2777 adapter->stats.totl += er32(TOTL);
2778 adapter->stats.toth += er32(TOTH);
2779 adapter->stats.tpr += er32(TPR);
2780
2781 if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) {
2782 adapter->stats.ptc64 += er32(PTC64);
2783 adapter->stats.ptc127 += er32(PTC127);
2784 adapter->stats.ptc255 += er32(PTC255);
2785 adapter->stats.ptc511 += er32(PTC511);
2786 adapter->stats.ptc1023 += er32(PTC1023);
2787 adapter->stats.ptc1522 += er32(PTC1522);
2788 }
2789
2790 adapter->stats.mptc += er32(MPTC);
2791 adapter->stats.bptc += er32(BPTC);
2792
2793 /* used for adaptive IFS */
2794
2795 hw->mac.tx_packet_delta = er32(TPT);
2796 adapter->stats.tpt += hw->mac.tx_packet_delta;
2797 hw->mac.collision_delta = er32(COLC);
2798 adapter->stats.colc += hw->mac.collision_delta;
2799
2800 adapter->stats.algnerrc += er32(ALGNERRC);
2801 adapter->stats.rxerrc += er32(RXERRC);
2802 adapter->stats.tncrs += er32(TNCRS);
2803 adapter->stats.cexterr += er32(CEXTERR);
2804 adapter->stats.tsctc += er32(TSCTC);
2805 adapter->stats.tsctfc += er32(TSCTFC);
2806
2807 adapter->stats.iac += er32(IAC);
2808
2809 if (adapter->flags & FLAG_HAS_STATS_ICR_ICT) {
2810 adapter->stats.icrxoc += er32(ICRXOC);
2811 adapter->stats.icrxptc += er32(ICRXPTC);
2812 adapter->stats.icrxatc += er32(ICRXATC);
2813 adapter->stats.ictxptc += er32(ICTXPTC);
2814 adapter->stats.ictxatc += er32(ICTXATC);
2815 adapter->stats.ictxqec += er32(ICTXQEC);
2816 adapter->stats.ictxqmtc += er32(ICTXQMTC);
2817 adapter->stats.icrxdmtc += er32(ICRXDMTC);
2818 }
2819
2820 /* Fill out the OS statistics structure */
2821 adapter->net_stats.rx_packets = adapter->stats.gprc;
2822 adapter->net_stats.tx_packets = adapter->stats.gptc;
2823 adapter->net_stats.rx_bytes = adapter->stats.gorcl;
2824 adapter->net_stats.tx_bytes = adapter->stats.gotcl;
2825 adapter->net_stats.multicast = adapter->stats.mprc;
2826 adapter->net_stats.collisions = adapter->stats.colc;
2827
2828 /* Rx Errors */
2829
2830 /* RLEC on some newer hardware can be incorrect so build
2831 * our own version based on RUC and ROC */
2832 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
2833 adapter->stats.crcerrs + adapter->stats.algnerrc +
2834 adapter->stats.ruc + adapter->stats.roc +
2835 adapter->stats.cexterr;
2836 adapter->net_stats.rx_length_errors = adapter->stats.ruc +
2837 adapter->stats.roc;
2838 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2839 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
2840 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
2841
2842 /* Tx Errors */
2843 adapter->net_stats.tx_errors = adapter->stats.ecol +
2844 adapter->stats.latecol;
2845 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
2846 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
2847 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
2848
2849 /* Tx Dropped needs to be maintained elsewhere */
2850
2851 /* Phy Stats */
2852 if (hw->media_type == e1000_media_type_copper) {
2853 if ((adapter->link_speed == SPEED_1000) &&
2854 (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) {
2855 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
2856 adapter->phy_stats.idle_errors += phy_tmp;
2857 }
2858 }
2859
2860 /* Management Stats */
2861 adapter->stats.mgptc += er32(MGTPTC);
2862 adapter->stats.mgprc += er32(MGTPRC);
2863 adapter->stats.mgpdc += er32(MGTPDC);
2864
2865 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
2866}
2867
2868static void e1000_print_link_info(struct e1000_adapter *adapter)
2869{
2870 struct net_device *netdev = adapter->netdev;
2871 struct e1000_hw *hw = &adapter->hw;
2872 u32 ctrl = er32(CTRL);
2873
2874 ndev_info(netdev,
2875 "Link is Up %d Mbps %s, Flow Control: %s\n",
2876 adapter->link_speed,
2877 (adapter->link_duplex == FULL_DUPLEX) ?
2878 "Full Duplex" : "Half Duplex",
2879 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
2880 "RX/TX" :
2881 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
2882 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
2883}
2884
2885/**
2886 * e1000_watchdog - Timer Call-back
2887 * @data: pointer to adapter cast into an unsigned long
2888 **/
2889static void e1000_watchdog(unsigned long data)
2890{
2891 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2892
2893 /* Do the rest outside of interrupt context */
2894 schedule_work(&adapter->watchdog_task);
2895
2896 /* TODO: make this use queue_delayed_work() */
2897}
2898
/* Periodic housekeeping (process context): detect link transitions,
 * apply speed-dependent workarounds, refresh statistics, and detect a
 * stalled Tx ring after link loss. Re-arms the watchdog timer at the end. */
static void e1000_watchdog_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, watchdog_task);

	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link, tctl;
	s32 ret_val;
	int tx_pending = 0;

	/* carrier already up and hardware agrees: skip link processing */
	if ((netif_carrier_ok(netdev)) &&
	    (er32(STATUS) & E1000_STATUS_LU))
		goto link_up;

	ret_val = mac->ops.check_for_link(hw);
	if ((ret_val == E1000_ERR_PHY) &&
	    (adapter->hw.phy.type == e1000_phy_igp_3) &&
	    (er32(CTRL) &
	     E1000_PHY_CTRL_GBE_DISABLE)) {
		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
		ndev_info(netdev,
			"Gigabit has been disabled, downgrading speed\n");
	}

	if ((e1000e_enable_tx_pkt_filtering(hw)) &&
	    (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
		e1000_update_mng_vlan(adapter);

	/* for serdes without autoneg, trust the cached serdes link state */
	if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
	   !(er32(TXCW) & E1000_TXCW_ANE))
		link = adapter->hw.mac.serdes_has_link;
	else
		link = er32(STATUS) & E1000_STATUS_LU;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			bool txb2b = 1;
			mac->ops.get_link_up_info(&adapter->hw,
						 &adapter->link_speed,
						 &adapter->link_duplex);
			e1000_print_link_info(adapter);
			/* tweak tx_queue_len according to speed/duplex
			 * and adjust the timeout factor */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				txb2b = 0;
				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				txb2b = 0;
				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ? */
				break;
			}

			/* workaround: re-program speed mode bit after
			 * link-up event */
			if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
			    !txb2b) {
				u32 tarc0;
				tarc0 = er32(TARC0);
				tarc0 &= ~SPEED_MODE_BIT;
				ew32(TARC0, tarc0);
			}

			/* disable TSO for pcie and 10/100 speeds, to avoid
			 * some hardware issues */
			if (!(adapter->flags & FLAG_TSO_FORCE)) {
				switch (adapter->link_speed) {
				case SPEED_10:
				case SPEED_100:
					ndev_info(netdev,
					"10/100 speed: disabling TSO\n");
					netdev->features &= ~NETIF_F_TSO;
					netdev->features &= ~NETIF_F_TSO6;
					break;
				case SPEED_1000:
					netdev->features |= NETIF_F_TSO;
					netdev->features |= NETIF_F_TSO6;
					break;
				default:
					/* oops */
					break;
				}
			}

			/* enable transmits in the hardware, need to do this
			 * after setting TARC0 */
			tctl = er32(TCTL);
			tctl |= E1000_TCTL_EN;
			ew32(TCTL, tctl);

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);

			/* gather PHY diagnostics a couple of seconds after
			 * link-up (see e1000_update_phy_info) */
			if (!test_bit(__E1000_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		} else {
			/* make sure the receive unit is started */
			if (adapter->flags & FLAG_RX_NEEDS_RESTART) {
				u32 rctl = er32(RCTL);
				ew32(RCTL, rctl |
				     E1000_RCTL_EN);
			}
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			ndev_info(netdev, "Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
			if (!test_bit(__E1000_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
				schedule_work(&adapter->reset_task);
		}
	}

link_up:
	e1000e_update_stats(adapter);

	/* per-interval deltas used by the adaptive-IFS logic below */
	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
	adapter->gorcl_old = adapter->stats.gorcl;
	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
	adapter->gotcl_old = adapter->stats.gotcl;

	e1000e_update_adaptive(&adapter->hw);

	if (!netif_carrier_ok(netdev)) {
		tx_pending = (e1000_desc_unused(tx_ring) + 1 <
			       tx_ring->count);
		if (tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	ew32(ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = 1;

	/* With 82571 controllers, LAA may be overwritten due to controller
	 * reset from the other port. Set the appropriate LAA in RAR[0] */
	if (e1000e_get_laa_state_82571(hw))
		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);

	/* Reset the timer */
	if (!test_bit(__E1000_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}
3071
/* Per-skb transmit flags passed from e1000_xmit_frame() to e1000_tx_queue();
 * the upper 16 bits carry the 802.1q VLAN tag (see the MASK/SHIFT pair). */
#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16
3078
/* Set up a TSO context descriptor for @skb if it is a GSO packet.
 * Returns 1 if a context descriptor was queued, 0 if the skb needs no TSO,
 * or a negative errno if un-cloning the header failed. */
static int e1000_tso(struct e1000_adapter *adapter,
		     struct sk_buff *skb)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	int err;

	if (skb_is_gso(skb)) {
		/* the headers are rewritten below, so we need a private copy */
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		/* zero length/checksum fields and seed the TCP pseudo-header
		 * checksum; hardware fills in the rest per segment */
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								iph->daddr, 0,
								IPPROTO_TCP,
								0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb_transport_offset(skb) - 1;
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
			ipcse = 0;
		}
		/* byte offsets of the IP/TCP checksum fields for hardware */
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss = ipcss;
		context_desc->lower_setup.ip_fields.ipcso = ipcso;
		context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return 1;
	}

	return 0;
}
3154
3155static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
3156{
3157 struct e1000_ring *tx_ring = adapter->tx_ring;
3158 struct e1000_context_desc *context_desc;
3159 struct e1000_buffer *buffer_info;
3160 unsigned int i;
3161 u8 css;
3162
3163 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3164 css = skb_transport_offset(skb);
3165
3166 i = tx_ring->next_to_use;
3167 buffer_info = &tx_ring->buffer_info[i];
3168 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
3169
3170 context_desc->lower_setup.ip_config = 0;
3171 context_desc->upper_setup.tcp_fields.tucss = css;
3172 context_desc->upper_setup.tcp_fields.tucso =
3173 css + skb->csum_offset;
3174 context_desc->upper_setup.tcp_fields.tucse = 0;
3175 context_desc->tcp_seg_setup.data = 0;
3176 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
3177
3178 buffer_info->time_stamp = jiffies;
3179 buffer_info->next_to_watch = i;
3180
3181 i++;
3182 if (i == tx_ring->count)
3183 i = 0;
3184 tx_ring->next_to_use = i;
3185
3186 return 1;
3187 }
3188
3189 return 0;
3190}
3191
#define E1000_MAX_PER_TXD	8192
#define E1000_MAX_TXD_PWR	12

/* DMA-map the skb's linear data and page fragments into consecutive
 * tx_ring buffer_info slots starting at next_to_use. Returns the number of
 * descriptors consumed, or -1 on a DMA mapping failure.
 *
 * NOTE(review): on a mapping failure partway through, buffers mapped by
 * earlier iterations are not unmapped here — presumably the caller's reset
 * path reclaims them; verify against the Tx cleanup code. */
static int e1000_tx_map(struct e1000_adapter *adapter,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb->len - skb->data_len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	/* map the linear portion of the skb */
	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);

		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if (mss && !nr_frags && size == len && size > 8)
			size -= 4;

		buffer_info->length = size;
		/* set time_stamp *before* dma to help avoid a possible race */
		buffer_info->time_stamp = jiffies;
		buffer_info->dma =
			pci_map_single(adapter->pdev,
				skb->data + offset,
				size,
				PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(buffer_info->dma)) {
			dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
			adapter->tx_dma_failed++;
			return -1;
		}
		buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	/* map each page fragment, splitting at max_per_txd as above */
	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if (mss && f == (nr_frags-1) && size == len && size > 8)
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->dma =
				pci_map_page(adapter->pdev,
					frag->page,
					offset,
					size,
					PCI_DMA_TODEVICE);
			if (pci_dma_mapping_error(buffer_info->dma)) {
				dev_err(&adapter->pdev->dev,
					"TX DMA page map failed\n");
				adapter->tx_dma_failed++;
				return -1;
			}

			buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	/* step back to the last descriptor actually used */
	if (i == 0)
		i = tx_ring->count - 1;
	else
		i--;

	/* the skb is freed from the last slot; 'first' watches the last */
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}
3292
/* Write @count hardware Tx descriptors (previously mapped by e1000_tx_map)
 * into the ring, translate @tx_flags into descriptor command/option bits,
 * and notify hardware by bumping the tail register. */
static void e1000_tx_queue(struct e1000_adapter *adapter,
			   int tx_flags, int count)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (tx_flags & E1000_TX_FLAGS_TSO) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (tx_flags & E1000_TX_FLAGS_IPV4)
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_CSUM) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_VLAN) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);

		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	/* final descriptor of the packet also gets the per-adapter command
	 * bits (e.g. RS/IDE), set up elsewhere in txd_cmd */
	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
3350
3351#define MINIMUM_DHCP_PACKET_SIZE 282
3352static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
3353 struct sk_buff *skb)
3354{
3355 struct e1000_hw *hw = &adapter->hw;
3356 u16 length, offset;
3357
3358 if (vlan_tx_tag_present(skb)) {
3359 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id)
3360 && (adapter->hw.mng_cookie.status &
3361 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
3362 return 0;
3363 }
3364
3365 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
3366 return 0;
3367
3368 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
3369 return 0;
3370
3371 {
3372 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
3373 struct udphdr *udp;
3374
3375 if (ip->protocol != IPPROTO_UDP)
3376 return 0;
3377
3378 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
3379 if (ntohs(udp->dest) != 67)
3380 return 0;
3381
3382 offset = (u8 *)udp + 8 - skb->data;
3383 length = skb->len - offset;
3384 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
3385 }
3386
3387 return 0;
3388}
3389
/* Slow path of e1000_maybe_stop_tx(): stop the queue, then re-check the
 * free-descriptor count under a full barrier in case the cleanup path freed
 * space concurrently. Returns 0 if the queue could be restarted, -EBUSY if
 * it must stay stopped. The stop-then-recheck ordering is what makes this
 * race-free; do not reorder. */
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (e1000_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}
3410
3411static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
3412{
3413 struct e1000_adapter *adapter = netdev_priv(netdev);
3414
3415 if (e1000_desc_unused(adapter->tx_ring) >= size)
3416 return 0;
3417 return __e1000_maybe_stop_tx(netdev, size);
3418}
3419
/* descriptors needed for a buffer of size S split at 2^X-byte chunks */
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
/* hard_start_xmit entry point: validate the skb, apply TSO FIFO
 * workarounds, reserve descriptors, set up offload context descriptors,
 * DMA-map the data, and queue it to hardware. Serialized by
 * tx_queue_lock (trylock; collisions return NETDEV_TX_LOCKED). */
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int first;
	unsigned int max_per_txd = E1000_MAX_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb->len;
	unsigned long irq_flags;
	unsigned int nr_frags = 0;
	unsigned int mss = 0;
	int count = 0;
	int tso;
	unsigned int f;
	len -= skb->data_len;	/* len = linear portion only */

	if (test_bit(__E1000_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops. */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
		 * points to just header, pull a few bytes of payload from
		 * frags into skb->data */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
			unsigned int pull_size;

			pull_size = min((unsigned int)4, skb->data_len);
			if (!__pskb_pull_tail(skb, pull_size)) {
				ndev_err(netdev,
					 "__pskb_pull_tail failed.\n");
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
			len = skb->len - skb->data_len;
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;	/* one more for the TSO sentinel workaround */

	count += TXD_USE_COUNT(len, max_txd_pwr);

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
				       max_txd_pwr);

	if (adapter->hw.mac.tx_pkt_filtering)
		e1000_transfer_dhcp_info(adapter, skb);

	if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags))
		/* Collision - tell upper layer to requeue */
		return NETDEV_TX_LOCKED;

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time */
	if (e1000_maybe_stop_tx(netdev, count + 2)) {
		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
		return NETDEV_TX_BUSY;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= E1000_TX_FLAGS_TSO;
	else if (e1000_tx_csum(adapter, skb))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	/* Old method was to assume IPv4 packet by default if TSO was enabled.
	 * 82571 hardware supports TSO capabilities for IPv6 as well...
	 * no longer assume, we must. */
	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
	if (count < 0) {
		/* handle pci_map_single() error in e1000_tx_map */
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
		return NETDEV_TX_BUSY;
	}

	e1000_tx_queue(adapter, tx_flags, count);

	netdev->trans_start = jiffies;

	/* Make sure there is space in the ring for the next send. */
	e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);

	spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
	return NETDEV_TX_OK;
}
3547
3548/**
3549 * e1000_tx_timeout - Respond to a Tx Hang
3550 * @netdev: network interface device structure
3551 **/
3552static void e1000_tx_timeout(struct net_device *netdev)
3553{
3554 struct e1000_adapter *adapter = netdev_priv(netdev);
3555
3556 /* Do the reset outside of interrupt context */
3557 adapter->tx_timeout_count++;
3558 schedule_work(&adapter->reset_task);
3559}
3560
3561static void e1000_reset_task(struct work_struct *work)
3562{
3563 struct e1000_adapter *adapter;
3564 adapter = container_of(work, struct e1000_adapter, reset_task);
3565
3566 e1000e_reinit_locked(adapter);
3567}
3568
3569/**
3570 * e1000_get_stats - Get System Network Statistics
3571 * @netdev: network interface device structure
3572 *
3573 * Returns the address of the device statistics structure.
3574 * The statistics are actually updated from the timer callback.
3575 **/
3576static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3577{
3578 struct e1000_adapter *adapter = netdev_priv(netdev);
3579
3580 /* only return the current stats */
3581 return &adapter->net_stats;
3582}
3583
3584/**
3585 * e1000_change_mtu - Change the Maximum Transfer Unit
3586 * @netdev: network interface device structure
3587 * @new_mtu: new value for maximum frame size
3588 *
3589 * Returns 0 on success, negative on failure
3590 **/
3591static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3592{
3593 struct e1000_adapter *adapter = netdev_priv(netdev);
3594 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3595
3596 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3597 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3598 ndev_err(netdev, "Invalid MTU setting\n");
3599 return -EINVAL;
3600 }
3601
3602 /* Jumbo frame size limits */
3603 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
3604 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
3605 ndev_err(netdev, "Jumbo Frames not supported.\n");
3606 return -EINVAL;
3607 }
3608 if (adapter->hw.phy.type == e1000_phy_ife) {
3609 ndev_err(netdev, "Jumbo Frames not supported.\n");
3610 return -EINVAL;
3611 }
3612 }
3613
3614#define MAX_STD_JUMBO_FRAME_SIZE 9234
3615 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3616 ndev_err(netdev, "MTU > 9216 not supported.\n");
3617 return -EINVAL;
3618 }
3619
3620 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
3621 msleep(1);
3622 /* e1000e_down has a dependency on max_frame_size */
3623 adapter->hw.mac.max_frame_size = max_frame;
3624 if (netif_running(netdev))
3625 e1000e_down(adapter);
3626
3627 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3628 * means we reserve 2 more, this pushes us to allocate from the next
3629 * larger slab size.
3630 * i.e. RXBUFFER_2048 --> size-4096 slab
3631 * however with the new *_jumbo* routines, jumbo receives will use
3632 * fragmented skbs */
3633
3634 if (max_frame <= 256)
3635 adapter->rx_buffer_len = 256;
3636 else if (max_frame <= 512)
3637 adapter->rx_buffer_len = 512;
3638 else if (max_frame <= 1024)
3639 adapter->rx_buffer_len = 1024;
3640 else if (max_frame <= 2048)
3641 adapter->rx_buffer_len = 2048;
3642 else
3643 adapter->rx_buffer_len = 4096;
3644
3645 /* adjust allocation if LPE protects us, and we aren't using SBP */
3646 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
3647 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
3648 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
3649 + ETH_FCS_LEN ;
3650
3651 ndev_info(netdev, "changing MTU from %d to %d\n",
3652 netdev->mtu, new_mtu);
3653 netdev->mtu = new_mtu;
3654
3655 if (netif_running(netdev))
3656 e1000e_up(adapter);
3657 else
3658 e1000e_reset(adapter);
3659
3660 clear_bit(__E1000_RESETTING, &adapter->state);
3661
3662 return 0;
3663}
3664
/* MII ioctl handler: reports the PHY address (SIOCGMIIPHY) and reads
 * PHY registers (SIOCGMIIREG); PHY register writes are not supported. */
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long irq_flags;

	/* MII registers only exist on copper media */
	if (adapter->hw.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* stats_lock serializes this PHY read against other users */
		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
		if (e1e_rphy(&adapter->hw, data->reg_num & 0x1F,
			     &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
		break;
	case SIOCSMIIREG:
		/* intentional fallthrough: writes are rejected */
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
3696
3697static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3698{
3699 switch (cmd) {
3700 case SIOCGMIIPHY:
3701 case SIOCGMIIREG:
3702 case SIOCSMIIREG:
3703 return e1000_mii_ioctl(netdev, ifr, cmd);
3704 default:
3705 return -EOPNOTSUPP;
3706 }
3707}
3708
/**
 * e1000_suspend - Power-management suspend routine
 * @pdev: PCI device information struct
 * @state: target sleep state
 *
 * Detaches the interface, arms the configured Wake-on-LAN filters (if
 * any) and places the device in a low-power state.  Also called from
 * e1000_shutdown().  Returns 0 on success, negative errno on failure.
 **/
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
		e1000e_down(adapter);
		e1000_free_irq(adapter);
	}

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	/* link is currently up, so waking on link-up makes no sense */
	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		/* receive path must stay configured so wake packets match */
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		ctrl = er32(CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC |
			E1000_CTRL_EN_PHY_PWR_MGMT;
		ew32(CTRL, ctrl);

		if (adapter->hw.media_type == e1000_media_type_fiber ||
		   adapter->hw.media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		/* Allow time for pending master requests to run */
		e1000e_disable_pcie_master(&adapter->hw);

		/* enable PME and program the wake filter control */
		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		/* no wake sources: fully disarm wake-up logic */
		ew32(WUC, 0);
		ew32(WUFC, 0);
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	e1000_release_manageability(adapter);

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->flags & FLAG_MNG_PT_ENABLED) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	}

	if (adapter->hw.phy.type == e1000_phy_igp_3)
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	e1000_release_hw_control(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
3797
3798#ifdef CONFIG_PM
3799static int e1000_resume(struct pci_dev *pdev)
3800{
3801 struct net_device *netdev = pci_get_drvdata(pdev);
3802 struct e1000_adapter *adapter = netdev_priv(netdev);
3803 struct e1000_hw *hw = &adapter->hw;
3804 u32 err;
3805
3806 pci_set_power_state(pdev, PCI_D0);
3807 pci_restore_state(pdev);
3808 err = pci_enable_device(pdev);
3809 if (err) {
3810 dev_err(&pdev->dev,
3811 "Cannot enable PCI device from suspend\n");
3812 return err;
3813 }
3814
3815 pci_set_master(pdev);
3816
3817 pci_enable_wake(pdev, PCI_D3hot, 0);
3818 pci_enable_wake(pdev, PCI_D3cold, 0);
3819
3820 if (netif_running(netdev)) {
3821 err = e1000_request_irq(adapter);
3822 if (err)
3823 return err;
3824 }
3825
3826 e1000e_power_up_phy(adapter);
3827 e1000e_reset(adapter);
3828 ew32(WUS, ~0);
3829
3830 e1000_init_manageability(adapter);
3831
3832 if (netif_running(netdev))
3833 e1000e_up(adapter);
3834
3835 netif_device_attach(netdev);
3836
3837 /* If the controller has AMT, do not set DRV_LOAD until the interface
3838 * is up. For all other cases, let the f/w know that the h/w is now
3839 * under the control of the driver. */
3840 if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw))
3841 e1000_get_hw_control(adapter);
3842
3843 return 0;
3844}
3845#endif
3846
/* System shutdown/reboot hook: reuse the suspend path so WoL filters
 * are armed (if configured) before power is removed. */
static void e1000_shutdown(struct pci_dev *pdev)
{
	e1000_suspend(pdev, PMSG_SUSPEND);
}
3851
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* mask the line and invoke the ISR by hand */
	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);

	/* reap completed transmits so netconsole can keep queueing */
	e1000_clean_tx_irq(adapter);

	enable_irq(adapter->pdev->irq);
}
#endif
3870
3871/**
3872 * e1000_io_error_detected - called when PCI error is detected
3873 * @pdev: Pointer to PCI device
3874 * @state: The current pci connection state
3875 *
3876 * This function is called after a PCI bus error affecting
3877 * this device has been detected.
3878 */
3879static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
3880 pci_channel_state_t state)
3881{
3882 struct net_device *netdev = pci_get_drvdata(pdev);
3883 struct e1000_adapter *adapter = netdev_priv(netdev);
3884
3885 netif_device_detach(netdev);
3886
3887 if (netif_running(netdev))
3888 e1000e_down(adapter);
3889 pci_disable_device(pdev);
3890
3891 /* Request a slot slot reset. */
3892 return PCI_ERS_RESULT_NEED_RESET;
3893}
3894
3895/**
3896 * e1000_io_slot_reset - called after the pci bus has been reset.
3897 * @pdev: Pointer to PCI device
3898 *
3899 * Restart the card from scratch, as if from a cold-boot. Implementation
3900 * resembles the first-half of the e1000_resume routine.
3901 */
3902static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
3903{
3904 struct net_device *netdev = pci_get_drvdata(pdev);
3905 struct e1000_adapter *adapter = netdev_priv(netdev);
3906 struct e1000_hw *hw = &adapter->hw;
3907
3908 if (pci_enable_device(pdev)) {
3909 dev_err(&pdev->dev,
3910 "Cannot re-enable PCI device after reset.\n");
3911 return PCI_ERS_RESULT_DISCONNECT;
3912 }
3913 pci_set_master(pdev);
3914
3915 pci_enable_wake(pdev, PCI_D3hot, 0);
3916 pci_enable_wake(pdev, PCI_D3cold, 0);
3917
3918 e1000e_reset(adapter);
3919 ew32(WUS, ~0);
3920
3921 return PCI_ERS_RESULT_RECOVERED;
3922}
3923
3924/**
3925 * e1000_io_resume - called when traffic can start flowing again.
3926 * @pdev: Pointer to PCI device
3927 *
3928 * This callback is called when the error recovery driver tells us that
3929 * its OK to resume normal operation. Implementation resembles the
3930 * second-half of the e1000_resume routine.
3931 */
3932static void e1000_io_resume(struct pci_dev *pdev)
3933{
3934 struct net_device *netdev = pci_get_drvdata(pdev);
3935 struct e1000_adapter *adapter = netdev_priv(netdev);
3936
3937 e1000_init_manageability(adapter);
3938
3939 if (netif_running(netdev)) {
3940 if (e1000e_up(adapter)) {
3941 dev_err(&pdev->dev,
3942 "can't bring device back up after reset\n");
3943 return;
3944 }
3945 }
3946
3947 netif_device_attach(netdev);
3948
3949 /* If the controller has AMT, do not set DRV_LOAD until the interface
3950 * is up. For all other cases, let the f/w know that the h/w is now
3951 * under the control of the driver. */
3952 if (!(adapter->flags & FLAG_HAS_AMT) ||
3953 !e1000e_check_mng_mode(&adapter->hw))
3954 e1000_get_hw_control(adapter);
3955
3956}
3957
/* Log bus width, MAC address, connection type and MAC/PHY/PBA
 * identification for this adapter; called once at probe time. */
static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 part_num;

	/* print bus type/speed/width info */
	ndev_info(netdev, "(PCI Express:2.5GB/s:%s) "
		  "%02x:%02x:%02x:%02x:%02x:%02x\n",
		  /* bus width */
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  "Width x1"),
		  /* MAC address */
		  netdev->dev_addr[0], netdev->dev_addr[1],
		  netdev->dev_addr[2], netdev->dev_addr[3],
		  netdev->dev_addr[4], netdev->dev_addr[5]);
	ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n",
		  (hw->phy.type == e1000_phy_ife)
		   ? "10/100" : "1000");
	/* PBA (printed board assembly) number comes from the NVM */
	e1000e_read_part_num(hw, &part_num);
	ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
		  hw->mac.type, hw->phy.type,
		  (part_num >> 8), (part_num & 0xff));
}
3982
3983/**
3984 * e1000_probe - Device Initialization Routine
3985 * @pdev: PCI device information struct
3986 * @ent: entry in e1000_pci_tbl
3987 *
3988 * Returns 0 on success, negative on failure
3989 *
3990 * e1000_probe initializes an adapter identified by a pci_dev structure.
3991 * The OS initialization, configuring of the adapter private structure,
3992 * and a hardware reset occur.
3993 **/
3994static int __devinit e1000_probe(struct pci_dev *pdev,
3995 const struct pci_device_id *ent)
3996{
3997 struct net_device *netdev;
3998 struct e1000_adapter *adapter;
3999 struct e1000_hw *hw;
4000 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
4001 unsigned long mmio_start, mmio_len;
4002 unsigned long flash_start, flash_len;
4003
4004 static int cards_found;
4005 int i, err, pci_using_dac;
4006 u16 eeprom_data = 0;
4007 u16 eeprom_apme_mask = E1000_EEPROM_APME;
4008
4009 err = pci_enable_device(pdev);
4010 if (err)
4011 return err;
4012
4013 pci_using_dac = 0;
4014 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
4015 if (!err) {
4016 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
4017 if (!err)
4018 pci_using_dac = 1;
4019 } else {
4020 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
4021 if (err) {
4022 err = pci_set_consistent_dma_mask(pdev,
4023 DMA_32BIT_MASK);
4024 if (err) {
4025 dev_err(&pdev->dev, "No usable DMA "
4026 "configuration, aborting\n");
4027 goto err_dma;
4028 }
4029 }
4030 }
4031
4032 err = pci_request_regions(pdev, e1000e_driver_name);
4033 if (err)
4034 goto err_pci_reg;
4035
4036 pci_set_master(pdev);
4037
4038 err = -ENOMEM;
4039 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
4040 if (!netdev)
4041 goto err_alloc_etherdev;
4042
4043 SET_MODULE_OWNER(netdev);
4044 SET_NETDEV_DEV(netdev, &pdev->dev);
4045
4046 pci_set_drvdata(pdev, netdev);
4047 adapter = netdev_priv(netdev);
4048 hw = &adapter->hw;
4049 adapter->netdev = netdev;
4050 adapter->pdev = pdev;
4051 adapter->ei = ei;
4052 adapter->pba = ei->pba;
4053 adapter->flags = ei->flags;
4054 adapter->hw.adapter = adapter;
4055 adapter->hw.mac.type = ei->mac;
4056 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
4057
4058 mmio_start = pci_resource_start(pdev, 0);
4059 mmio_len = pci_resource_len(pdev, 0);
4060
4061 err = -EIO;
4062 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
4063 if (!adapter->hw.hw_addr)
4064 goto err_ioremap;
4065
4066 if ((adapter->flags & FLAG_HAS_FLASH) &&
4067 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
4068 flash_start = pci_resource_start(pdev, 1);
4069 flash_len = pci_resource_len(pdev, 1);
4070 adapter->hw.flash_address = ioremap(flash_start, flash_len);
4071 if (!adapter->hw.flash_address)
4072 goto err_flashmap;
4073 }
4074
4075 /* construct the net_device struct */
4076 netdev->open = &e1000_open;
4077 netdev->stop = &e1000_close;
4078 netdev->hard_start_xmit = &e1000_xmit_frame;
4079 netdev->get_stats = &e1000_get_stats;
4080 netdev->set_multicast_list = &e1000_set_multi;
4081 netdev->set_mac_address = &e1000_set_mac;
4082 netdev->change_mtu = &e1000_change_mtu;
4083 netdev->do_ioctl = &e1000_ioctl;
4084 e1000e_set_ethtool_ops(netdev);
4085 netdev->tx_timeout = &e1000_tx_timeout;
4086 netdev->watchdog_timeo = 5 * HZ;
4087 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
4088 netdev->vlan_rx_register = e1000_vlan_rx_register;
4089 netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
4090 netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
4091#ifdef CONFIG_NET_POLL_CONTROLLER
4092 netdev->poll_controller = e1000_netpoll;
4093#endif
4094 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
4095
4096 netdev->mem_start = mmio_start;
4097 netdev->mem_end = mmio_start + mmio_len;
4098
4099 adapter->bd_number = cards_found++;
4100
4101 /* setup adapter struct */
4102 err = e1000_sw_init(adapter);
4103 if (err)
4104 goto err_sw_init;
4105
4106 err = -EIO;
4107
4108 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
4109 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
4110 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
4111
4112 err = ei->get_invariants(adapter);
4113 if (err)
4114 goto err_hw_init;
4115
4116 hw->mac.ops.get_bus_info(&adapter->hw);
4117
4118 adapter->hw.phy.wait_for_link = 0;
4119
4120 /* Copper options */
4121 if (adapter->hw.media_type == e1000_media_type_copper) {
4122 adapter->hw.phy.mdix = AUTO_ALL_MODES;
4123 adapter->hw.phy.disable_polarity_correction = 0;
4124 adapter->hw.phy.ms_type = e1000_ms_hw_default;
4125 }
4126
4127 if (e1000_check_reset_block(&adapter->hw))
4128 ndev_info(netdev,
4129 "PHY reset is blocked due to SOL/IDER session.\n");
4130
4131 netdev->features = NETIF_F_SG |
4132 NETIF_F_HW_CSUM |
4133 NETIF_F_HW_VLAN_TX |
4134 NETIF_F_HW_VLAN_RX;
4135
4136 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
4137 netdev->features |= NETIF_F_HW_VLAN_FILTER;
4138
4139 netdev->features |= NETIF_F_TSO;
4140 netdev->features |= NETIF_F_TSO6;
4141
4142 if (pci_using_dac)
4143 netdev->features |= NETIF_F_HIGHDMA;
4144
4145 /* We should not be using LLTX anymore, but we are still TX faster with
4146 * it. */
4147 netdev->features |= NETIF_F_LLTX;
4148
4149 if (e1000e_enable_mng_pass_thru(&adapter->hw))
4150 adapter->flags |= FLAG_MNG_PT_ENABLED;
4151
4152 /* before reading the NVM, reset the controller to
4153 * put the device in a known good starting state */
4154 adapter->hw.mac.ops.reset_hw(&adapter->hw);
4155
4156 /*
4157 * systems with ASPM and others may see the checksum fail on the first
4158 * attempt. Let's give it a few tries
4159 */
4160 for (i = 0;; i++) {
4161 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
4162 break;
4163 if (i == 2) {
4164 ndev_err(netdev, "The NVM Checksum Is Not Valid\n");
4165 err = -EIO;
4166 goto err_eeprom;
4167 }
4168 }
4169
4170 /* copy the MAC address out of the NVM */
4171 if (e1000e_read_mac_addr(&adapter->hw))
4172 ndev_err(netdev, "NVM Read Error while reading MAC address\n");
4173
4174 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
4175 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
4176
4177 if (!is_valid_ether_addr(netdev->perm_addr)) {
4178 ndev_err(netdev, "Invalid MAC Address: "
4179 "%02x:%02x:%02x:%02x:%02x:%02x\n",
4180 netdev->perm_addr[0], netdev->perm_addr[1],
4181 netdev->perm_addr[2], netdev->perm_addr[3],
4182 netdev->perm_addr[4], netdev->perm_addr[5]);
4183 err = -EIO;
4184 goto err_eeprom;
4185 }
4186
4187 init_timer(&adapter->watchdog_timer);
4188 adapter->watchdog_timer.function = &e1000_watchdog;
4189 adapter->watchdog_timer.data = (unsigned long) adapter;
4190
4191 init_timer(&adapter->phy_info_timer);
4192 adapter->phy_info_timer.function = &e1000_update_phy_info;
4193 adapter->phy_info_timer.data = (unsigned long) adapter;
4194
4195 INIT_WORK(&adapter->reset_task, e1000_reset_task);
4196 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
4197
4198 e1000e_check_options(adapter);
4199
4200 /* Initialize link parameters. User can change them with ethtool */
4201 adapter->hw.mac.autoneg = 1;
4202 adapter->hw.mac.original_fc = e1000_fc_default;
4203 adapter->hw.mac.fc = e1000_fc_default;
4204 adapter->hw.phy.autoneg_advertised = 0x2f;
4205
4206 /* ring size defaults */
4207 adapter->rx_ring->count = 256;
4208 adapter->tx_ring->count = 256;
4209
4210 /*
4211 * Initial Wake on LAN setting - If APM wake is enabled in
4212 * the EEPROM, enable the ACPI Magic Packet filter
4213 */
4214 if (adapter->flags & FLAG_APME_IN_WUC) {
4215 /* APME bit in EEPROM is mapped to WUC.APME */
4216 eeprom_data = er32(WUC);
4217 eeprom_apme_mask = E1000_WUC_APME;
4218 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
4219 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
4220 (adapter->hw.bus.func == 1))
4221 e1000_read_nvm(&adapter->hw,
4222 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
4223 else
4224 e1000_read_nvm(&adapter->hw,
4225 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4226 }
4227
4228 /* fetch WoL from EEPROM */
4229 if (eeprom_data & eeprom_apme_mask)
4230 adapter->eeprom_wol |= E1000_WUFC_MAG;
4231
4232 /*
4233 * now that we have the eeprom settings, apply the special cases
4234 * where the eeprom may be wrong or the board simply won't support
4235 * wake on lan on a particular port
4236 */
4237 if (!(adapter->flags & FLAG_HAS_WOL))
4238 adapter->eeprom_wol = 0;
4239
4240 /* initialize the wol settings based on the eeprom settings */
4241 adapter->wol = adapter->eeprom_wol;
4242
4243 /* reset the hardware with the new settings */
4244 e1000e_reset(adapter);
4245
4246 /* If the controller has AMT, do not set DRV_LOAD until the interface
4247 * is up. For all other cases, let the f/w know that the h/w is now
4248 * under the control of the driver. */
4249 if (!(adapter->flags & FLAG_HAS_AMT) ||
4250 !e1000e_check_mng_mode(&adapter->hw))
4251 e1000_get_hw_control(adapter);
4252
4253 /* tell the stack to leave us alone until e1000_open() is called */
4254 netif_carrier_off(netdev);
4255 netif_stop_queue(netdev);
4256
4257 strcpy(netdev->name, "eth%d");
4258 err = register_netdev(netdev);
4259 if (err)
4260 goto err_register;
4261
4262 e1000_print_device_info(adapter);
4263
4264 return 0;
4265
4266err_register:
4267err_hw_init:
4268 e1000_release_hw_control(adapter);
4269err_eeprom:
4270 if (!e1000_check_reset_block(&adapter->hw))
4271 e1000_phy_hw_reset(&adapter->hw);
4272
4273 if (adapter->hw.flash_address)
4274 iounmap(adapter->hw.flash_address);
4275
4276err_flashmap:
4277 kfree(adapter->tx_ring);
4278 kfree(adapter->rx_ring);
4279err_sw_init:
4280 iounmap(adapter->hw.hw_addr);
4281err_ioremap:
4282 free_netdev(netdev);
4283err_alloc_etherdev:
4284 pci_release_regions(pdev);
4285err_pci_reg:
4286err_dma:
4287 pci_disable_device(pdev);
4288 return err;
4289}
4290
4291/**
4292 * e1000_remove - Device Removal Routine
4293 * @pdev: PCI device information struct
4294 *
4295 * e1000_remove is called by the PCI subsystem to alert the driver
4296 * that it should release a PCI device. The could be caused by a
4297 * Hot-Plug event, or because the driver is going to be removed from
4298 * memory.
4299 **/
4300static void __devexit e1000_remove(struct pci_dev *pdev)
4301{
4302 struct net_device *netdev = pci_get_drvdata(pdev);
4303 struct e1000_adapter *adapter = netdev_priv(netdev);
4304
4305 /* flush_scheduled work may reschedule our watchdog task, so
4306 * explicitly disable watchdog tasks from being rescheduled */
4307 set_bit(__E1000_DOWN, &adapter->state);
4308 del_timer_sync(&adapter->watchdog_timer);
4309 del_timer_sync(&adapter->phy_info_timer);
4310
4311 flush_scheduled_work();
4312
4313 e1000_release_manageability(adapter);
4314
4315 /* Release control of h/w to f/w. If f/w is AMT enabled, this
4316 * would have already happened in close and is redundant. */
4317 e1000_release_hw_control(adapter);
4318
4319 unregister_netdev(netdev);
4320
4321 if (!e1000_check_reset_block(&adapter->hw))
4322 e1000_phy_hw_reset(&adapter->hw);
4323
4324 kfree(adapter->tx_ring);
4325 kfree(adapter->rx_ring);
4326
4327 iounmap(adapter->hw.hw_addr);
4328 if (adapter->hw.flash_address)
4329 iounmap(adapter->hw.flash_address);
4330 pci_release_regions(pdev);
4331
4332 free_netdev(netdev);
4333
4334 pci_disable_device(pdev);
4335}
4336
/* PCI Error Recovery (ERS) callbacks: detach on error, re-init after
 * the slot reset, and reattach once traffic may flow again. */
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
4343
/* PCI device IDs this driver binds to; driver_data indexes
 * e1000_info_tbl for the matching board constants. */
static struct pci_device_id e1000_pci_tbl[] = {
	/*
	 * Support for 82571/2/3, es2lan and ich8 will be phased in
	 * stepwise.  The entries below stay commented out until then.

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
	*/

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },

	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
4388
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name     = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	/* shutdown reuses the suspend path even without CONFIG_PM */
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
4403
4404/**
4405 * e1000_init_module - Driver Registration Routine
4406 *
4407 * e1000_init_module is the first routine called when the driver is
4408 * loaded. All it does is register with the PCI subsystem.
4409 **/
4410static int __init e1000_init_module(void)
4411{
4412 int ret;
4413 printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
4414 e1000e_driver_name, e1000e_driver_version);
4415 printk(KERN_INFO "%s: Copyright (c) 1999-2007 Intel Corporation.\n",
4416 e1000e_driver_name);
4417 ret = pci_register_driver(&e1000_driver);
4418
4419 return ret;
4420}
4421module_init(e1000_init_module);
4422
4423/**
4424 * e1000_exit_module - Driver Exit Cleanup Routine
4425 *
4426 * e1000_exit_module is called just before the driver is removed
4427 * from memory.
4428 **/
4429static void __exit e1000_exit_module(void)
4430{
4431 pci_unregister_driver(&e1000_driver);
4432}
4433module_exit(e1000_exit_module);
4434
4435
4436MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
4437MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
4438MODULE_LICENSE("GPL");
4439MODULE_VERSION(DRV_VERSION);
4440
4441/* e1000_main.c */
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
new file mode 100644
index 000000000000..e4e655efb23c
--- /dev/null
+++ b/drivers/net/e1000e/param.c
@@ -0,0 +1,382 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include <linux/netdevice.h>
30
31#include "e1000.h"
32
/* This is the only thing that needs to be changed to adjust the
 * maximum number of ports that the driver can manage.
 */

#define E1000_MAX_NIC 32

#define OPTION_UNSET   -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED  1

/* packets at or below this size are copied into a fresh skb on receive */
#define COPYBREAK_DEFAULT 256
unsigned int copybreak = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
48
/* All parameters are treated the same, as an integer array of values.
 * This macro just reduces the need to repeat the same declaration code
 * over and over (plus this helps to avoid typo bugs).
 */

#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
/* declares the per-board value array X[] plus num_X, the count of
 * entries the user actually supplied on the command line */
#define E1000_PARAM(X, desc) \
	static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
	static int num_##X; \
	module_param_array_named(X, X, int, &num_##X, 0); \
	MODULE_PARM_DESC(X, desc);
60
61
62/* Transmit Interrupt Delay in units of 1.024 microseconds
63 * Tx interrupt delay needs to typically be set to something non zero
64 *
65 * Valid Range: 0-65535
66 */
67E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
68#define DEFAULT_TIDV 8
69#define MAX_TXDELAY 0xFFFF
70#define MIN_TXDELAY 0
71
72/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
73 *
74 * Valid Range: 0-65535
75 */
76E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
77#define DEFAULT_TADV 32
78#define MAX_TXABSDELAY 0xFFFF
79#define MIN_TXABSDELAY 0
80
81/* Receive Interrupt Delay in units of 1.024 microseconds
82 * hardware will likely hang if you set this to anything but zero.
83 *
84 * Valid Range: 0-65535
85 */
86E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
87#define DEFAULT_RDTR 0
88#define MAX_RXDELAY 0xFFFF
89#define MIN_RXDELAY 0
90
91/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
92 *
93 * Valid Range: 0-65535
94 */
95E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
96#define DEFAULT_RADV 8
97#define MAX_RXABSDELAY 0xFFFF
98#define MIN_RXABSDELAY 0
99
100/* Interrupt Throttle Rate (interrupts/sec)
101 *
102 * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
103 */
104E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
105#define DEFAULT_ITR 3
106#define MAX_ITR 100000
107#define MIN_ITR 100
108
109/* Enable Smart Power Down of the PHY
110 *
111 * Valid Range: 0, 1
112 *
113 * Default Value: 0 (disabled)
114 */
115E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
116
117/* Enable Kumeran Lock Loss workaround
118 *
119 * Valid Range: 0, 1
120 *
121 * Default Value: 1 (enabled)
122 */
123E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
124
/* Description of one module parameter: how to validate it (on/off
 * switch, numeric range, or a list of discrete values), its default,
 * and the message fragments used when reporting bad input. */
struct e1000_option {
	enum { enable_option, range_option, list_option } type;
	char *name;	/* human-readable option name for log messages */
	char *err;	/* suffix printed when the supplied value is invalid */
	int def;	/* default used when unset or invalid */
	union {
		struct { /* range_option info */
			int min;
			int max;
		} r;
		struct { /* list_option info */
			int nr;	/* number of entries in p[] */
			struct e1000_opt_list { int i; char *str; } *p;
		} l;
	} arg;
};
141
142static int __devinit e1000_validate_option(int *value,
143 struct e1000_option *opt,
144 struct e1000_adapter *adapter)
145{
146 if (*value == OPTION_UNSET) {
147 *value = opt->def;
148 return 0;
149 }
150
151 switch (opt->type) {
152 case enable_option:
153 switch (*value) {
154 case OPTION_ENABLED:
155 ndev_info(adapter->netdev, "%s Enabled\n", opt->name);
156 return 0;
157 case OPTION_DISABLED:
158 ndev_info(adapter->netdev, "%s Disabled\n", opt->name);
159 return 0;
160 }
161 break;
162 case range_option:
163 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
164 ndev_info(adapter->netdev,
165 "%s set to %i\n", opt->name, *value);
166 return 0;
167 }
168 break;
169 case list_option: {
170 int i;
171 struct e1000_opt_list *ent;
172
173 for (i = 0; i < opt->arg.l.nr; i++) {
174 ent = &opt->arg.l.p[i];
175 if (*value == ent->i) {
176 if (ent->str[0] != '\0')
177 ndev_info(adapter->netdev, "%s\n",
178 ent->str);
179 return 0;
180 }
181 }
182 }
183 break;
184 default:
185 BUG();
186 }
187
188 ndev_info(adapter->netdev, "Invalid %s value specified (%i) %s\n",
189 opt->name, *value, opt->err);
190 *value = opt->def;
191 return -1;
192}
193
194/**
195 * e1000e_check_options - Range Checking for Command Line Parameters
196 * @adapter: board private structure
197 *
198 * This routine checks all command line parameters for valid user
199 * input. If an invalid value is given, or if no user specified
200 * value exists, a default value is used. The final value is stored
201 * in a variable in the adapter structure.
202 **/
203void __devinit e1000e_check_options(struct e1000_adapter *adapter)
204{
205 struct e1000_hw *hw = &adapter->hw;
206 struct net_device *netdev = adapter->netdev;
207 int bd = adapter->bd_number;
208
209 if (bd >= E1000_MAX_NIC) {
210 ndev_notice(netdev,
211 "Warning: no configuration for board #%i\n", bd);
212 ndev_notice(netdev, "Using defaults for all values\n");
213 }
214
215 { /* Transmit Interrupt Delay */
216 struct e1000_option opt = {
217 .type = range_option,
218 .name = "Transmit Interrupt Delay",
219 .err = "using default of "
220 __MODULE_STRING(DEFAULT_TIDV),
221 .def = DEFAULT_TIDV,
222 .arg = { .r = { .min = MIN_TXDELAY,
223 .max = MAX_TXDELAY } }
224 };
225
226 if (num_TxIntDelay > bd) {
227 adapter->tx_int_delay = TxIntDelay[bd];
228 e1000_validate_option(&adapter->tx_int_delay, &opt,
229 adapter);
230 } else {
231 adapter->tx_int_delay = opt.def;
232 }
233 }
234 { /* Transmit Absolute Interrupt Delay */
235 struct e1000_option opt = {
236 .type = range_option,
237 .name = "Transmit Absolute Interrupt Delay",
238 .err = "using default of "
239 __MODULE_STRING(DEFAULT_TADV),
240 .def = DEFAULT_TADV,
241 .arg = { .r = { .min = MIN_TXABSDELAY,
242 .max = MAX_TXABSDELAY } }
243 };
244
245 if (num_TxAbsIntDelay > bd) {
246 adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
247 e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
248 adapter);
249 } else {
250 adapter->tx_abs_int_delay = opt.def;
251 }
252 }
253 { /* Receive Interrupt Delay */
254 struct e1000_option opt = {
255 .type = range_option,
256 .name = "Receive Interrupt Delay",
257 .err = "using default of "
258 __MODULE_STRING(DEFAULT_RDTR),
259 .def = DEFAULT_RDTR,
260 .arg = { .r = { .min = MIN_RXDELAY,
261 .max = MAX_RXDELAY } }
262 };
263
264 /* modify min and default if 82573 for slow ping w/a,
265 * a value greater than 8 needs to be set for RDTR */
266 if (adapter->flags & FLAG_HAS_ASPM) {
267 opt.def = 32;
268 opt.arg.r.min = 8;
269 }
270
271 if (num_RxIntDelay > bd) {
272 adapter->rx_int_delay = RxIntDelay[bd];
273 e1000_validate_option(&adapter->rx_int_delay, &opt,
274 adapter);
275 } else {
276 adapter->rx_int_delay = opt.def;
277 }
278 }
279 { /* Receive Absolute Interrupt Delay */
280 struct e1000_option opt = {
281 .type = range_option,
282 .name = "Receive Absolute Interrupt Delay",
283 .err = "using default of "
284 __MODULE_STRING(DEFAULT_RADV),
285 .def = DEFAULT_RADV,
286 .arg = { .r = { .min = MIN_RXABSDELAY,
287 .max = MAX_RXABSDELAY } }
288 };
289
290 if (num_RxAbsIntDelay > bd) {
291 adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
292 e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
293 adapter);
294 } else {
295 adapter->rx_abs_int_delay = opt.def;
296 }
297 }
298 { /* Interrupt Throttling Rate */
299 struct e1000_option opt = {
300 .type = range_option,
301 .name = "Interrupt Throttling Rate (ints/sec)",
302 .err = "using default of "
303 __MODULE_STRING(DEFAULT_ITR),
304 .def = DEFAULT_ITR,
305 .arg = { .r = { .min = MIN_ITR,
306 .max = MAX_ITR } }
307 };
308
309 if (num_InterruptThrottleRate > bd) {
310 adapter->itr = InterruptThrottleRate[bd];
311 switch (adapter->itr) {
312 case 0:
313 ndev_info(netdev, "%s turned off\n",
314 opt.name);
315 break;
316 case 1:
317 ndev_info(netdev,
318 "%s set to dynamic mode\n",
319 opt.name);
320 adapter->itr_setting = adapter->itr;
321 adapter->itr = 20000;
322 break;
323 case 3:
324 ndev_info(netdev,
325 "%s set to dynamic conservative mode\n",
326 opt.name);
327 adapter->itr_setting = adapter->itr;
328 adapter->itr = 20000;
329 break;
330 default:
331 e1000_validate_option(&adapter->itr, &opt,
332 adapter);
333 /*
334 * save the setting, because the dynamic bits
335 * change itr. clear the lower two bits
336 * because they are used as control
337 */
338 adapter->itr_setting = adapter->itr & ~3;
339 break;
340 }
341 } else {
342 adapter->itr_setting = opt.def;
343 adapter->itr = 20000;
344 }
345 }
346 { /* Smart Power Down */
347 struct e1000_option opt = {
348 .type = enable_option,
349 .name = "PHY Smart Power Down",
350 .err = "defaulting to Disabled",
351 .def = OPTION_DISABLED
352 };
353
354 if (num_SmartPowerDownEnable > bd) {
355 int spd = SmartPowerDownEnable[bd];
356 e1000_validate_option(&spd, &opt, adapter);
357 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN)
358 && spd)
359 adapter->flags |= FLAG_SMART_POWER_DOWN;
360 }
361 }
362 { /* Kumeran Lock Loss Workaround */
363 struct e1000_option opt = {
364 .type = enable_option,
365 .name = "Kumeran Lock Loss Workaround",
366 .err = "defaulting to Enabled",
367 .def = OPTION_ENABLED
368 };
369
370 if (num_KumeranLockLoss > bd) {
371 int kmrn_lock_loss = KumeranLockLoss[bd];
372 e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
373 if (hw->mac.type == e1000_ich8lan)
374 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
375 kmrn_lock_loss);
376 } else {
377 if (hw->mac.type == e1000_ich8lan)
378 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
379 opt.def);
380 }
381 }
382}
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
new file mode 100644
index 000000000000..793231810ae0
--- /dev/null
+++ b/drivers/net/e1000e/phy.c
@@ -0,0 +1,1773 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include <linux/delay.h>
30
31#include "e1000.h"
32
33static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
34static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
35static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
36static s32 e1000_wait_autoneg(struct e1000_hw *hw);
37
38/* Cable length tables */
39static const u16 e1000_m88_cable_length_table[] =
40 { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
41
42static const u16 e1000_igp_2_cable_length_table[] =
43 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
44 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
45 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
46 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
47 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
48 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
49 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
50 124};
51#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
52 (sizeof(e1000_igp_2_cable_length_table) / \
53 sizeof(e1000_igp_2_cable_length_table[0]))
54
55/**
56 * e1000e_check_reset_block_generic - Check if PHY reset is blocked
57 * @hw: pointer to the HW structure
58 *
59 * Read the PHY management control register and check whether a PHY reset
60 * is blocked. If a reset is not blocked return 0, otherwise
61 * return E1000_BLK_PHY_RESET (12).
62 **/
63s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
64{
65 u32 manc;
66
67 manc = er32(MANC);
68
69 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
70 E1000_BLK_PHY_RESET : 0;
71}
72
73/**
74 * e1000e_get_phy_id - Retrieve the PHY ID and revision
75 * @hw: pointer to the HW structure
76 *
77 * Reads the PHY registers and stores the PHY ID and possibly the PHY
78 * revision in the hardware structure.
79 **/
80s32 e1000e_get_phy_id(struct e1000_hw *hw)
81{
82 struct e1000_phy_info *phy = &hw->phy;
83 s32 ret_val;
84 u16 phy_id;
85
86 ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
87 if (ret_val)
88 return ret_val;
89
90 phy->id = (u32)(phy_id << 16);
91 udelay(20);
92 ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
93 if (ret_val)
94 return ret_val;
95
96 phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
97 phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
98
99 return 0;
100}
101
102/**
103 * e1000e_phy_reset_dsp - Reset PHY DSP
104 * @hw: pointer to the HW structure
105 *
106 * Reset the digital signal processor.
107 **/
108s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
109{
110 s32 ret_val;
111
112 ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
113 if (ret_val)
114 return ret_val;
115
116 return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0);
117}
118
119/**
120 * e1000_read_phy_reg_mdic - Read MDI control register
121 * @hw: pointer to the HW structure
122 * @offset: register offset to be read
123 * @data: pointer to the read data
124 *
125 * Reads the MDI control regsiter in the PHY at offset and stores the
126 * information read to data.
127 **/
128static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
129{
130 struct e1000_phy_info *phy = &hw->phy;
131 u32 i, mdic = 0;
132
133 if (offset > MAX_PHY_REG_ADDRESS) {
134 hw_dbg(hw, "PHY Address %d is out of range\n", offset);
135 return -E1000_ERR_PARAM;
136 }
137
138 /* Set up Op-code, Phy Address, and register offset in the MDI
139 * Control register. The MAC will take care of interfacing with the
140 * PHY to retrieve the desired data.
141 */
142 mdic = ((offset << E1000_MDIC_REG_SHIFT) |
143 (phy->addr << E1000_MDIC_PHY_SHIFT) |
144 (E1000_MDIC_OP_READ));
145
146 ew32(MDIC, mdic);
147
148 /* Poll the ready bit to see if the MDI read completed */
149 for (i = 0; i < 64; i++) {
150 udelay(50);
151 mdic = er32(MDIC);
152 if (mdic & E1000_MDIC_READY)
153 break;
154 }
155 if (!(mdic & E1000_MDIC_READY)) {
156 hw_dbg(hw, "MDI Read did not complete\n");
157 return -E1000_ERR_PHY;
158 }
159 if (mdic & E1000_MDIC_ERROR) {
160 hw_dbg(hw, "MDI Error\n");
161 return -E1000_ERR_PHY;
162 }
163 *data = (u16) mdic;
164
165 return 0;
166}
167
168/**
169 * e1000_write_phy_reg_mdic - Write MDI control register
170 * @hw: pointer to the HW structure
171 * @offset: register offset to write to
172 * @data: data to write to register at offset
173 *
174 * Writes data to MDI control register in the PHY at offset.
175 **/
176static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
177{
178 struct e1000_phy_info *phy = &hw->phy;
179 u32 i, mdic = 0;
180
181 if (offset > MAX_PHY_REG_ADDRESS) {
182 hw_dbg(hw, "PHY Address %d is out of range\n", offset);
183 return -E1000_ERR_PARAM;
184 }
185
186 /* Set up Op-code, Phy Address, and register offset in the MDI
187 * Control register. The MAC will take care of interfacing with the
188 * PHY to retrieve the desired data.
189 */
190 mdic = (((u32)data) |
191 (offset << E1000_MDIC_REG_SHIFT) |
192 (phy->addr << E1000_MDIC_PHY_SHIFT) |
193 (E1000_MDIC_OP_WRITE));
194
195 ew32(MDIC, mdic);
196
197 /* Poll the ready bit to see if the MDI read completed */
198 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
199 udelay(5);
200 mdic = er32(MDIC);
201 if (mdic & E1000_MDIC_READY)
202 break;
203 }
204 if (!(mdic & E1000_MDIC_READY)) {
205 hw_dbg(hw, "MDI Write did not complete\n");
206 return -E1000_ERR_PHY;
207 }
208
209 return 0;
210}
211
212/**
213 * e1000e_read_phy_reg_m88 - Read m88 PHY register
214 * @hw: pointer to the HW structure
215 * @offset: register offset to be read
216 * @data: pointer to the read data
217 *
218 * Acquires semaphore, if necessary, then reads the PHY register at offset
219 * and storing the retrieved information in data. Release any acquired
220 * semaphores before exiting.
221 **/
222s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
223{
224 s32 ret_val;
225
226 ret_val = hw->phy.ops.acquire_phy(hw);
227 if (ret_val)
228 return ret_val;
229
230 ret_val = e1000_read_phy_reg_mdic(hw,
231 MAX_PHY_REG_ADDRESS & offset,
232 data);
233
234 hw->phy.ops.release_phy(hw);
235
236 return ret_val;
237}
238
239/**
240 * e1000e_write_phy_reg_m88 - Write m88 PHY register
241 * @hw: pointer to the HW structure
242 * @offset: register offset to write to
243 * @data: data to write at register offset
244 *
245 * Acquires semaphore, if necessary, then writes the data to PHY register
246 * at the offset. Release any acquired semaphores before exiting.
247 **/
248s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
249{
250 s32 ret_val;
251
252 ret_val = hw->phy.ops.acquire_phy(hw);
253 if (ret_val)
254 return ret_val;
255
256 ret_val = e1000_write_phy_reg_mdic(hw,
257 MAX_PHY_REG_ADDRESS & offset,
258 data);
259
260 hw->phy.ops.release_phy(hw);
261
262 return ret_val;
263}
264
265/**
266 * e1000e_read_phy_reg_igp - Read igp PHY register
267 * @hw: pointer to the HW structure
268 * @offset: register offset to be read
269 * @data: pointer to the read data
270 *
271 * Acquires semaphore, if necessary, then reads the PHY register at offset
272 * and storing the retrieved information in data. Release any acquired
273 * semaphores before exiting.
274 **/
275s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
276{
277 s32 ret_val;
278
279 ret_val = hw->phy.ops.acquire_phy(hw);
280 if (ret_val)
281 return ret_val;
282
283 if (offset > MAX_PHY_MULTI_PAGE_REG) {
284 ret_val = e1000_write_phy_reg_mdic(hw,
285 IGP01E1000_PHY_PAGE_SELECT,
286 (u16)offset);
287 if (ret_val) {
288 hw->phy.ops.release_phy(hw);
289 return ret_val;
290 }
291 }
292
293 ret_val = e1000_read_phy_reg_mdic(hw,
294 MAX_PHY_REG_ADDRESS & offset,
295 data);
296
297 hw->phy.ops.release_phy(hw);
298
299 return ret_val;
300}
301
302/**
303 * e1000e_write_phy_reg_igp - Write igp PHY register
304 * @hw: pointer to the HW structure
305 * @offset: register offset to write to
306 * @data: data to write at register offset
307 *
308 * Acquires semaphore, if necessary, then writes the data to PHY register
309 * at the offset. Release any acquired semaphores before exiting.
310 **/
311s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
312{
313 s32 ret_val;
314
315 ret_val = hw->phy.ops.acquire_phy(hw);
316 if (ret_val)
317 return ret_val;
318
319 if (offset > MAX_PHY_MULTI_PAGE_REG) {
320 ret_val = e1000_write_phy_reg_mdic(hw,
321 IGP01E1000_PHY_PAGE_SELECT,
322 (u16)offset);
323 if (ret_val) {
324 hw->phy.ops.release_phy(hw);
325 return ret_val;
326 }
327 }
328
329 ret_val = e1000_write_phy_reg_mdic(hw,
330 MAX_PHY_REG_ADDRESS & offset,
331 data);
332
333 hw->phy.ops.release_phy(hw);
334
335 return ret_val;
336}
337
338/**
339 * e1000e_read_kmrn_reg - Read kumeran register
340 * @hw: pointer to the HW structure
341 * @offset: register offset to be read
342 * @data: pointer to the read data
343 *
344 * Acquires semaphore, if necessary. Then reads the PHY register at offset
345 * using the kumeran interface. The information retrieved is stored in data.
346 * Release any acquired semaphores before exiting.
347 **/
348s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
349{
350 u32 kmrnctrlsta;
351 s32 ret_val;
352
353 ret_val = hw->phy.ops.acquire_phy(hw);
354 if (ret_val)
355 return ret_val;
356
357 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
358 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
359 ew32(KMRNCTRLSTA, kmrnctrlsta);
360
361 udelay(2);
362
363 kmrnctrlsta = er32(KMRNCTRLSTA);
364 *data = (u16)kmrnctrlsta;
365
366 hw->phy.ops.release_phy(hw);
367
368 return ret_val;
369}
370
371/**
372 * e1000e_write_kmrn_reg - Write kumeran register
373 * @hw: pointer to the HW structure
374 * @offset: register offset to write to
375 * @data: data to write at register offset
376 *
377 * Acquires semaphore, if necessary. Then write the data to PHY register
378 * at the offset using the kumeran interface. Release any acquired semaphores
379 * before exiting.
380 **/
381s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
382{
383 u32 kmrnctrlsta;
384 s32 ret_val;
385
386 ret_val = hw->phy.ops.acquire_phy(hw);
387 if (ret_val)
388 return ret_val;
389
390 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
391 E1000_KMRNCTRLSTA_OFFSET) | data;
392 ew32(KMRNCTRLSTA, kmrnctrlsta);
393
394 udelay(2);
395 hw->phy.ops.release_phy(hw);
396
397 return ret_val;
398}
399
400/**
401 * e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link
402 * @hw: pointer to the HW structure
403 *
404 * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock
405 * and downshift values are set also.
406 **/
407s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
408{
409 struct e1000_phy_info *phy = &hw->phy;
410 s32 ret_val;
411 u16 phy_data;
412
413 /* Enable CRS on TX. This must be set for half-duplex operation. */
414 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
415 if (ret_val)
416 return ret_val;
417
418 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
419
420 /* Options:
421 * MDI/MDI-X = 0 (default)
422 * 0 - Auto for all speeds
423 * 1 - MDI mode
424 * 2 - MDI-X mode
425 * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
426 */
427 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
428
429 switch (phy->mdix) {
430 case 1:
431 phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
432 break;
433 case 2:
434 phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
435 break;
436 case 3:
437 phy_data |= M88E1000_PSCR_AUTO_X_1000T;
438 break;
439 case 0:
440 default:
441 phy_data |= M88E1000_PSCR_AUTO_X_MODE;
442 break;
443 }
444
445 /* Options:
446 * disable_polarity_correction = 0 (default)
447 * Automatic Correction for Reversed Cable Polarity
448 * 0 - Disabled
449 * 1 - Enabled
450 */
451 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
452 if (phy->disable_polarity_correction == 1)
453 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
454
455 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
456 if (ret_val)
457 return ret_val;
458
459 if (phy->revision < 4) {
460 /* Force TX_CLK in the Extended PHY Specific Control Register
461 * to 25MHz clock.
462 */
463 ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
464 if (ret_val)
465 return ret_val;
466
467 phy_data |= M88E1000_EPSCR_TX_CLK_25;
468
469 if ((phy->revision == 2) &&
470 (phy->id == M88E1111_I_PHY_ID)) {
471 /* 82573L PHY - set the downshift counter to 5x. */
472 phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
473 phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
474 } else {
475 /* Configure Master and Slave downshift values */
476 phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
477 M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
478 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
479 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
480 }
481 ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
482 if (ret_val)
483 return ret_val;
484 }
485
486 /* Commit the changes. */
487 ret_val = e1000e_commit_phy(hw);
488 if (ret_val)
489 hw_dbg(hw, "Error committing the PHY changes\n");
490
491 return ret_val;
492}
493
494/**
495 * e1000e_copper_link_setup_igp - Setup igp PHY's for copper link
496 * @hw: pointer to the HW structure
497 *
498 * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
499 * igp PHY's.
500 **/
501s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
502{
503 struct e1000_phy_info *phy = &hw->phy;
504 s32 ret_val;
505 u16 data;
506
507 ret_val = e1000_phy_hw_reset(hw);
508 if (ret_val) {
509 hw_dbg(hw, "Error resetting the PHY.\n");
510 return ret_val;
511 }
512
513 /* Wait 15ms for MAC to configure PHY from NVM settings. */
514 msleep(15);
515
516 /* disable lplu d0 during driver init */
517 ret_val = e1000_set_d0_lplu_state(hw, 0);
518 if (ret_val) {
519 hw_dbg(hw, "Error Disabling LPLU D0\n");
520 return ret_val;
521 }
522 /* Configure mdi-mdix settings */
523 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data);
524 if (ret_val)
525 return ret_val;
526
527 data &= ~IGP01E1000_PSCR_AUTO_MDIX;
528
529 switch (phy->mdix) {
530 case 1:
531 data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
532 break;
533 case 2:
534 data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
535 break;
536 case 0:
537 default:
538 data |= IGP01E1000_PSCR_AUTO_MDIX;
539 break;
540 }
541 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data);
542 if (ret_val)
543 return ret_val;
544
545 /* set auto-master slave resolution settings */
546 if (hw->mac.autoneg) {
547 /* when autonegotiation advertisement is only 1000Mbps then we
548 * should disable SmartSpeed and enable Auto MasterSlave
549 * resolution as hardware default. */
550 if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
551 /* Disable SmartSpeed */
552 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
553 &data);
554 if (ret_val)
555 return ret_val;
556
557 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
558 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
559 data);
560 if (ret_val)
561 return ret_val;
562
563 /* Set auto Master/Slave resolution process */
564 ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
565 if (ret_val)
566 return ret_val;
567
568 data &= ~CR_1000T_MS_ENABLE;
569 ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
570 if (ret_val)
571 return ret_val;
572 }
573
574 ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
575 if (ret_val)
576 return ret_val;
577
578 /* load defaults for future use */
579 phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
580 ((data & CR_1000T_MS_VALUE) ?
581 e1000_ms_force_master :
582 e1000_ms_force_slave) :
583 e1000_ms_auto;
584
585 switch (phy->ms_type) {
586 case e1000_ms_force_master:
587 data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
588 break;
589 case e1000_ms_force_slave:
590 data |= CR_1000T_MS_ENABLE;
591 data &= ~(CR_1000T_MS_VALUE);
592 break;
593 case e1000_ms_auto:
594 data &= ~CR_1000T_MS_ENABLE;
595 default:
596 break;
597 }
598 ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
599 }
600
601 return ret_val;
602}
603
604/**
605 * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
606 * @hw: pointer to the HW structure
607 *
608 * Reads the MII auto-neg advertisement register and/or the 1000T control
609 * register and if the PHY is already setup for auto-negotiation, then
610 * return successful. Otherwise, setup advertisement and flow control to
611 * the appropriate values for the wanted auto-negotiation.
612 **/
613static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
614{
615 struct e1000_phy_info *phy = &hw->phy;
616 s32 ret_val;
617 u16 mii_autoneg_adv_reg;
618 u16 mii_1000t_ctrl_reg = 0;
619
620 phy->autoneg_advertised &= phy->autoneg_mask;
621
622 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
623 ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
624 if (ret_val)
625 return ret_val;
626
627 if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
628 /* Read the MII 1000Base-T Control Register (Address 9). */
629 ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
630 if (ret_val)
631 return ret_val;
632 }
633
634 /* Need to parse both autoneg_advertised and fc and set up
635 * the appropriate PHY registers. First we will parse for
636 * autoneg_advertised software override. Since we can advertise
637 * a plethora of combinations, we need to check each bit
638 * individually.
639 */
640
641 /* First we clear all the 10/100 mb speed bits in the Auto-Neg
642 * Advertisement Register (Address 4) and the 1000 mb speed bits in
643 * the 1000Base-T Control Register (Address 9).
644 */
645 mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
646 NWAY_AR_100TX_HD_CAPS |
647 NWAY_AR_10T_FD_CAPS |
648 NWAY_AR_10T_HD_CAPS);
649 mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
650
651 hw_dbg(hw, "autoneg_advertised %x\n", phy->autoneg_advertised);
652
653 /* Do we want to advertise 10 Mb Half Duplex? */
654 if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
655 hw_dbg(hw, "Advertise 10mb Half duplex\n");
656 mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
657 }
658
659 /* Do we want to advertise 10 Mb Full Duplex? */
660 if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
661 hw_dbg(hw, "Advertise 10mb Full duplex\n");
662 mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
663 }
664
665 /* Do we want to advertise 100 Mb Half Duplex? */
666 if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
667 hw_dbg(hw, "Advertise 100mb Half duplex\n");
668 mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
669 }
670
671 /* Do we want to advertise 100 Mb Full Duplex? */
672 if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
673 hw_dbg(hw, "Advertise 100mb Full duplex\n");
674 mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
675 }
676
677 /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
678 if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
679 hw_dbg(hw, "Advertise 1000mb Half duplex request denied!\n");
680
681 /* Do we want to advertise 1000 Mb Full Duplex? */
682 if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
683 hw_dbg(hw, "Advertise 1000mb Full duplex\n");
684 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
685 }
686
687 /* Check for a software override of the flow control settings, and
688 * setup the PHY advertisement registers accordingly. If
689 * auto-negotiation is enabled, then software will have to set the
690 * "PAUSE" bits to the correct value in the Auto-Negotiation
691 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
692 * negotiation.
693 *
694 * The possible values of the "fc" parameter are:
695 * 0: Flow control is completely disabled
696 * 1: Rx flow control is enabled (we can receive pause frames
697 * but not send pause frames).
698 * 2: Tx flow control is enabled (we can send pause frames
699 * but we do not support receiving pause frames).
700 * 3: Both Rx and TX flow control (symmetric) are enabled.
701 * other: No software override. The flow control configuration
702 * in the EEPROM is used.
703 */
704 switch (hw->mac.fc) {
705 case e1000_fc_none:
706 /* Flow control (RX & TX) is completely disabled by a
707 * software over-ride.
708 */
709 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
710 break;
711 case e1000_fc_rx_pause:
712 /* RX Flow control is enabled, and TX Flow control is
713 * disabled, by a software over-ride.
714 */
715 /* Since there really isn't a way to advertise that we are
716 * capable of RX Pause ONLY, we will advertise that we
717 * support both symmetric and asymmetric RX PAUSE. Later
718 * (in e1000e_config_fc_after_link_up) we will disable the
719 * hw's ability to send PAUSE frames.
720 */
721 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
722 break;
723 case e1000_fc_tx_pause:
724 /* TX Flow control is enabled, and RX Flow control is
725 * disabled, by a software over-ride.
726 */
727 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
728 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
729 break;
730 case e1000_fc_full:
731 /* Flow control (both RX and TX) is enabled by a software
732 * over-ride.
733 */
734 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
735 break;
736 default:
737 hw_dbg(hw, "Flow control param set incorrectly\n");
738 ret_val = -E1000_ERR_CONFIG;
739 return ret_val;
740 }
741
742 ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
743 if (ret_val)
744 return ret_val;
745
746 hw_dbg(hw, "Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
747
748 if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
749 ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
750 }
751
752 return ret_val;
753}
754
755/**
756 * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
757 * @hw: pointer to the HW structure
758 *
759 * Performs initial bounds checking on autoneg advertisement parameter, then
760 * configure to advertise the full capability. Setup the PHY to autoneg
761 * and restart the negotiation process between the link partner. If
762 * wait_for_link, then wait for autoneg to complete before exiting.
763 **/
764static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
765{
766 struct e1000_phy_info *phy = &hw->phy;
767 s32 ret_val;
768 u16 phy_ctrl;
769
770 /* Perform some bounds checking on the autoneg advertisement
771 * parameter.
772 */
773 phy->autoneg_advertised &= phy->autoneg_mask;
774
775 /* If autoneg_advertised is zero, we assume it was not defaulted
776 * by the calling code so we set to advertise full capability.
777 */
778 if (phy->autoneg_advertised == 0)
779 phy->autoneg_advertised = phy->autoneg_mask;
780
781 hw_dbg(hw, "Reconfiguring auto-neg advertisement params\n");
782 ret_val = e1000_phy_setup_autoneg(hw);
783 if (ret_val) {
784 hw_dbg(hw, "Error Setting up Auto-Negotiation\n");
785 return ret_val;
786 }
787 hw_dbg(hw, "Restarting Auto-Neg\n");
788
789 /* Restart auto-negotiation by setting the Auto Neg Enable bit and
790 * the Auto Neg Restart bit in the PHY control register.
791 */
792 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
793 if (ret_val)
794 return ret_val;
795
796 phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
797 ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
798 if (ret_val)
799 return ret_val;
800
801 /* Does the user want to wait for Auto-Neg to complete here, or
802 * check at a later time (for example, callback routine).
803 */
804 if (phy->wait_for_link) {
805 ret_val = e1000_wait_autoneg(hw);
806 if (ret_val) {
807 hw_dbg(hw, "Error while waiting for "
808 "autoneg to complete\n");
809 return ret_val;
810 }
811 }
812
813 hw->mac.get_link_status = 1;
814
815 return ret_val;
816}
817
818/**
819 * e1000e_setup_copper_link - Configure copper link settings
820 * @hw: pointer to the HW structure
821 *
822 * Calls the appropriate function to configure the link for auto-neg or forced
823 * speed and duplex. Then we check for link, once link is established calls
824 * to configure collision distance and flow control are called. If link is
825 * not established, we return -E1000_ERR_PHY (-2).
826 **/
827s32 e1000e_setup_copper_link(struct e1000_hw *hw)
828{
829 s32 ret_val;
830 bool link;
831
832 if (hw->mac.autoneg) {
833 /* Setup autoneg and flow control advertisement and perform
834 * autonegotiation. */
835 ret_val = e1000_copper_link_autoneg(hw);
836 if (ret_val)
837 return ret_val;
838 } else {
839 /* PHY will be set to 10H, 10F, 100H or 100F
840 * depending on user settings. */
841 hw_dbg(hw, "Forcing Speed and Duplex\n");
842 ret_val = e1000_phy_force_speed_duplex(hw);
843 if (ret_val) {
844 hw_dbg(hw, "Error Forcing Speed and Duplex\n");
845 return ret_val;
846 }
847 }
848
849 /* Check link status. Wait up to 100 microseconds for link to become
850 * valid.
851 */
852 ret_val = e1000e_phy_has_link_generic(hw,
853 COPPER_LINK_UP_LIMIT,
854 10,
855 &link);
856 if (ret_val)
857 return ret_val;
858
859 if (link) {
860 hw_dbg(hw, "Valid link established!!!\n");
861 e1000e_config_collision_dist(hw);
862 ret_val = e1000e_config_fc_after_link_up(hw);
863 } else {
864 hw_dbg(hw, "Unable to establish link!!!\n");
865 }
866
867 return ret_val;
868}
869
870/**
871 * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
872 * @hw: pointer to the HW structure
873 *
874 * Calls the PHY setup function to force speed and duplex. Clears the
875 * auto-crossover to force MDI manually. Waits for link and returns
876 * successful if link up is successful, else -E1000_ERR_PHY (-2).
877 **/
s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data;
	bool link;

	/* Read PHY_CONTROL, let the generic setup helper patch in the
	 * forced speed/duplex bits (and program the MAC), then write it
	 * back to the PHY. */
	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
	if (ret_val)
		return ret_val;

	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);

	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
	if (ret_val)
		return ret_val;

	/* Clear Auto-Crossover to force MDI manually. IGP requires MDI
	 * forced whenever speed and duplex are forced.
	 */
	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
	phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;

	ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	hw_dbg(hw, "IGP PSCR: %X\n", phy_data);

	udelay(1);

	if (phy->wait_for_link) {
		hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n");

		/* Poll PHY_FORCE_LIMIT times at 100ms intervals for link. */
		ret_val = e1000e_phy_has_link_generic(hw,
						     PHY_FORCE_LIMIT,
						     100000,
						     &link);
		if (ret_val)
			return ret_val;

		if (!link)
			hw_dbg(hw, "Link taking longer than expected.\n");

		/* Try once more; failing to get link here is not treated
		 * as an error, only logged above. */
		ret_val = e1000e_phy_has_link_generic(hw,
						     PHY_FORCE_LIMIT,
						     100000,
						     &link);
		if (ret_val)
			return ret_val;
	}

	return ret_val;
}
937
938/**
939 * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
940 * @hw: pointer to the HW structure
941 *
942 * Calls the PHY setup function to force speed and duplex. Clears the
943 * auto-crossover to force MDI manually. Resets the PHY to commit the
944 * changes. If time expires while waiting for link up, we reset the DSP.
945 * After reset, TX_CLK and CRS on TX must be set. Return successful upon
946 * successful completion, else return corresponding error code.
947 **/
s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data;
	bool link;

	/* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
	 * forced whenever speed and duplex are forced.
	 */
	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	hw_dbg(hw, "M88E1000 PSCR: %X\n", phy_data);

	/* Patch the forced speed/duplex bits into PHY_CONTROL (this also
	 * programs the MAC side via the generic setup helper). */
	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
	if (ret_val)
		return ret_val;

	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);

	/* Reset the phy to commit changes. */
	phy_data |= MII_CR_RESET;

	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
	if (ret_val)
		return ret_val;

	udelay(1);

	if (phy->wait_for_link) {
		hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n");

		/* Poll PHY_FORCE_LIMIT times at 100ms intervals. */
		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						     100000, &link);
		if (ret_val)
			return ret_val;

		if (!link) {
			/* We didn't get link.
			 * Reset the DSP and cross our fingers.
			 */
			ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, 0x001d);
			if (ret_val)
				return ret_val;
			ret_val = e1000e_phy_reset_dsp(hw);
			if (ret_val)
				return ret_val;
		}

		/* Try once more */
		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						     100000, &link);
		if (ret_val)
			return ret_val;
	}

	ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	/* Resetting the phy means we need to re-force TX_CLK in the
	 * Extended PHY Specific Control Register to 25MHz clock from
	 * the reset value of 2.5MHz.
	 */
	phy_data |= M88E1000_EPSCR_TX_CLK_25;
	ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	/* In addition, we must re-enable CRS on Tx for both half and full
	 * duplex.
	 */
	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);

	return ret_val;
}
1036
1037/**
1038 * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
1039 * @hw: pointer to the HW structure
1040 * @phy_ctrl: pointer to current value of PHY_CONTROL
1041 *
1042 * Forces speed and duplex on the PHY by doing the following: disable flow
1043 * control, force speed/duplex on the MAC, disable auto speed detection,
1044 * disable auto-negotiation, configure duplex, configure speed, configure
1045 * the collision distance, write configuration to CTRL register. The
1046 * caller must write to the PHY_CONTROL register for these settings to
1047 * take affect.
1048 **/
1049void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
1050{
1051 struct e1000_mac_info *mac = &hw->mac;
1052 u32 ctrl;
1053
1054 /* Turn off flow control when forcing speed/duplex */
1055 mac->fc = e1000_fc_none;
1056
1057 /* Force speed/duplex on the mac */
1058 ctrl = er32(CTRL);
1059 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1060 ctrl &= ~E1000_CTRL_SPD_SEL;
1061
1062 /* Disable Auto Speed Detection */
1063 ctrl &= ~E1000_CTRL_ASDE;
1064
1065 /* Disable autoneg on the phy */
1066 *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
1067
1068 /* Forcing Full or Half Duplex? */
1069 if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
1070 ctrl &= ~E1000_CTRL_FD;
1071 *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
1072 hw_dbg(hw, "Half Duplex\n");
1073 } else {
1074 ctrl |= E1000_CTRL_FD;
1075 *phy_ctrl |= MII_CR_FULL_DUPLEX;
1076 hw_dbg(hw, "Full Duplex\n");
1077 }
1078
1079 /* Forcing 10mb or 100mb? */
1080 if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
1081 ctrl |= E1000_CTRL_SPD_100;
1082 *phy_ctrl |= MII_CR_SPEED_100;
1083 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
1084 hw_dbg(hw, "Forcing 100mb\n");
1085 } else {
1086 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1087 *phy_ctrl |= MII_CR_SPEED_10;
1088 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
1089 hw_dbg(hw, "Forcing 10mb\n");
1090 }
1091
1092 e1000e_config_collision_dist(hw);
1093
1094 ew32(CTRL, ctrl);
1095}
1096
1097/**
1098 * e1000e_set_d3_lplu_state - Sets low power link up state for D3
1099 * @hw: pointer to the HW structure
1100 * @active: boolean used to enable/disable lplu
1101 *
1102 * Success returns 0, Failure returns 1
1103 *
1104 * The low power link up (lplu) state is set to the power management level D3
1105 * and SmartSpeed is disabled when active is true, else clear lplu for D3
1106 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
1107 * is used during Dx states where the power conservation is most important.
1108 * During driver activity, SmartSpeed should be enabled so performance is
1109 * maintained.
1110 **/
s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;

	ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
	if (ret_val)
		return ret_val;

	if (!active) {
		/* Leaving D3 LPLU: clear the LPLU bit first... */
		data &= ~IGP02E1000_PM_D3_LPLU;
		ret_val = e1e_wphy(hw,
				 IGP02E1000_PHY_POWER_MGMT,
				 data);
		if (ret_val)
			return ret_val;
		/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
		 * during Dx states where the power conservation is most
		 * important. During driver activity we should enable
		 * SmartSpeed, so performance is maintained. */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		}
		/* e1000_smart_speed_default: leave SmartSpeed untouched. */
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		/* Entering D3 LPLU: only enable it when the advertised
		 * speed set includes the low speeds LPLU would force. */
		data |= IGP02E1000_PM_D3_LPLU;
		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
		if (ret_val)
			return ret_val;

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
		if (ret_val)
			return ret_val;

		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
	}

	return ret_val;
}
1174
1175/**
1176 * e1000e_check_downshift - Checks whether a downshift in speed occured
1177 * @hw: pointer to the HW structure
1178 *
1179 * Success returns 0, Failure returns 1
1180 *
1181 * A downshift is detected by querying the PHY link health.
1182 **/
1183s32 e1000e_check_downshift(struct e1000_hw *hw)
1184{
1185 struct e1000_phy_info *phy = &hw->phy;
1186 s32 ret_val;
1187 u16 phy_data, offset, mask;
1188
1189 switch (phy->type) {
1190 case e1000_phy_m88:
1191 case e1000_phy_gg82563:
1192 offset = M88E1000_PHY_SPEC_STATUS;
1193 mask = M88E1000_PSSR_DOWNSHIFT;
1194 break;
1195 case e1000_phy_igp_2:
1196 case e1000_phy_igp_3:
1197 offset = IGP01E1000_PHY_LINK_HEALTH;
1198 mask = IGP01E1000_PLHR_SS_DOWNGRADE;
1199 break;
1200 default:
1201 /* speed downshift not supported */
1202 phy->speed_downgraded = 0;
1203 return 0;
1204 }
1205
1206 ret_val = e1e_rphy(hw, offset, &phy_data);
1207
1208 if (!ret_val)
1209 phy->speed_downgraded = (phy_data & mask);
1210
1211 return ret_val;
1212}
1213
1214/**
1215 * e1000_check_polarity_m88 - Checks the polarity.
1216 * @hw: pointer to the HW structure
1217 *
1218 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1219 *
1220 * Polarity is determined based on the PHY specific status register.
1221 **/
1222static s32 e1000_check_polarity_m88(struct e1000_hw *hw)
1223{
1224 struct e1000_phy_info *phy = &hw->phy;
1225 s32 ret_val;
1226 u16 data;
1227
1228 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);
1229
1230 if (!ret_val)
1231 phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
1232 ? e1000_rev_polarity_reversed
1233 : e1000_rev_polarity_normal;
1234
1235 return ret_val;
1236}
1237
1238/**
1239 * e1000_check_polarity_igp - Checks the polarity.
1240 * @hw: pointer to the HW structure
1241 *
1242 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1243 *
1244 * Polarity is determined based on the PHY port status register, and the
1245 * current speed (since there is no polarity at 100Mbps).
1246 **/
1247static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
1248{
1249 struct e1000_phy_info *phy = &hw->phy;
1250 s32 ret_val;
1251 u16 data, offset, mask;
1252
1253 /* Polarity is determined based on the speed of
1254 * our connection. */
1255 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
1256 if (ret_val)
1257 return ret_val;
1258
1259 if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
1260 IGP01E1000_PSSR_SPEED_1000MBPS) {
1261 offset = IGP01E1000_PHY_PCS_INIT_REG;
1262 mask = IGP01E1000_PHY_POLARITY_MASK;
1263 } else {
1264 /* This really only applies to 10Mbps since
1265 * there is no polarity for 100Mbps (always 0).
1266 */
1267 offset = IGP01E1000_PHY_PORT_STATUS;
1268 mask = IGP01E1000_PSSR_POLARITY_REVERSED;
1269 }
1270
1271 ret_val = e1e_rphy(hw, offset, &data);
1272
1273 if (!ret_val)
1274 phy->cable_polarity = (data & mask)
1275 ? e1000_rev_polarity_reversed
1276 : e1000_rev_polarity_normal;
1277
1278 return ret_val;
1279}
1280
1281/**
1282 * e1000_wait_autoneg - Wait for auto-neg compeletion
1283 * @hw: pointer to the HW structure
1284 *
1285 * Waits for auto-negotiation to complete or for the auto-negotiation time
1286 * limit to expire, which ever happens first.
1287 **/
static s32 e1000_wait_autoneg(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 i, phy_status;

	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
		/* PHY_STATUS is deliberately read twice per iteration;
		 * this mirrors e1000e_phy_has_link_generic, which notes
		 * some PHYs latch status bits until the second read.
		 * Do not "simplify" this into one read. */
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_AUTONEG_COMPLETE)
			break;
		msleep(100);
	}

	/* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
	 * has completed; a timeout still returns 0 here.
	 */
	return ret_val;
}
1311
1312/**
1313 * e1000e_phy_has_link_generic - Polls PHY for link
1314 * @hw: pointer to the HW structure
1315 * @iterations: number of times to poll for link
1316 * @usec_interval: delay between polling attempts
1317 * @success: pointer to whether polling was successful or not
1318 *
1319 * Polls the PHY status register for link, 'iterations' number of times.
1320 **/
s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
			       u32 usec_interval, bool *success)
{
	s32 ret_val = 0;
	u16 i, phy_status;

	for (i = 0; i < iterations; i++) {
		/* Some PHYs require the PHY_STATUS register to be read
		 * twice due to the link bit being sticky. No harm doing
		 * it across the board.
		 */
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_LINK_STATUS)
			break;
		/* Intervals of 1ms or more use mdelay; shorter ones
		 * busy-wait with udelay. */
		if (usec_interval >= 1000)
			mdelay(usec_interval/1000);
		else
			udelay(usec_interval);
	}

	/* Link was seen iff we broke out before exhausting all
	 * iterations. Note *success is valid even when ret_val is an
	 * error (the read failure also breaks the loop early). */
	*success = (i < iterations);

	return ret_val;
}
1350
1351/**
1352 * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY
1353 * @hw: pointer to the HW structure
1354 *
1355 * Reads the PHY specific status register to retrieve the cable length
1356 * information. The cable length is determined by averaging the minimum and
1357 * maximum values to get the "average" cable length. The m88 PHY has four
1358 * possible cable length values, which are:
1359 * Register Value Cable Length
1360 * 0 < 50 meters
1361 * 1 50 - 80 meters
1362 * 2 80 - 110 meters
1363 * 3 110 - 140 meters
1364 * 4 > 140 meters
1365 **/
1366s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
1367{
1368 struct e1000_phy_info *phy = &hw->phy;
1369 s32 ret_val;
1370 u16 phy_data, index;
1371
1372 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
1373 if (ret_val)
1374 return ret_val;
1375
1376 index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
1377 M88E1000_PSSR_CABLE_LENGTH_SHIFT;
1378 phy->min_cable_length = e1000_m88_cable_length_table[index];
1379 phy->max_cable_length = e1000_m88_cable_length_table[index+1];
1380
1381 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
1382
1383 return ret_val;
1384}
1385
1386/**
1387 * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY
1388 * @hw: pointer to the HW structure
1389 *
1390 * The automatic gain control (agc) normalizes the amplitude of the
1391 * received signal, adjusting for the attenuation produced by the
1392 * cable. By reading the AGC registers, which reperesent the
1393 * cobination of course and fine gain value, the value can be put
1394 * into a lookup table to obtain the approximate cable length
1395 * for each channel.
1396 **/
s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, i, agc_value = 0;
	u16 cur_agc_index, max_agc_index = 0;
	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
	u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
	     {IGP02E1000_PHY_AGC_A,
	      IGP02E1000_PHY_AGC_B,
	      IGP02E1000_PHY_AGC_C,
	      IGP02E1000_PHY_AGC_D};

	/* Read the AGC registers for all channels */
	for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
		ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data);
		if (ret_val)
			return ret_val;

		/* Getting bits 15:9, which represent the combination of
		 * course and fine gain values. The result is a number
		 * that can be put into the lookup table to obtain the
		 * approximate cable length. */
		cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
				IGP02E1000_AGC_LENGTH_MASK;

		/* Array index bound check. */
		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
		    (cur_agc_index == 0))
			return -E1000_ERR_PHY;

		/* Track the indices of the smallest and largest table
		 * values seen so far, so the outliers can be dropped
		 * from the average below. */
		if (e1000_igp_2_cable_length_table[min_agc_index] >
		    e1000_igp_2_cable_length_table[cur_agc_index])
			min_agc_index = cur_agc_index;
		if (e1000_igp_2_cable_length_table[max_agc_index] <
		    e1000_igp_2_cable_length_table[cur_agc_index])
			max_agc_index = cur_agc_index;

		agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
	}

	/* Remove min & max AGC values from calculation, then average
	 * the remaining IGP02E1000_PHY_CHANNEL_NUM - 2 channels. */
	agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
		      e1000_igp_2_cable_length_table[max_agc_index]);
	agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);

	/* Calculate cable length with the error range of +/- 10 meters. */
	phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
				 (agc_value - IGP02E1000_AGC_RANGE) : 0;
	phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;

	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;

	return ret_val;
}
1452
1453/**
1454 * e1000e_get_phy_info_m88 - Retrieve PHY information
1455 * @hw: pointer to the HW structure
1456 *
1457 * Valid for only copper links. Read the PHY status register (sticky read)
1458 * to verify that link is up. Read the PHY special control register to
1459 * determine the polarity and 10base-T extended distance. Read the PHY
1460 * special status register to determine MDI/MDIx and current speed. If
1461 * speed is 1000, then determine cable length, local and remote receiver.
1462 **/
s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data;
	bool link;

	if (hw->media_type != e1000_media_type_copper) {
		hw_dbg(hw, "Phy info is only valid for copper media\n");
		return -E1000_ERR_CONFIG;
	}

	/* Single-iteration poll: just a (sticky, double-read) check that
	 * link is currently up. */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link) {
		hw_dbg(hw, "Phy info is only valid if link is up\n");
		return -E1000_ERR_CONFIG;
	}

	/* Polarity reversal setting comes from the PHY specific control
	 * register. */
	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy->polarity_correction = (phy_data &
				    M88E1000_PSCR_POLARITY_REVERSAL);

	ret_val = e1000_check_polarity_m88(hw);
	if (ret_val)
		return ret_val;

	/* MDI/MDI-X and speed come from the PHY specific status register. */
	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
	if (ret_val)
		return ret_val;

	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX);

	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
		/* At gigabit, cable length and the local/remote receiver
		 * status are meaningful; read them. */
		ret_val = e1000_get_cable_length(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
		if (ret_val)
			return ret_val;

		phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
				? e1000_1000t_rx_status_ok
				: e1000_1000t_rx_status_not_ok;

		phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
				 ? e1000_1000t_rx_status_ok
				 : e1000_1000t_rx_status_not_ok;
	} else {
		/* Set values to "undefined" */
		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
		phy->local_rx = e1000_1000t_rx_status_undefined;
		phy->remote_rx = e1000_1000t_rx_status_undefined;
	}

	return ret_val;
}
1526
1527/**
1528 * e1000e_get_phy_info_igp - Retrieve igp PHY information
1529 * @hw: pointer to the HW structure
1530 *
1531 * Read PHY status to determine if link is up. If link is up, then
1532 * set/determine 10base-T extended distance and polarity correction. Read
1533 * PHY port status to determine MDI/MDIx and speed. Based on the speed,
1534 * determine on the cable length, local and remote receiver.
1535 **/
s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	bool link;

	/* Single-iteration poll: just verify that link is currently up. */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link) {
		hw_dbg(hw, "Phy info is only valid if link is up\n");
		return -E1000_ERR_CONFIG;
	}

	/* IGP PHYs always perform polarity correction. */
	phy->polarity_correction = 1;

	ret_val = e1000_check_polarity_igp(hw);
	if (ret_val)
		return ret_val;

	/* MDI/MDI-X and speed come from the PHY port status register. */
	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
	if (ret_val)
		return ret_val;

	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX);

	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
	    IGP01E1000_PSSR_SPEED_1000MBPS) {
		/* At gigabit, cable length and local/remote receiver
		 * status are meaningful; read them. */
		ret_val = e1000_get_cable_length(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
		if (ret_val)
			return ret_val;

		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
				? e1000_1000t_rx_status_ok
				: e1000_1000t_rx_status_not_ok;

		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
				 ? e1000_1000t_rx_status_ok
				 : e1000_1000t_rx_status_not_ok;
	} else {
		/* Below gigabit these fields are meaningless. */
		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
		phy->local_rx = e1000_1000t_rx_status_undefined;
		phy->remote_rx = e1000_1000t_rx_status_undefined;
	}

	return ret_val;
}
1589
1590/**
1591 * e1000e_phy_sw_reset - PHY software reset
1592 * @hw: pointer to the HW structure
1593 *
1594 * Does a software reset of the PHY by reading the PHY control register and
1595 * setting/write the control register reset bit to the PHY.
1596 **/
1597s32 e1000e_phy_sw_reset(struct e1000_hw *hw)
1598{
1599 s32 ret_val;
1600 u16 phy_ctrl;
1601
1602 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
1603 if (ret_val)
1604 return ret_val;
1605
1606 phy_ctrl |= MII_CR_RESET;
1607 ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
1608 if (ret_val)
1609 return ret_val;
1610
1611 udelay(1);
1612
1613 return ret_val;
1614}
1615
1616/**
1617 * e1000e_phy_hw_reset_generic - PHY hardware reset
1618 * @hw: pointer to the HW structure
1619 *
1620 * Verify the reset block is not blocking us from resetting. Acquire
1621 * semaphore (if necessary) and read/set/write the device control reset
1622 * bit in the PHY. Wait the appropriate delay time for the device to
1623 * reset and relase the semaphore (if necessary).
1624 **/
s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u32 ctrl;

	/* If manageability firmware blocks PHY resets, skip the reset
	 * entirely and report success rather than an error. */
	ret_val = e1000_check_reset_block(hw);
	if (ret_val)
		return 0;

	ret_val = phy->ops.acquire_phy(hw);
	if (ret_val)
		return ret_val;

	/* Pulse the PHY reset bit in the device control register:
	 * assert, hold for the PHY's required delay, then deassert. */
	ctrl = er32(CTRL);
	ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
	e1e_flush();

	udelay(phy->reset_delay_us);

	ew32(CTRL, ctrl);
	e1e_flush();

	/* Give the PHY time to come out of reset before any access. */
	udelay(150);

	phy->ops.release_phy(hw);

	/* Wait for any post-reset configuration to complete. */
	return e1000_get_phy_cfg_done(hw);
}
1654
1655/**
1656 * e1000e_get_cfg_done - Generic configuration done
1657 * @hw: pointer to the HW structure
1658 *
1659 * Generic function to wait 10 milli-seconds for configuration to complete
1660 * and return success.
1661 **/
1662s32 e1000e_get_cfg_done(struct e1000_hw *hw)
1663{
1664 mdelay(10);
1665 return 0;
1666}
1667
1668/* Internal function pointers */
1669
1670/**
1671 * e1000_get_phy_cfg_done - Generic PHY configuration done
1672 * @hw: pointer to the HW structure
1673 *
1674 * Return success if silicon family did not implement a family specific
1675 * get_cfg_done function.
1676 **/
1677static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
1678{
1679 if (hw->phy.ops.get_cfg_done)
1680 return hw->phy.ops.get_cfg_done(hw);
1681
1682 return 0;
1683}
1684
1685/**
1686 * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
1687 * @hw: pointer to the HW structure
1688 *
1689 * When the silicon family has not implemented a forced speed/duplex
1690 * function for the PHY, simply return 0.
1691 **/
1692static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1693{
1694 if (hw->phy.ops.force_speed_duplex)
1695 return hw->phy.ops.force_speed_duplex(hw);
1696
1697 return 0;
1698}
1699
1700/**
1701 * e1000e_get_phy_type_from_id - Get PHY type from id
1702 * @phy_id: phy_id read from the phy
1703 *
1704 * Returns the phy type from the id.
1705 **/
1706enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
1707{
1708 enum e1000_phy_type phy_type = e1000_phy_unknown;
1709
1710 switch (phy_id) {
1711 case M88E1000_I_PHY_ID:
1712 case M88E1000_E_PHY_ID:
1713 case M88E1111_I_PHY_ID:
1714 case M88E1011_I_PHY_ID:
1715 phy_type = e1000_phy_m88;
1716 break;
1717 case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
1718 phy_type = e1000_phy_igp_2;
1719 break;
1720 case GG82563_E_PHY_ID:
1721 phy_type = e1000_phy_gg82563;
1722 break;
1723 case IGP03E1000_E_PHY_ID:
1724 phy_type = e1000_phy_igp_3;
1725 break;
1726 case IFE_E_PHY_ID:
1727 case IFE_PLUS_E_PHY_ID:
1728 case IFE_C_E_PHY_ID:
1729 phy_type = e1000_phy_ife;
1730 break;
1731 default:
1732 phy_type = e1000_phy_unknown;
1733 break;
1734 }
1735 return phy_type;
1736}
1737
1738/**
1739 * e1000e_commit_phy - Soft PHY reset
1740 * @hw: pointer to the HW structure
1741 *
1742 * Performs a soft PHY reset on those that apply. This is a function pointer
1743 * entry point called by drivers.
1744 **/
1745s32 e1000e_commit_phy(struct e1000_hw *hw)
1746{
1747 if (hw->phy.ops.commit_phy)
1748 return hw->phy.ops.commit_phy(hw);
1749
1750 return 0;
1751}
1752
1753/**
1754 * e1000_set_d0_lplu_state - Sets low power link up state for D0
1755 * @hw: pointer to the HW structure
1756 * @active: boolean used to enable/disable lplu
1757 *
1758 * Success returns 0, Failure returns 1
1759 *
1760 * The low power link up (lplu) state is set to the power management level D0
1761 * and SmartSpeed is disabled when active is true, else clear lplu for D0
1762 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
1763 * is used during Dx states where the power conservation is most important.
1764 * During driver activity, SmartSpeed should be enabled so performance is
1765 * maintained. This is a function pointer entry point called by drivers.
1766 **/
1767static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
1768{
1769 if (hw->phy.ops.set_d0_lplu_state)
1770 return hw->phy.ops.set_d0_lplu_state(hw, active);
1771
1772 return 0;
1773}