aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/e1000e
diff options
context:
space:
mode:
authorJonathan Herman <hermanjl@cs.unc.edu>2013-01-22 10:38:37 -0500
committerJonathan Herman <hermanjl@cs.unc.edu>2013-01-22 10:38:37 -0500
commitfcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
treea57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/net/e1000e
parent8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Added missing tegra files.HEADmaster
Diffstat (limited to 'drivers/net/e1000e')
-rw-r--r--drivers/net/e1000e/82571.c2117
-rw-r--r--drivers/net/e1000e/Makefile37
-rw-r--r--drivers/net/e1000e/defines.h844
-rw-r--r--drivers/net/e1000e/e1000.h741
-rw-r--r--drivers/net/e1000e/es2lan.c1516
-rw-r--r--drivers/net/e1000e/ethtool.c2082
-rw-r--r--drivers/net/e1000e/hw.h984
-rw-r--r--drivers/net/e1000e/ich8lan.c4150
-rw-r--r--drivers/net/e1000e/lib.c2693
-rw-r--r--drivers/net/e1000e/netdev.c6393
-rw-r--r--drivers/net/e1000e/param.c478
-rw-r--r--drivers/net/e1000e/phy.c3377
12 files changed, 25412 insertions, 0 deletions
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
new file mode 100644
index 00000000000..536b3a55c45
--- /dev/null
+++ b/drivers/net/e1000e/82571.c
@@ -0,0 +1,2117 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/*
30 * 82571EB Gigabit Ethernet Controller
31 * 82571EB Gigabit Ethernet Controller (Copper)
32 * 82571EB Gigabit Ethernet Controller (Fiber)
33 * 82571EB Dual Port Gigabit Mezzanine Adapter
34 * 82571EB Quad Port Gigabit Mezzanine Adapter
35 * 82571PT Gigabit PT Quad Port Server ExpressModule
36 * 82572EI Gigabit Ethernet Controller (Copper)
37 * 82572EI Gigabit Ethernet Controller (Fiber)
38 * 82572EI Gigabit Ethernet Controller
39 * 82573V Gigabit Ethernet Controller (Copper)
40 * 82573E Gigabit Ethernet Controller (Copper)
41 * 82573L Gigabit Ethernet Controller
42 * 82574L Gigabit Network Connection
43 * 82583V Gigabit Network Connection
44 */
45
46#include "e1000.h"
47
48#define ID_LED_RESERVED_F746 0xF746
49#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
50 (ID_LED_OFF1_ON2 << 8) | \
51 (ID_LED_DEF1_DEF2 << 4) | \
52 (ID_LED_DEF1_DEF2))
53
54#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
55#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
56#define E1000_BASE1000T_STATUS 10
57#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
58#define E1000_RECEIVE_ERROR_COUNTER 21
59#define E1000_RECEIVE_ERROR_MAX 0xFFFF
60
61#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
62
63static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
64static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
65static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
66static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw);
67static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
68 u16 words, u16 *data);
69static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
70static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
71static s32 e1000_setup_link_82571(struct e1000_hw *hw);
72static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
73static void e1000_clear_vfta_82571(struct e1000_hw *hw);
74static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
75static s32 e1000_led_on_82574(struct e1000_hw *hw);
76static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
77static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
78static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
79static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
80static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
81static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
82static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
83
84/**
85 * e1000_init_phy_params_82571 - Init PHY func ptrs.
86 * @hw: pointer to the HW structure
87 **/
88static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
89{
90 struct e1000_phy_info *phy = &hw->phy;
91 s32 ret_val;
92
93 if (hw->phy.media_type != e1000_media_type_copper) {
94 phy->type = e1000_phy_none;
95 return 0;
96 }
97
98 phy->addr = 1;
99 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
100 phy->reset_delay_us = 100;
101
102 phy->ops.power_up = e1000_power_up_phy_copper;
103 phy->ops.power_down = e1000_power_down_phy_copper_82571;
104
105 switch (hw->mac.type) {
106 case e1000_82571:
107 case e1000_82572:
108 phy->type = e1000_phy_igp_2;
109 break;
110 case e1000_82573:
111 phy->type = e1000_phy_m88;
112 break;
113 case e1000_82574:
114 case e1000_82583:
115 phy->type = e1000_phy_bm;
116 phy->ops.acquire = e1000_get_hw_semaphore_82574;
117 phy->ops.release = e1000_put_hw_semaphore_82574;
118 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
119 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
120 break;
121 default:
122 return -E1000_ERR_PHY;
123 break;
124 }
125
126 /* This can only be done after all function pointers are setup. */
127 ret_val = e1000_get_phy_id_82571(hw);
128 if (ret_val) {
129 e_dbg("Error getting PHY ID\n");
130 return ret_val;
131 }
132
133 /* Verify phy id */
134 switch (hw->mac.type) {
135 case e1000_82571:
136 case e1000_82572:
137 if (phy->id != IGP01E1000_I_PHY_ID)
138 ret_val = -E1000_ERR_PHY;
139 break;
140 case e1000_82573:
141 if (phy->id != M88E1111_I_PHY_ID)
142 ret_val = -E1000_ERR_PHY;
143 break;
144 case e1000_82574:
145 case e1000_82583:
146 if (phy->id != BME1000_E_PHY_ID_R2)
147 ret_val = -E1000_ERR_PHY;
148 break;
149 default:
150 ret_val = -E1000_ERR_PHY;
151 break;
152 }
153
154 if (ret_val)
155 e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
156
157 return ret_val;
158}
159
/**
 * e1000_init_nvm_params_82571 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 *
 * Determines the NVM type (SPI EEPROM vs. on-die flash), its page size,
 * address width and word size from the EECD register, and installs the
 * chip-specific acquire/release hooks.  Always returns 0.
 **/
static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u16 size;

	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;
	/*
	 * Honor an explicit geometry override first; otherwise derive
	 * large vs. small SPI geometry from EECD.ADDR_BITS.
	 */
	switch (nvm->override) {
	case e1000_nvm_override_spi_large:
		nvm->page_size = 32;
		nvm->address_bits = 16;
		break;
	case e1000_nvm_override_spi_small:
		nvm->page_size = 8;
		nvm->address_bits = 8;
		break;
	default:
		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
		break;
	}

	switch (hw->mac.type) {
	case e1000_82573:
	case e1000_82574:
	case e1000_82583:
		/* EECD bits 16:15 == 0x3 indicate on-die flash NVM. */
		if (((eecd >> 15) & 0x3) == 0x3) {
			nvm->type = e1000_nvm_flash_hw;
			nvm->word_size = 2048;
			/*
			 * Autonomous Flash update bit must be cleared due
			 * to Flash update issue.
			 */
			eecd &= ~E1000_EECD_AUPDEN;
			ew32(EECD, eecd);
			break;
		}
		/* Fall Through */
	default:
		nvm->type = e1000_nvm_eeprom_spi;
		size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
			     E1000_EECD_SIZE_EX_SHIFT);
		/*
		 * Added to a constant, "size" becomes the left-shift value
		 * for setting word_size.
		 */
		size += NVM_WORD_SIZE_BASE_SHIFT;

		/* EEPROM access above 16k is unsupported */
		if (size > 14)
			size = 14;
		nvm->word_size = 1 << size;
		break;
	}

	/* Function Pointers */
	switch (hw->mac.type) {
	case e1000_82574:
	case e1000_82583:
		/* These parts gate NVM access behind the 82574 semaphore. */
		nvm->ops.acquire = e1000_get_hw_semaphore_82574;
		nvm->ops.release = e1000_put_hw_semaphore_82574;
		break;
	default:
		break;
	}

	return 0;
}
233
/**
 * e1000_init_mac_params_82571 - Init MAC func ptrs.
 * @adapter: pointer to the adapter structure
 *
 * Determines the media type from the PCI device ID, installs the
 * media-specific and MAC-specific function pointers, and clears the
 * inter-port SWSM.SMBI lock where appropriate.  Returns 0 on success
 * or -E1000_ERR_CONFIG for an unknown media type.
 **/
static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_mac_operations *func = &mac->ops;
	u32 swsm = 0;
	u32 swsm2 = 0;
	bool force_clear_smbi = false;

	/* Set media type */
	switch (adapter->pdev->device) {
	case E1000_DEV_ID_82571EB_FIBER:
	case E1000_DEV_ID_82572EI_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
		hw->phy.media_type = e1000_media_type_fiber;
		break;
	case E1000_DEV_ID_82571EB_SERDES:
	case E1000_DEV_ID_82572EI_SERDES:
	case E1000_DEV_ID_82571EB_SERDES_DUAL:
	case E1000_DEV_ID_82571EB_SERDES_QUAD:
		hw->phy.media_type = e1000_media_type_internal_serdes;
		break;
	default:
		hw->phy.media_type = e1000_media_type_copper;
		break;
	}

	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_RAR_ENTRIES;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = true;

	/* check for link */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		func->setup_physical_interface = e1000_setup_copper_link_82571;
		func->check_for_link = e1000e_check_for_copper_link;
		func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
		break;
	case e1000_media_type_fiber:
		func->setup_physical_interface =
			e1000_setup_fiber_serdes_link_82571;
		func->check_for_link = e1000e_check_for_fiber_link;
		func->get_link_up_info =
			e1000e_get_speed_and_duplex_fiber_serdes;
		break;
	case e1000_media_type_internal_serdes:
		/* serdes uses the same setup path as fiber but its own
		 * link-check state machine */
		func->setup_physical_interface =
			e1000_setup_fiber_serdes_link_82571;
		func->check_for_link = e1000_check_for_serdes_link_82571;
		func->get_link_up_info =
			e1000e_get_speed_and_duplex_fiber_serdes;
		break;
	default:
		return -E1000_ERR_CONFIG;
		break;
	}

	switch (hw->mac.type) {
	case e1000_82573:
		func->set_lan_id = e1000_set_lan_id_single_port;
		func->check_mng_mode = e1000e_check_mng_mode_generic;
		func->led_on = e1000e_led_on_generic;
		func->blink_led = e1000e_blink_led_generic;

		/* FWSM register */
		mac->has_fwsm = true;
		/*
		 * ARC supported; valid only if manageability features are
		 * enabled.
		 */
		mac->arc_subsystem_valid =
			(er32(FWSM) & E1000_FWSM_MODE_MASK)
			? true : false;
		break;
	case e1000_82574:
	case e1000_82583:
		func->set_lan_id = e1000_set_lan_id_single_port;
		func->check_mng_mode = e1000_check_mng_mode_82574;
		func->led_on = e1000_led_on_82574;
		break;
	default:
		func->check_mng_mode = e1000e_check_mng_mode_generic;
		func->led_on = e1000e_led_on_generic;
		func->blink_led = e1000e_blink_led_generic;

		/* FWSM register */
		mac->has_fwsm = true;
		break;
	}

	/*
	 * Ensure that the inter-port SWSM.SMBI lock bit is clear before
	 * first NVM or PHY access. This should be done for single-port
	 * devices, and for one port only on dual-port devices so that
	 * for those devices we can still use the SMBI lock to synchronize
	 * inter-port accesses to the PHY & NVM.
	 */
	switch (hw->mac.type) {
	case e1000_82571:
	case e1000_82572:
		swsm2 = er32(SWSM2);

		if (!(swsm2 & E1000_SWSM2_LOCK)) {
			/* Only do this for the first interface on this card */
			ew32(SWSM2,
			    swsm2 | E1000_SWSM2_LOCK);
			force_clear_smbi = true;
		} else
			force_clear_smbi = false;
		break;
	default:
		force_clear_smbi = true;
		break;
	}

	if (force_clear_smbi) {
		/* Make sure SWSM.SMBI is clear */
		swsm = er32(SWSM);
		if (swsm & E1000_SWSM_SMBI) {
			/* This bit should not be set on a first interface, and
			 * indicates that the bootagent or EFI code has
			 * improperly left this bit enabled
			 */
			e_dbg("Please update your 82571 Bootagent\n");
		}
		ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
	}

	/*
	 * Initialize device specific counter of SMBI acquisition
	 * timeouts.
	 */
	hw->dev_spec.e82571.smb_counter = 0;

	return 0;
}
377
/**
 * e1000_get_variants_82571 - Initialize variant-specific parameters
 * @adapter: pointer to the adapter structure
 *
 * Runs MAC/NVM/PHY parameter initialization, tags quad-port adapters,
 * and clears WoL capability flags on ports that cannot wake the system.
 **/
static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	/*
	 * Shared across probes: counts the ports of a quad-port adapter so
	 * only the first (port A) keeps WoL, and wraps every 4 ports.
	 * NOTE(review): no explicit locking — assumes serialized probing.
	 */
	static int global_quad_port_a; /* global port a indication */
	struct pci_dev *pdev = adapter->pdev;
	int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
	s32 rc;

	rc = e1000_init_mac_params_82571(adapter);
	if (rc)
		return rc;

	rc = e1000_init_nvm_params_82571(hw);
	if (rc)
		return rc;

	rc = e1000_init_phy_params_82571(hw);
	if (rc)
		return rc;

	/* tag quad port adapters first, it's used below */
	switch (pdev->device) {
	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
	case E1000_DEV_ID_82571PT_QUAD_COPPER:
		adapter->flags |= FLAG_IS_QUAD_PORT;
		/* mark the first port */
		if (global_quad_port_a == 0)
			adapter->flags |= FLAG_IS_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		global_quad_port_a++;
		if (global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	default:
		break;
	}

	switch (adapter->hw.mac.type) {
	case e1000_82571:
		/* these dual ports don't have WoL on port B at all */
		if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) ||
		     (pdev->device == E1000_DEV_ID_82571EB_SERDES) ||
		     (pdev->device == E1000_DEV_ID_82571EB_COPPER)) &&
		    (is_port_b))
			adapter->flags &= ~FLAG_HAS_WOL;
		/* quad ports only support WoL on port A */
		if (adapter->flags & FLAG_IS_QUAD_PORT &&
		    (!(adapter->flags & FLAG_IS_QUAD_PORT_A)))
			adapter->flags &= ~FLAG_HAS_WOL;
		/* Does not support WoL on any port */
		if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
			adapter->flags &= ~FLAG_HAS_WOL;
		break;
	case e1000_82573:
		/* only the 82573L variant supports jumbo frames */
		if (pdev->device == E1000_DEV_ID_82573L) {
			adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
			adapter->max_hw_frame_size = DEFAULT_JUMBO;
		}
		break;
	default:
		break;
	}

	return 0;
}
445
446/**
447 * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision
448 * @hw: pointer to the HW structure
449 *
450 * Reads the PHY registers and stores the PHY ID and possibly the PHY
451 * revision in the hardware structure.
452 **/
453static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
454{
455 struct e1000_phy_info *phy = &hw->phy;
456 s32 ret_val;
457 u16 phy_id = 0;
458
459 switch (hw->mac.type) {
460 case e1000_82571:
461 case e1000_82572:
462 /*
463 * The 82571 firmware may still be configuring the PHY.
464 * In this case, we cannot access the PHY until the
465 * configuration is done. So we explicitly set the
466 * PHY ID.
467 */
468 phy->id = IGP01E1000_I_PHY_ID;
469 break;
470 case e1000_82573:
471 return e1000e_get_phy_id(hw);
472 break;
473 case e1000_82574:
474 case e1000_82583:
475 ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
476 if (ret_val)
477 return ret_val;
478
479 phy->id = (u32)(phy_id << 16);
480 udelay(20);
481 ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
482 if (ret_val)
483 return ret_val;
484
485 phy->id |= (u32)(phy_id);
486 phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
487 break;
488 default:
489 return -E1000_ERR_PHY;
490 break;
491 }
492
493 return 0;
494}
495
/**
 * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM.  First takes the
 * software (SMBI) semaphore, then latches the firmware (SWESMBI) bit.
 * A SMBI timeout is tolerated (counted in smb_counter) to interwork
 * with older drivers on the other port; an SWESMBI timeout fails with
 * -E1000_ERR_NVM.
 **/
static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
{
	u32 swsm;
	s32 sw_timeout = hw->nvm.word_size + 1;
	s32 fw_timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/*
	 * If we have timedout 3 times on trying to acquire
	 * the inter-port SMBI semaphore, there is old code
	 * operating on the other port, and it is not
	 * releasing SMBI. Modify the number of times that
	 * we try for the semaphore to interwork with this
	 * older code.
	 */
	if (hw->dev_spec.e82571.smb_counter > 2)
		sw_timeout = 1;

	/* Get the SW semaphore */
	while (i < sw_timeout) {
		swsm = er32(SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	/* Intentionally continues on SW timeout (see smb_counter above). */
	if (i == sw_timeout) {
		e_dbg("Driver can't access device - SMBI bit is set.\n");
		hw->dev_spec.e82571.smb_counter++;
	}
	/* Get the FW semaphore. */
	for (i = 0; i < fw_timeout; i++) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (er32(SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == fw_timeout) {
		/* Release semaphores */
		e1000_put_hw_semaphore_82571(hw);
		e_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}
555
556/**
557 * e1000_put_hw_semaphore_82571 - Release hardware semaphore
558 * @hw: pointer to the HW structure
559 *
560 * Release hardware semaphore used to access the PHY or NVM
561 **/
562static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
563{
564 u32 swsm;
565
566 swsm = er32(SWSM);
567 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
568 ew32(SWSM, swsm);
569}
570/**
571 * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
572 * @hw: pointer to the HW structure
573 *
574 * Acquire the HW semaphore during reset.
575 *
576 **/
577static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
578{
579 u32 extcnf_ctrl;
580 s32 ret_val = 0;
581 s32 i = 0;
582
583 extcnf_ctrl = er32(EXTCNF_CTRL);
584 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
585 do {
586 ew32(EXTCNF_CTRL, extcnf_ctrl);
587 extcnf_ctrl = er32(EXTCNF_CTRL);
588
589 if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
590 break;
591
592 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
593
594 usleep_range(2000, 4000);
595 i++;
596 } while (i < MDIO_OWNERSHIP_TIMEOUT);
597
598 if (i == MDIO_OWNERSHIP_TIMEOUT) {
599 /* Release semaphores */
600 e1000_put_hw_semaphore_82573(hw);
601 e_dbg("Driver can't access the PHY\n");
602 ret_val = -E1000_ERR_PHY;
603 goto out;
604 }
605
606out:
607 return ret_val;
608}
609
610/**
611 * e1000_put_hw_semaphore_82573 - Release hardware semaphore
612 * @hw: pointer to the HW structure
613 *
614 * Release hardware semaphore used during reset.
615 *
616 **/
617static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
618{
619 u32 extcnf_ctrl;
620
621 extcnf_ctrl = er32(EXTCNF_CTRL);
622 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
623 ew32(EXTCNF_CTRL, extcnf_ctrl);
624}
625
/* Serializes 82574/82583 PHY/NVM semaphore acquisition across contexts. */
static DEFINE_MUTEX(swflag_mutex);
627
628/**
629 * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
630 * @hw: pointer to the HW structure
631 *
632 * Acquire the HW semaphore to access the PHY or NVM.
633 *
634 **/
635static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
636{
637 s32 ret_val;
638
639 mutex_lock(&swflag_mutex);
640 ret_val = e1000_get_hw_semaphore_82573(hw);
641 if (ret_val)
642 mutex_unlock(&swflag_mutex);
643 return ret_val;
644}
645
/**
 * e1000_put_hw_semaphore_82574 - Release hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Release hardware semaphore used to access the PHY or NVM: drops the
 * 82573 MDIO ownership bit, then unlocks the mutex taken by
 * e1000_get_hw_semaphore_82574().
 **/
static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
{
	e1000_put_hw_semaphore_82573(hw);
	mutex_unlock(&swflag_mutex);
}
658
659/**
660 * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
661 * @hw: pointer to the HW structure
662 * @active: true to enable LPLU, false to disable
663 *
664 * Sets the LPLU D0 state according to the active flag.
665 * LPLU will not be activated unless the
666 * device autonegotiation advertisement meets standards of
667 * either 10 or 10/100 or 10/100/1000 at all duplexes.
668 * This is a function pointer entry point only called by
669 * PHY setup routines.
670 **/
671static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
672{
673 u16 data = er32(POEMB);
674
675 if (active)
676 data |= E1000_PHY_CTRL_D0A_LPLU;
677 else
678 data &= ~E1000_PHY_CTRL_D0A_LPLU;
679
680 ew32(POEMB, data);
681 return 0;
682}
683
684/**
685 * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
686 * @hw: pointer to the HW structure
687 * @active: boolean used to enable/disable lplu
688 *
689 * The low power link up (lplu) state is set to the power management level D3
690 * when active is true, else clear lplu for D3. LPLU
691 * is used during Dx states where the power conservation is most important.
692 * During driver activity, SmartSpeed should be enabled so performance is
693 * maintained.
694 **/
695static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
696{
697 u16 data = er32(POEMB);
698
699 if (!active) {
700 data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
701 } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
702 (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
703 (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
704 data |= E1000_PHY_CTRL_NOND0A_LPLU;
705 }
706
707 ew32(POEMB, data);
708 return 0;
709}
710
711/**
712 * e1000_acquire_nvm_82571 - Request for access to the EEPROM
713 * @hw: pointer to the HW structure
714 *
715 * To gain access to the EEPROM, first we must obtain a hardware semaphore.
716 * Then for non-82573 hardware, set the EEPROM access request bit and wait
717 * for EEPROM access grant bit. If the access grant bit is not set, release
718 * hardware semaphore.
719 **/
720static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
721{
722 s32 ret_val;
723
724 ret_val = e1000_get_hw_semaphore_82571(hw);
725 if (ret_val)
726 return ret_val;
727
728 switch (hw->mac.type) {
729 case e1000_82573:
730 break;
731 default:
732 ret_val = e1000e_acquire_nvm(hw);
733 break;
734 }
735
736 if (ret_val)
737 e1000_put_hw_semaphore_82571(hw);
738
739 return ret_val;
740}
741
/**
 * e1000_release_nvm_82571 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the hardware semaphore taken by e1000_acquire_nvm_82571().
 **/
static void e1000_release_nvm_82571(struct e1000_hw *hw)
{
	e1000e_release_nvm(hw);
	e1000_put_hw_semaphore_82571(hw);
}
753
754/**
755 * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface
756 * @hw: pointer to the HW structure
757 * @offset: offset within the EEPROM to be written to
758 * @words: number of words to write
759 * @data: 16 bit word(s) to be written to the EEPROM
760 *
761 * For non-82573 silicon, write data to EEPROM at offset using SPI interface.
762 *
763 * If e1000e_update_nvm_checksum is not called after this function, the
764 * EEPROM will most likely contain an invalid checksum.
765 **/
766static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
767 u16 *data)
768{
769 s32 ret_val;
770
771 switch (hw->mac.type) {
772 case e1000_82573:
773 case e1000_82574:
774 case e1000_82583:
775 ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
776 break;
777 case e1000_82571:
778 case e1000_82572:
779 ret_val = e1000e_write_nvm_spi(hw, offset, words, data);
780 break;
781 default:
782 ret_val = -E1000_ERR_NVM;
783 break;
784 }
785
786 return ret_val;
787}
788
/**
 * e1000_update_nvm_checksum_82571 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum.  Then calculates the EEPROM checksum and writes the
 * value to the EEPROM.  For flash-based NVM the shadow RAM is then
 * committed to flash (FLUPD), resetting the firmware first if the STM
 * flash opcode is in use.
 **/
static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
{
	u32 eecd;
	s32 ret_val;
	u16 i;

	ret_val = e1000e_update_nvm_checksum_generic(hw);
	if (ret_val)
		return ret_val;

	/*
	 * If our nvm is an EEPROM, then we're done
	 * otherwise, commit the checksum to the flash NVM.
	 */
	if (hw->nvm.type != e1000_nvm_flash_hw)
		return ret_val;

	/* Check for pending operations. */
	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
		usleep_range(1000, 2000);
		if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
			break;
	}

	if (i == E1000_FLASH_UPDATES)
		return -E1000_ERR_NVM;

	/* Reset the firmware if using STM opcode. */
	if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
		/*
		 * The enabling of and the actual reset must be done
		 * in two write cycles.
		 */
		ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
		e1e_flush();
		ew32(HICR, E1000_HICR_FW_RESET);
	}

	/* Commit the write to flash */
	eecd = er32(EECD) | E1000_EECD_FLUPD;
	ew32(EECD, eecd);

	/* Wait for the hardware to clear FLUPD, signaling completion. */
	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
		usleep_range(1000, 2000);
		if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
			break;
	}

	if (i == E1000_FLASH_UPDATES)
		return -E1000_ERR_NVM;

	return 0;
}
850
/**
 * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 * Flash-based parts may first need their checksum repaired.
 **/
static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
{
	/* Repair attempt is best-effort; validation below is authoritative. */
	if (hw->nvm.type == e1000_nvm_flash_hw)
		e1000_fix_nvm_checksum_82571(hw);

	return e1000e_validate_nvm_checksum_generic(hw);
}
865
866/**
867 * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon
868 * @hw: pointer to the HW structure
869 * @offset: offset within the EEPROM to be written to
870 * @words: number of words to write
871 * @data: 16 bit word(s) to be written to the EEPROM
872 *
873 * After checking for invalid values, poll the EEPROM to ensure the previous
874 * command has completed before trying to write the next word. After write
875 * poll for completion.
876 *
877 * If e1000e_update_nvm_checksum is not called after this function, the
878 * EEPROM will most likely contain an invalid checksum.
879 **/
880static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
881 u16 words, u16 *data)
882{
883 struct e1000_nvm_info *nvm = &hw->nvm;
884 u32 i, eewr = 0;
885 s32 ret_val = 0;
886
887 /*
888 * A check for invalid values: offset too large, too many words,
889 * and not enough words.
890 */
891 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
892 (words == 0)) {
893 e_dbg("nvm parameter(s) out of bounds\n");
894 return -E1000_ERR_NVM;
895 }
896
897 for (i = 0; i < words; i++) {
898 eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
899 ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
900 E1000_NVM_RW_REG_START;
901
902 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
903 if (ret_val)
904 break;
905
906 ew32(EEWR, eewr);
907
908 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
909 if (ret_val)
910 break;
911 }
912
913 return ret_val;
914}
915
916/**
917 * e1000_get_cfg_done_82571 - Poll for configuration done
918 * @hw: pointer to the HW structure
919 *
920 * Reads the management control register for the config done bit to be set.
921 **/
922static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
923{
924 s32 timeout = PHY_CFG_TIMEOUT;
925
926 while (timeout) {
927 if (er32(EEMNGCTL) &
928 E1000_NVM_CFG_DONE_PORT_0)
929 break;
930 usleep_range(1000, 2000);
931 timeout--;
932 }
933 if (!timeout) {
934 e_dbg("MNG configuration cycle has not completed.\n");
935 return -E1000_ERR_RESET;
936 }
937
938 return 0;
939}
940
941/**
942 * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
943 * @hw: pointer to the HW structure
944 * @active: true to enable LPLU, false to disable
945 *
946 * Sets the LPLU D0 state according to the active flag. When activating LPLU
947 * this function also disables smart speed and vice versa. LPLU will not be
948 * activated unless the device autonegotiation advertisement meets standards
949 * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function
950 * pointer entry point only called by PHY setup routines.
951 **/
952static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
953{
954 struct e1000_phy_info *phy = &hw->phy;
955 s32 ret_val;
956 u16 data;
957
958 ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
959 if (ret_val)
960 return ret_val;
961
962 if (active) {
963 data |= IGP02E1000_PM_D0_LPLU;
964 ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
965 if (ret_val)
966 return ret_val;
967
968 /* When LPLU is enabled, we should disable SmartSpeed */
969 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
970 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
971 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
972 if (ret_val)
973 return ret_val;
974 } else {
975 data &= ~IGP02E1000_PM_D0_LPLU;
976 ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
977 /*
978 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
979 * during Dx states where the power conservation is most
980 * important. During driver activity we should enable
981 * SmartSpeed, so performance is maintained.
982 */
983 if (phy->smart_speed == e1000_smart_speed_on) {
984 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
985 &data);
986 if (ret_val)
987 return ret_val;
988
989 data |= IGP01E1000_PSCFR_SMART_SPEED;
990 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
991 data);
992 if (ret_val)
993 return ret_val;
994 } else if (phy->smart_speed == e1000_smart_speed_off) {
995 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
996 &data);
997 if (ret_val)
998 return ret_val;
999
1000 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1001 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1002 data);
1003 if (ret_val)
1004 return ret_val;
1005 }
1006 }
1007
1008 return 0;
1009}
1010
/**
 * e1000_reset_hw_82571 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state: quiesces PCIe and DMA,
 * acquires MDIO ownership where required, issues the global MAC reset,
 * then waits for the auto-read/PHY configuration to complete before
 * clearing pending interrupts and restoring the MAC address state.
 **/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{
	u32 ctrl, ctrl_ext;
	s32 ret_val;

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000e_disable_pcie_master(hw);
	if (ret_val)
		e_dbg("PCI-E Master disable polling has failed.\n");

	e_dbg("Masking off all interrupts\n");
	ew32(IMC, 0xffffffff);

	ew32(RCTL, 0);
	ew32(TCTL, E1000_TCTL_PSP);
	e1e_flush();

	usleep_range(10000, 20000);

	/*
	 * Must acquire the MDIO ownership before MAC reset.
	 * Ownership defaults to firmware after a reset.
	 *
	 * NOTE(review): on the default path ret_val still holds the result
	 * of e1000e_disable_pcie_master() above, so the "Cannot acquire
	 * MDIO ownership" message below can misfire — confirm intent.
	 */
	switch (hw->mac.type) {
	case e1000_82573:
		ret_val = e1000_get_hw_semaphore_82573(hw);
		break;
	case e1000_82574:
	case e1000_82583:
		ret_val = e1000_get_hw_semaphore_82574(hw);
		break;
	default:
		break;
	}
	if (ret_val)
		e_dbg("Cannot acquire MDIO ownership\n");

	ctrl = er32(CTRL);

	e_dbg("Issuing a global reset to MAC\n");
	ew32(CTRL, ctrl | E1000_CTRL_RST);

	/* Must release MDIO ownership and mutex after MAC reset. */
	switch (hw->mac.type) {
	case e1000_82574:
	case e1000_82583:
		e1000_put_hw_semaphore_82574(hw);
		break;
	default:
		break;
	}

	/* Flash-based parts need an explicit EEPROM reset after MAC reset. */
	if (hw->nvm.type == e1000_nvm_flash_hw) {
		udelay(10);
		ctrl_ext = er32(CTRL_EXT);
		ctrl_ext |= E1000_CTRL_EXT_EE_RST;
		ew32(CTRL_EXT, ctrl_ext);
		e1e_flush();
	}

	ret_val = e1000e_get_auto_rd_done(hw);
	if (ret_val)
		/* We don't want to continue accessing MAC registers. */
		return ret_val;

	/*
	 * Phy configuration from NVM just starts after EECD_AUTO_RD is set.
	 * Need to wait for Phy configuration completion before accessing
	 * NVM and Phy.
	 */

	switch (hw->mac.type) {
	case e1000_82573:
	case e1000_82574:
	case e1000_82583:
		msleep(25);
		break;
	default:
		break;
	}

	/* Clear any pending interrupt events. */
	ew32(IMC, 0xffffffff);
	er32(ICR);

	if (hw->mac.type == e1000_82571) {
		/* Install any alternate MAC address into RAR0 */
		ret_val = e1000_check_alt_mac_addr_generic(hw);
		if (ret_val)
			return ret_val;

		e1000e_set_laa_state_82571(hw, true);
	}

	/* Reinitialize the 82571 serdes link state machine */
	if (hw->phy.media_type == e1000_media_type_internal_serdes)
		hw->mac.serdes_link_state = e1000_serdes_link_down;

	return 0;
}
1120
1121/**
1122 * e1000_init_hw_82571 - Initialize hardware
1123 * @hw: pointer to the HW structure
1124 *
1125 * This inits the hardware readying it for operation.
1126 **/
1127static s32 e1000_init_hw_82571(struct e1000_hw *hw)
1128{
1129	struct e1000_mac_info *mac = &hw->mac;
1130	u32 reg_data;
1131	s32 ret_val;
1132	u16 i, rar_count = mac->rar_entry_count;
1133
1134	e1000_initialize_hw_bits_82571(hw);
1135
1136	/* Initialize identification LED */
1137	ret_val = e1000e_id_led_init(hw);
1138	if (ret_val)
1139		e_dbg("Error initializing identification LED\n");
1140	/* This is not fatal and we should not stop init due to this */
1141
1142	/* Disabling VLAN filtering */
1143	e_dbg("Initializing the IEEE VLAN\n");
1144	mac->ops.clear_vfta(hw);
1145
1146	/* Setup the receive address. */
1147	/*
1148	 * If, however, a locally administered address was assigned to the
1149	 * 82571, we must reserve a RAR for it to work around an issue where
1150	 * resetting one port will reload the MAC on the other port.
1151	 */
1152	if (e1000e_get_laa_state_82571(hw))
1153		rar_count--;
1154	e1000e_init_rx_addrs(hw, rar_count);
1155
1156	/* Zero out the Multicast HASH table */
1157	e_dbg("Zeroing the MTA\n");
1158	for (i = 0; i < mac->mta_reg_count; i++)
1159		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
1160
1161	/* Setup link and flow control */
1162	ret_val = e1000_setup_link_82571(hw);
	/*
	 * A setup_link failure is deliberately not acted on here; init
	 * continues and this ret_val is handed back to the caller below.
	 */
1163
1164	/* Set the transmit descriptor write-back policy */
1165	reg_data = er32(TXDCTL(0));
1166	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
1167		   E1000_TXDCTL_FULL_TX_DESC_WB |
1168		   E1000_TXDCTL_COUNT_DESC;
1169	ew32(TXDCTL(0), reg_data);
1170
1171	/* ...for both queues. */
1172	switch (mac->type) {
1173	case e1000_82573:
1174		e1000e_enable_tx_pkt_filtering(hw);
1175		/* fall through */
1176	case e1000_82574:
1177	case e1000_82583:
1178		reg_data = er32(GCR);
1179		reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
1180		ew32(GCR, reg_data);
1181		break;
1182	default:
	/* 82571/82572: apply the same write-back policy to Tx queue 1. */
1183		reg_data = er32(TXDCTL(1));
1184		reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
1185			   E1000_TXDCTL_FULL_TX_DESC_WB |
1186			   E1000_TXDCTL_COUNT_DESC;
1187		ew32(TXDCTL(1), reg_data);
1188		break;
1189	}
1190
1191	/*
1192	 * Clear all of the statistics registers (clear on read). It is
1193	 * important that we do this after we have tried to establish link
1194	 * because the symbol error count will increment wildly if there
1195	 * is no link.
1196	 */
1197	e1000_clear_hw_cntrs_82571(hw);
1198
1199	return ret_val;
1200}
1201
1202/**
1203 * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits
1204 * @hw: pointer to the HW structure
1205 *
1206 * Initializes required hardware-dependent bits needed for normal operation.
1207 **/
1208static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
1209{
1210	u32 reg;
1211
	/*
	 * Several fields below are programmed via raw bit numbers rather
	 * than named defines; presumably they come straight from the
	 * hardware specification / errata documents -- confirm against the
	 * datasheet before changing any of them.
	 */
1212	/* Transmit Descriptor Control 0 */
1213	reg = er32(TXDCTL(0));
1214	reg |= (1 << 22);
1215	ew32(TXDCTL(0), reg);
1216
1217	/* Transmit Descriptor Control 1 */
1218	reg = er32(TXDCTL(1));
1219	reg |= (1 << 22);
1220	ew32(TXDCTL(1), reg);
1221
1222	/* Transmit Arbitration Control 0 */
1223	reg = er32(TARC(0));
1224	reg &= ~(0xF << 27); /* 30:27 */
1225	switch (hw->mac.type) {
1226	case e1000_82571:
1227	case e1000_82572:
1228		reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
1229		break;
1230	default:
1231		break;
1232	}
1233	ew32(TARC(0), reg);
1234
1235	/* Transmit Arbitration Control 1 */
1236	reg = er32(TARC(1));
1237	switch (hw->mac.type) {
1238	case e1000_82571:
1239	case e1000_82572:
1240		reg &= ~((1 << 29) | (1 << 30));
1241		reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
	/* Bit 28 is cleared when multiple Tx requests (MULR) are enabled. */
1242		if (er32(TCTL) & E1000_TCTL_MULR)
1243			reg &= ~(1 << 28);
1244		else
1245			reg |= (1 << 28);
1246		ew32(TARC(1), reg);
1247		break;
1248	default:
1249		break;
1250	}
1251
1252	/* Device Control */
1253	switch (hw->mac.type) {
1254	case e1000_82573:
1255	case e1000_82574:
1256	case e1000_82583:
1257		reg = er32(CTRL);
1258		reg &= ~(1 << 29);
1259		ew32(CTRL, reg);
1260		break;
1261	default:
1262		break;
1263	}
1264
1265	/* Extended Device Control */
1266	switch (hw->mac.type) {
1267	case e1000_82573:
1268	case e1000_82574:
1269	case e1000_82583:
1270		reg = er32(CTRL_EXT);
1271		reg &= ~(1 << 23);
1272		reg |= (1 << 22);
1273		ew32(CTRL_EXT, reg);
1274		break;
1275	default:
1276		break;
1277	}
1278
	/* Enable packet-buffer ECC error correction (82571 only). */
1279	if (hw->mac.type == e1000_82571) {
1280		reg = er32(PBA_ECC);
1281		reg |= E1000_PBA_ECC_CORR_EN;
1282		ew32(PBA_ECC, reg);
1283	}
1284	/*
1285	 * Workaround for hardware errata.
1286	 * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
1287	 */
1288
1289	if ((hw->mac.type == e1000_82571) ||
1290	    (hw->mac.type == e1000_82572)) {
1291		reg = er32(CTRL_EXT);
1292		reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
1293		ew32(CTRL_EXT, reg);
1294	}
1295
1296
1297	/* PCI-Ex Control Registers */
1298	switch (hw->mac.type) {
1299	case e1000_82574:
1300	case e1000_82583:
1301		reg = er32(GCR);
1302		reg |= (1 << 22);
1303		ew32(GCR, reg);
1304
1305		/*
1306		 * Workaround for hardware errata.
1307		 * apply workaround for hardware errata documented in errata
1308		 * docs Fixes issue where some error prone or unreliable PCIe
1309		 * completions are occurring, particularly with ASPM enabled.
1310		 * Without fix, issue can cause Tx timeouts.
1311		 */
1312		reg = er32(GCR2);
1313		reg |= 1;
1314		ew32(GCR2, reg);
1315		break;
1316	default:
1317		break;
1318	}
1319}
1320
1321/**
1322 * e1000_clear_vfta_82571 - Clear VLAN filter table
1323 * @hw: pointer to the HW structure
1324 *
1325 * Clears the register array which contains the VLAN filter table by
1326 * setting all the values to 0.
1327 **/
1328static void e1000_clear_vfta_82571(struct e1000_hw *hw)
1329{
1330	u32 offset;
1331	u32 vfta_value = 0;
1332	u32 vfta_offset = 0;
1333	u32 vfta_bit_in_reg = 0;
1334
	/*
	 * On 82573/82574/82583 the manageability unit may own a VLAN ID;
	 * compute which VFTA word/bit it occupies so it can be preserved.
	 */
1335	switch (hw->mac.type) {
1336	case e1000_82573:
1337	case e1000_82574:
1338	case e1000_82583:
1339		if (hw->mng_cookie.vlan_id != 0) {
1340			/*
1341			 * The VFTA is a 4096b bit-field, each identifying
1342			 * a single VLAN ID. The following operations
1343			 * determine which 32b entry (i.e. offset) into the
1344			 * array we want to set the VLAN ID (i.e. bit) of
1345			 * the manageability unit.
1346			 */
1347			vfta_offset = (hw->mng_cookie.vlan_id >>
1348				       E1000_VFTA_ENTRY_SHIFT) &
1349			    E1000_VFTA_ENTRY_MASK;
1350			vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
1351					       E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
1352		}
1353		break;
1354	default:
1355		break;
1356	}
1357	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
1358		/*
1359		 * If the offset we want to clear is the same offset of the
1360		 * manageability VLAN ID, then clear all bits except that of
1361		 * the manageability unit.
1362		 */
1363		vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
1364		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
	/* Flushed after every entry -- presumably to push each posted
	 * write to the device before the next one; confirm if relaxing. */
1365		e1e_flush();
1366	}
1367}
1368
1369/**
1370 * e1000_check_mng_mode_82574 - Check manageability is enabled
1371 * @hw: pointer to the HW structure
1372 *
1373 * Reads the NVM Initialization Control Word 2 and returns true
1374 * (>0) if any manageability is enabled, else false (0).
1375 **/
1376static bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
1377{
1378 u16 data;
1379
1380 e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
1381 return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
1382}
1383
1384/**
1385 * e1000_led_on_82574 - Turn LED on
1386 * @hw: pointer to the HW structure
1387 *
1388 * Turn LED on.
1389 **/
1390static s32 e1000_led_on_82574(struct e1000_hw *hw)
1391{
1392 u32 ctrl;
1393 u32 i;
1394
1395 ctrl = hw->mac.ledctl_mode2;
1396 if (!(E1000_STATUS_LU & er32(STATUS))) {
1397 /*
1398 * If no link, then turn LED on by setting the invert bit
1399 * for each LED that's "on" (0x0E) in ledctl_mode2.
1400 */
1401 for (i = 0; i < 4; i++)
1402 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1403 E1000_LEDCTL_MODE_LED_ON)
1404 ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8));
1405 }
1406 ew32(LEDCTL, ctrl);
1407
1408 return 0;
1409}
1410
1411/**
1412 * e1000_check_phy_82574 - check 82574 phy hung state
1413 * @hw: pointer to the HW structure
1414 *
1415 * Returns whether phy is hung or not
1416 **/
1417bool e1000_check_phy_82574(struct e1000_hw *hw)
1418{
1419 u16 status_1kbt = 0;
1420 u16 receive_errors = 0;
1421 bool phy_hung = false;
1422 s32 ret_val = 0;
1423
1424 /*
1425 * Read PHY Receive Error counter first, if its is max - all F's then
1426 * read the Base1000T status register If both are max then PHY is hung.
1427 */
1428 ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
1429
1430 if (ret_val)
1431 goto out;
1432 if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
1433 ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
1434 if (ret_val)
1435 goto out;
1436 if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
1437 E1000_IDLE_ERROR_COUNT_MASK)
1438 phy_hung = true;
1439 }
1440out:
1441 return phy_hung;
1442}
1443
1444/**
1445 * e1000_setup_link_82571 - Setup flow control and link settings
1446 * @hw: pointer to the HW structure
1447 *
1448 * Determines which flow control settings to use, then configures flow
1449 * control. Calls the appropriate media-specific link configuration
1450 * function. Assuming the adapter has a valid link partner, a valid link
1451 * should be established. Assumes the hardware has previously been reset
1452 * and the transmitter and receiver are not enabled.
1453 **/
1454static s32 e1000_setup_link_82571(struct e1000_hw *hw)
1455{
1456 /*
1457 * 82573 does not have a word in the NVM to determine
1458 * the default flow control setting, so we explicitly
1459 * set it to full.
1460 */
1461 switch (hw->mac.type) {
1462 case e1000_82573:
1463 case e1000_82574:
1464 case e1000_82583:
1465 if (hw->fc.requested_mode == e1000_fc_default)
1466 hw->fc.requested_mode = e1000_fc_full;
1467 break;
1468 default:
1469 break;
1470 }
1471
1472 return e1000e_setup_link(hw);
1473}
1474
1475/**
1476 * e1000_setup_copper_link_82571 - Configure copper link settings
1477 * @hw: pointer to the HW structure
1478 *
1479 * Configures the link for auto-neg or forced speed and duplex. Then we check
1480 * for link, once link is established calls to configure collision distance
1481 * and flow control are called.
1482 **/
1483static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
1484{
1485 u32 ctrl;
1486 s32 ret_val;
1487
1488 ctrl = er32(CTRL);
1489 ctrl |= E1000_CTRL_SLU;
1490 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1491 ew32(CTRL, ctrl);
1492
1493 switch (hw->phy.type) {
1494 case e1000_phy_m88:
1495 case e1000_phy_bm:
1496 ret_val = e1000e_copper_link_setup_m88(hw);
1497 break;
1498 case e1000_phy_igp_2:
1499 ret_val = e1000e_copper_link_setup_igp(hw);
1500 break;
1501 default:
1502 return -E1000_ERR_PHY;
1503 break;
1504 }
1505
1506 if (ret_val)
1507 return ret_val;
1508
1509 ret_val = e1000e_setup_copper_link(hw);
1510
1511 return ret_val;
1512}
1513
1514/**
1515 * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes
1516 * @hw: pointer to the HW structure
1517 *
1518 * Configures collision distance and flow control for fiber and serdes links.
1519 * Upon successful setup, poll for link.
1520 **/
1521static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
1522{
1523 switch (hw->mac.type) {
1524 case e1000_82571:
1525 case e1000_82572:
1526 /*
1527 * If SerDes loopback mode is entered, there is no form
1528 * of reset to take the adapter out of that mode. So we
1529 * have to explicitly take the adapter out of loopback
1530 * mode. This prevents drivers from twiddling their thumbs
1531 * if another tool failed to take it out of loopback mode.
1532 */
1533 ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1534 break;
1535 default:
1536 break;
1537 }
1538
1539 return e1000e_setup_fiber_serdes_link(hw);
1540}
1541
1542/**
1543 * e1000_check_for_serdes_link_82571 - Check for link (Serdes)
1544 * @hw: pointer to the HW structure
1545 *
1546 * Reports the link state as up or down.
1547 *
1548 * If autonegotiation is supported by the link partner, the link state is
1549 * determined by the result of autonegotiation. This is the most likely case.
1550 * If autonegotiation is not supported by the link partner, and the link
1551 * has a valid signal, force the link up.
1552 *
1553 * The link state is represented internally here by 4 states:
1554 *
1555 * 1) down
1556 * 2) autoneg_progress
1557 * 3) autoneg_complete (the link successfully autonegotiated)
1558 * 4) forced_up (the link has been forced up, it did not autonegotiate)
1559 *
1560 **/
1561static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1562{
1563	struct e1000_mac_info *mac = &hw->mac;
1564	u32 rxcw;
1565	u32 ctrl;
1566	u32 status;
1567	u32 txcw;
1568	u32 i;
1569	s32 ret_val = 0;
1570
	/* Snapshot the registers that drive the state machine. */
1571	ctrl = er32(CTRL);
1572	status = er32(STATUS);
1573	rxcw = er32(RXCW);
1574
1575	if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
1576
1577		/* Receiver is synchronized with no invalid bits. */
1578		switch (mac->serdes_link_state) {
1579		case e1000_serdes_link_autoneg_complete:
1580			if (!(status & E1000_STATUS_LU)) {
1581				/*
1582				 * We have lost link, retry autoneg before
1583				 * reporting link failure
1584				 */
1585				mac->serdes_link_state =
1586				    e1000_serdes_link_autoneg_progress;
1587				mac->serdes_has_link = false;
1588				e_dbg("AN_UP -> AN_PROG\n");
1589			} else {
1590				mac->serdes_has_link = true;
1591			}
1592			break;
1593
1594		case e1000_serdes_link_forced_up:
1595			/*
1596			 * If we are receiving /C/ ordered sets, re-enable
1597			 * auto-negotiation in the TXCW register and disable
1598			 * forced link in the Device Control register in an
1599			 * attempt to auto-negotiate with our link partner.
1600			 * If the partner code word is null, stop forcing
1601			 * and restart auto negotiation.
1602			 */
1603			if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
1604				/* Enable autoneg, and unforce link up */
1605				ew32(TXCW, mac->txcw);
1606				ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
1607				mac->serdes_link_state =
1608				    e1000_serdes_link_autoneg_progress;
1609				mac->serdes_has_link = false;
1610				e_dbg("FORCED_UP -> AN_PROG\n");
1611			} else {
1612				mac->serdes_has_link = true;
1613			}
1614			break;
1615
1616		case e1000_serdes_link_autoneg_progress:
1617			if (rxcw & E1000_RXCW_C) {
1618				/*
1619				 * We received /C/ ordered sets, meaning the
1620				 * link partner has autonegotiated, and we can
1621				 * trust the Link Up (LU) status bit.
1622				 */
1623				if (status & E1000_STATUS_LU) {
1624					mac->serdes_link_state =
1625					    e1000_serdes_link_autoneg_complete;
1626					e_dbg("AN_PROG -> AN_UP\n");
1627					mac->serdes_has_link = true;
1628				} else {
1629					/* Autoneg completed, but failed. */
1630					mac->serdes_link_state =
1631					    e1000_serdes_link_down;
1632					e_dbg("AN_PROG -> DOWN\n");
1633				}
1634			} else {
1635				/*
1636				 * The link partner did not autoneg.
1637				 * Force link up and full duplex, and change
1638				 * state to forced.
1639				 */
1640				ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
1641				ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
1642				ew32(CTRL, ctrl);
1643
1644				/* Configure Flow Control after link up. */
1645				ret_val = e1000e_config_fc_after_link_up(hw);
1646				if (ret_val) {
1647					e_dbg("Error config flow control\n");
1648					break;
1649				}
1650				mac->serdes_link_state =
1651				    e1000_serdes_link_forced_up;
1652				mac->serdes_has_link = true;
1653				e_dbg("AN_PROG -> FORCED_UP\n");
1654			}
1655			break;
1656
1657		case e1000_serdes_link_down:
1658		default:
1659			/*
1660			 * The link was down but the receiver has now gained
1661			 * valid sync, so lets see if we can bring the link
1662			 * up.
1663			 */
1664			ew32(TXCW, mac->txcw);
1665			ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
1666			mac->serdes_link_state =
1667			    e1000_serdes_link_autoneg_progress;
1668			mac->serdes_has_link = false;
1669			e_dbg("DOWN -> AN_PROG\n");
1670			break;
1671		}
1672	} else {
1673		if (!(rxcw & E1000_RXCW_SYNCH)) {
1674			mac->serdes_has_link = false;
1675			mac->serdes_link_state = e1000_serdes_link_down;
1676			e_dbg("ANYSTATE -> DOWN\n");
1677		} else {
1678			/*
1679			 * Check several times, if Sync and Config
1680			 * both are consistently 1 then simply ignore
1681			 * the Invalid bit and restart Autoneg
1682			 */
	/* Poll RXCW up to AN_RETRY_COUNT times, 10us apart. */
1683			for (i = 0; i < AN_RETRY_COUNT; i++) {
1684				udelay(10);
1685				rxcw = er32(RXCW);
1686				if ((rxcw & E1000_RXCW_IV) &&
1687				    !((rxcw & E1000_RXCW_SYNCH) &&
1688				      (rxcw & E1000_RXCW_C))) {
1689					mac->serdes_has_link = false;
1690					mac->serdes_link_state =
1691					    e1000_serdes_link_down;
1692					e_dbg("ANYSTATE -> DOWN\n");
1693					break;
1694				}
1695			}
1696
	/* IV never proved persistent: re-enable autoneg and restart. */
1697			if (i == AN_RETRY_COUNT) {
1698				txcw = er32(TXCW);
1699				txcw |= E1000_TXCW_ANE;
1700				ew32(TXCW, txcw);
1701				mac->serdes_link_state =
1702				    e1000_serdes_link_autoneg_progress;
1703				mac->serdes_has_link = false;
1704				e_dbg("ANYSTATE -> AN_PROG\n");
1705			}
1706		}
1707	}
1708
1709	return ret_val;
1710}
1711
1712/**
1713 * e1000_valid_led_default_82571 - Verify a valid default LED config
1714 * @hw: pointer to the HW structure
1715 * @data: pointer to the NVM (EEPROM)
1716 *
1717 * Read the EEPROM for the current default LED configuration. If the
1718 * LED configuration is not valid, set to a valid LED configuration.
1719 **/
1720static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
1721{
1722 s32 ret_val;
1723
1724 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1725 if (ret_val) {
1726 e_dbg("NVM Read Error\n");
1727 return ret_val;
1728 }
1729
1730 switch (hw->mac.type) {
1731 case e1000_82573:
1732 case e1000_82574:
1733 case e1000_82583:
1734 if (*data == ID_LED_RESERVED_F746)
1735 *data = ID_LED_DEFAULT_82573;
1736 break;
1737 default:
1738 if (*data == ID_LED_RESERVED_0000 ||
1739 *data == ID_LED_RESERVED_FFFF)
1740 *data = ID_LED_DEFAULT;
1741 break;
1742 }
1743
1744 return 0;
1745}
1746
1747/**
1748 * e1000e_get_laa_state_82571 - Get locally administered address state
1749 * @hw: pointer to the HW structure
1750 *
1751 * Retrieve and return the current locally administered address state.
1752 **/
1753bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
1754{
1755 if (hw->mac.type != e1000_82571)
1756 return false;
1757
1758 return hw->dev_spec.e82571.laa_is_present;
1759}
1760
1761/**
1762 * e1000e_set_laa_state_82571 - Set locally administered address state
1763 * @hw: pointer to the HW structure
1764 * @state: enable/disable locally administered address
1765 *
1766 * Enable/Disable the current locally administered address state.
1767 **/
1768void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
1769{
1770 if (hw->mac.type != e1000_82571)
1771 return;
1772
1773 hw->dev_spec.e82571.laa_is_present = state;
1774
1775 /* If workaround is activated... */
1776 if (state)
1777 /*
1778 * Hold a copy of the LAA in RAR[14] This is done so that
1779 * between the time RAR[0] gets clobbered and the time it
1780 * gets fixed, the actual LAA is in one of the RARs and no
1781 * incoming packets directed to this port are dropped.
1782 * Eventually the LAA will be in RAR[0] and RAR[14].
1783 */
1784 e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1);
1785}
1786
1787/**
1788 * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
1789 * @hw: pointer to the HW structure
1790 *
1791 * Verifies that the EEPROM has completed the update. After updating the
1792 * EEPROM, we need to check bit 15 in work 0x23 for the checksum fix. If
1793 * the checksum fix is not implemented, we need to set the bit and update
1794 * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect,
1795 * we need to return bad checksum.
1796 **/
1797static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
1798{
1799 struct e1000_nvm_info *nvm = &hw->nvm;
1800 s32 ret_val;
1801 u16 data;
1802
1803 if (nvm->type != e1000_nvm_flash_hw)
1804 return 0;
1805
1806 /*
1807 * Check bit 4 of word 10h. If it is 0, firmware is done updating
1808 * 10h-12h. Checksum may need to be fixed.
1809 */
1810 ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
1811 if (ret_val)
1812 return ret_val;
1813
1814 if (!(data & 0x10)) {
1815 /*
1816 * Read 0x23 and check bit 15. This bit is a 1
1817 * when the checksum has already been fixed. If
1818 * the checksum is still wrong and this bit is a
1819 * 1, we need to return bad checksum. Otherwise,
1820 * we need to set this bit to a 1 and update the
1821 * checksum.
1822 */
1823 ret_val = e1000_read_nvm(hw, 0x23, 1, &data);
1824 if (ret_val)
1825 return ret_val;
1826
1827 if (!(data & 0x8000)) {
1828 data |= 0x8000;
1829 ret_val = e1000_write_nvm(hw, 0x23, 1, &data);
1830 if (ret_val)
1831 return ret_val;
1832 ret_val = e1000e_update_nvm_checksum(hw);
1833 }
1834 }
1835
1836 return 0;
1837}
1838
1839/**
1840 * e1000_read_mac_addr_82571 - Read device MAC address
1841 * @hw: pointer to the HW structure
1842 **/
1843static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
1844{
1845 s32 ret_val = 0;
1846
1847 if (hw->mac.type == e1000_82571) {
1848 /*
1849 * If there's an alternate MAC address place it in RAR0
1850 * so that it will override the Si installed default perm
1851 * address.
1852 */
1853 ret_val = e1000_check_alt_mac_addr_generic(hw);
1854 if (ret_val)
1855 goto out;
1856 }
1857
1858 ret_val = e1000_read_mac_addr_generic(hw);
1859
1860out:
1861 return ret_val;
1862}
1863
1864/**
1865 * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
1866 * @hw: pointer to the HW structure
1867 *
1868 * In the case of a PHY power down to save power, or to turn off link during a
1869 * driver unload, or wake on lan is not enabled, remove the link.
1870 **/
1871static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
1872{
1873 struct e1000_phy_info *phy = &hw->phy;
1874 struct e1000_mac_info *mac = &hw->mac;
1875
1876 if (!(phy->ops.check_reset_block))
1877 return;
1878
1879 /* If the management interface is not enabled, then power down */
1880 if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
1881 e1000_power_down_phy_copper(hw);
1882}
1883
1884/**
1885 * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
1886 * @hw: pointer to the HW structure
1887 *
1888 * Clears the hardware counters by reading the counter registers.
1889 **/
1890static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
1891{
	/* Clear the counters shared with all e1000e parts first. */
1892	e1000e_clear_hw_cntrs_base(hw);
1893
	/*
	 * All registers below are clear-on-read; the read values are
	 * intentionally discarded.
	 */
1894	er32(PRC64);
1895	er32(PRC127);
1896	er32(PRC255);
1897	er32(PRC511);
1898	er32(PRC1023);
1899	er32(PRC1522);
1900	er32(PTC64);
1901	er32(PTC127);
1902	er32(PTC255);
1903	er32(PTC511);
1904	er32(PTC1023);
1905	er32(PTC1522);
1906
1907	er32(ALGNERRC);
1908	er32(RXERRC);
1909	er32(TNCRS);
1910	er32(CEXTERR);
1911	er32(TSCTC);
1912	er32(TSCTFC);
1913
1914	er32(MGTPRC);
1915	er32(MGTPDC);
1916	er32(MGTPTC);
1917
1918	er32(IAC);
1919	er32(ICRXOC);
1920
1921	er32(ICRXPTC);
1922	er32(ICRXATC);
1923	er32(ICTXPTC);
1924	er32(ICTXATC);
1925	er32(ICTXQEC);
1926	er32(ICTXQMTC);
1927	er32(ICRXDMTC);
1928}
1929
/*
 * MAC operations shared by all 8257x parts.  Hooks marked "dependent"
 * below are presumably installed at runtime per mac/media type (e.g. by
 * e1000_get_variants_82571, referenced by the info tables in this file)
 * -- TODO confirm.
 */
1930static struct e1000_mac_operations e82571_mac_ops = {
1931	/* .check_mng_mode: mac type dependent */
1932	/* .check_for_link: media type dependent */
1933	.id_led_init		= e1000e_id_led_init,
1934	.cleanup_led		= e1000e_cleanup_led_generic,
1935	.clear_hw_cntrs		= e1000_clear_hw_cntrs_82571,
1936	.get_bus_info		= e1000e_get_bus_info_pcie,
1937	.set_lan_id		= e1000_set_lan_id_multi_port_pcie,
1938	/* .get_link_up_info: media type dependent */
1939	/* .led_on: mac type dependent */
1940	.led_off		= e1000e_led_off_generic,
1941	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
1942	.write_vfta		= e1000_write_vfta_generic,
1943	.clear_vfta		= e1000_clear_vfta_82571,
1944	.reset_hw		= e1000_reset_hw_82571,
1945	.init_hw		= e1000_init_hw_82571,
1946	.setup_link		= e1000_setup_link_82571,
1947	/* .setup_physical_interface: media type dependent */
1948	.setup_led		= e1000e_setup_led_generic,
1949	.read_mac_addr		= e1000_read_mac_addr_82571,
1950};
1951
/* PHY operations for IGP-class PHYs; referenced by the e1000_82571_info
 * and e1000_82572_info tables below. */
1952static struct e1000_phy_operations e82_phy_ops_igp = {
1953	.acquire		= e1000_get_hw_semaphore_82571,
1954	.check_polarity		= e1000_check_polarity_igp,
1955	.check_reset_block	= e1000e_check_reset_block_generic,
1956	.commit			= NULL,
1957	.force_speed_duplex	= e1000e_phy_force_speed_duplex_igp,
1958	.get_cfg_done		= e1000_get_cfg_done_82571,
1959	.get_cable_length	= e1000e_get_cable_length_igp_2,
1960	.get_info		= e1000e_get_phy_info_igp,
1961	.read_reg		= e1000e_read_phy_reg_igp,
1962	.release		= e1000_put_hw_semaphore_82571,
1963	.reset			= e1000e_phy_hw_reset_generic,
1964	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
1965	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
1966	.write_reg		= e1000e_write_phy_reg_igp,
1967	.cfg_on_link_up		= NULL,
1968};
1969
/* PHY operations for M88-class PHYs; referenced by the e1000_82573_info
 * table below. */
1970static struct e1000_phy_operations e82_phy_ops_m88 = {
1971	.acquire		= e1000_get_hw_semaphore_82571,
1972	.check_polarity		= e1000_check_polarity_m88,
1973	.check_reset_block	= e1000e_check_reset_block_generic,
1974	.commit			= e1000e_phy_sw_reset,
1975	.force_speed_duplex	= e1000e_phy_force_speed_duplex_m88,
1976	.get_cfg_done		= e1000e_get_cfg_done,
1977	.get_cable_length	= e1000e_get_cable_length_m88,
1978	.get_info		= e1000e_get_phy_info_m88,
1979	.read_reg		= e1000e_read_phy_reg_m88,
1980	.release		= e1000_put_hw_semaphore_82571,
1981	.reset			= e1000e_phy_hw_reset_generic,
1982	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
1983	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
1984	.write_reg		= e1000e_write_phy_reg_m88,
1985	.cfg_on_link_up		= NULL,
1986};
1987
/* PHY operations for BM-class PHYs (M88 helpers with BM2 register access);
 * referenced by the e1000_82574_info and e1000_82583_info tables below. */
1988static struct e1000_phy_operations e82_phy_ops_bm = {
1989	.acquire		= e1000_get_hw_semaphore_82571,
1990	.check_polarity		= e1000_check_polarity_m88,
1991	.check_reset_block	= e1000e_check_reset_block_generic,
1992	.commit			= e1000e_phy_sw_reset,
1993	.force_speed_duplex	= e1000e_phy_force_speed_duplex_m88,
1994	.get_cfg_done		= e1000e_get_cfg_done,
1995	.get_cable_length	= e1000e_get_cable_length_m88,
1996	.get_info		= e1000e_get_phy_info_m88,
1997	.read_reg		= e1000e_read_phy_reg_bm2,
1998	.release		= e1000_put_hw_semaphore_82571,
1999	.reset			= e1000e_phy_hw_reset_generic,
2000	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
2001	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
2002	.write_reg		= e1000e_write_phy_reg_bm2,
2003	.cfg_on_link_up		= NULL,
2004};
2005
/* NVM operations shared by all five e1000_info tables in this file. */
2006static struct e1000_nvm_operations e82571_nvm_ops = {
2007	.acquire		= e1000_acquire_nvm_82571,
2008	.read			= e1000e_read_nvm_eerd,
2009	.release		= e1000_release_nvm_82571,
2010	.update			= e1000_update_nvm_checksum_82571,
2011	.valid_led_default	= e1000_valid_led_default_82571,
2012	.validate		= e1000_validate_nvm_checksum_82571,
2013	.write			= e1000_write_nvm_82571,
2014};
2015
/* Feature flags, packet-buffer allocation and ops tables for 82571-based
 * adapters; errata-driven flags are annotated inline. */
2016struct e1000_info e1000_82571_info = {
2017	.mac			= e1000_82571,
2018	.flags			= FLAG_HAS_HW_VLAN_FILTER
2019				  | FLAG_HAS_JUMBO_FRAMES
2020				  | FLAG_HAS_WOL
2021				  | FLAG_APME_IN_CTRL3
2022				  | FLAG_RX_CSUM_ENABLED
2023				  | FLAG_HAS_CTRLEXT_ON_LOAD
2024				  | FLAG_HAS_SMART_POWER_DOWN
2025				  | FLAG_RESET_OVERWRITES_LAA /* errata */
2026				  | FLAG_TARC_SPEED_MODE_BIT /* errata */
2027				  | FLAG_APME_CHECK_PORT_B,
2028	.flags2			= FLAG2_DISABLE_ASPM_L1 /* errata 13 */
2029				  | FLAG2_DMA_BURST,
2030	.pba			= 38,
2031	.max_hw_frame_size	= DEFAULT_JUMBO,
2032	.get_variants		= e1000_get_variants_82571,
2033	.mac_ops		= &e82571_mac_ops,
2034	.phy_ops		= &e82_phy_ops_igp,
2035	.nvm_ops		= &e82571_nvm_ops,
2036};
2037
/* Feature flags and defaults for 82572-based adapters. */
2038struct e1000_info e1000_82572_info = {
2039	.mac			= e1000_82572,
2040	.flags			= FLAG_HAS_HW_VLAN_FILTER
2041				  | FLAG_HAS_JUMBO_FRAMES
2042				  | FLAG_HAS_WOL
2043				  | FLAG_APME_IN_CTRL3
2044				  | FLAG_RX_CSUM_ENABLED
2045				  | FLAG_HAS_CTRLEXT_ON_LOAD
2046				  | FLAG_TARC_SPEED_MODE_BIT, /* errata */
2047	.flags2			= FLAG2_DISABLE_ASPM_L1 /* errata 13 */
2048				  | FLAG2_DMA_BURST,
2049	.pba			= 38,
2050	.max_hw_frame_size	= DEFAULT_JUMBO,
2051	.get_variants		= e1000_get_variants_82571,
2052	.mac_ops		= &e82571_mac_ops,
2053	.phy_ops		= &e82_phy_ops_igp,
2054	.nvm_ops		= &e82571_nvm_ops,
2055};
2056
/* Feature flags and defaults for 82573-based adapters; note the smaller
 * packet buffer (pba = 20) and standard (non-jumbo) max frame size. */
2057struct e1000_info e1000_82573_info = {
2058	.mac			= e1000_82573,
2059	.flags			= FLAG_HAS_HW_VLAN_FILTER
2060				  | FLAG_HAS_WOL
2061				  | FLAG_APME_IN_CTRL3
2062				  | FLAG_RX_CSUM_ENABLED
2063				  | FLAG_HAS_SMART_POWER_DOWN
2064				  | FLAG_HAS_AMT
2065				  | FLAG_HAS_SWSM_ON_LOAD,
2066	.flags2			= FLAG2_DISABLE_ASPM_L1
2067				  | FLAG2_DISABLE_ASPM_L0S,
2068	.pba			= 20,
2069	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
2070	.get_variants		= e1000_get_variants_82571,
2071	.mac_ops		= &e82571_mac_ops,
2072	.phy_ops		= &e82_phy_ops_m88,
2073	.nvm_ops		= &e82571_nvm_ops,
2074};
2075
/* Feature flags and defaults for 82574-based adapters; the only part in
 * this file with MSI-X and the PHY-hang check (FLAG2_CHECK_PHY_HANG,
 * serviced by e1000_check_phy_82574). */
2076struct e1000_info e1000_82574_info = {
2077	.mac			= e1000_82574,
2078	.flags			= FLAG_HAS_HW_VLAN_FILTER
2079				  | FLAG_HAS_MSIX
2080				  | FLAG_HAS_JUMBO_FRAMES
2081				  | FLAG_HAS_WOL
2082				  | FLAG_APME_IN_CTRL3
2083				  | FLAG_RX_CSUM_ENABLED
2084				  | FLAG_HAS_SMART_POWER_DOWN
2085				  | FLAG_HAS_AMT
2086				  | FLAG_HAS_CTRLEXT_ON_LOAD,
2087	.flags2			= FLAG2_CHECK_PHY_HANG
2088				  | FLAG2_DISABLE_ASPM_L0S
2089				  | FLAG2_NO_DISABLE_RX,
2090	.pba			= 32,
2091	.max_hw_frame_size	= DEFAULT_JUMBO,
2092	.get_variants		= e1000_get_variants_82571,
2093	.mac_ops		= &e82571_mac_ops,
2094	.phy_ops		= &e82_phy_ops_bm,
2095	.nvm_ops		= &e82571_nvm_ops,
2096};
2097
/* Feature flags and defaults for 82583-based adapters. */
2098struct e1000_info e1000_82583_info = {
2099	.mac			= e1000_82583,
2100	.flags			= FLAG_HAS_HW_VLAN_FILTER
2101				  | FLAG_HAS_WOL
2102				  | FLAG_APME_IN_CTRL3
2103				  | FLAG_RX_CSUM_ENABLED
2104				  | FLAG_HAS_SMART_POWER_DOWN
2105				  | FLAG_HAS_AMT
2106				  | FLAG_HAS_JUMBO_FRAMES
2107				  | FLAG_HAS_CTRLEXT_ON_LOAD,
2108	.flags2			= FLAG2_DISABLE_ASPM_L0S
2109				  | FLAG2_NO_DISABLE_RX,
2110	.pba			= 32,
2111	.max_hw_frame_size	= DEFAULT_JUMBO,
2112	.get_variants		= e1000_get_variants_82571,
2113	.mac_ops		= &e82571_mac_ops,
2114	.phy_ops		= &e82_phy_ops_bm,
2115	.nvm_ops		= &e82571_nvm_ops,
2116};
2117
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile
new file mode 100644
index 00000000000..28519acacd2
--- /dev/null
+++ b/drivers/net/e1000e/Makefile
@@ -0,0 +1,37 @@
1################################################################################
2#
3# Intel PRO/1000 Linux driver
4# Copyright(c) 1999 - 2011 Intel Corporation.
5#
6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License,
8# version 2, as published by the Free Software Foundation.
9#
10# This program is distributed in the hope it will be useful, but WITHOUT
11# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13# more details.
14#
15# You should have received a copy of the GNU General Public License along with
16# this program; if not, write to the Free Software Foundation, Inc.,
17# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18#
19# The full GNU General Public License is included in this distribution in
20# the file called "COPYING".
21#
22# Contact Information:
23# Linux NICS <linux.nics@intel.com>
24# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26#
27################################################################################
28
29#
30# Makefile for the Intel(R) PRO/1000 ethernet driver
31#
32
# Build the e1000e module only when CONFIG_E1000E is set (y/m).
33obj-$(CONFIG_E1000E) += e1000e.o
34
# Object files that are linked into e1000e.o / e1000e.ko.
35e1000e-objs := 82571.o ich8lan.o es2lan.o \
36	       lib.o phy.o param.o ethtool.o netdev.o
37
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
new file mode 100644
index 00000000000..c516a7440be
--- /dev/null
+++ b/drivers/net/e1000e/defines.h
@@ -0,0 +1,844 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#ifndef _E1000_DEFINES_H_
30#define _E1000_DEFINES_H_
31
32#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
33#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
34#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
35#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
36#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
37#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
38#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
39#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
40#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
41#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
42#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
43#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
44#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
45#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
46#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
47#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
48#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
49#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
50
51/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
52#define REQ_TX_DESCRIPTOR_MULTIPLE 8
53#define REQ_RX_DESCRIPTOR_MULTIPLE 8
54
55/* Definitions for power management and wakeup registers */
56/* Wake Up Control */
57#define E1000_WUC_APME 0x00000001 /* APM Enable */
58#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
59#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
60
61/* Wake Up Filter Control */
62#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
63#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
64#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
65#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
66#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
67#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
68
69/* Wake Up Status */
70#define E1000_WUS_LNKC E1000_WUFC_LNKC
71#define E1000_WUS_MAG E1000_WUFC_MAG
72#define E1000_WUS_EX E1000_WUFC_EX
73#define E1000_WUS_MC E1000_WUFC_MC
74#define E1000_WUS_BC E1000_WUFC_BC
75
76/* Extended Device Control */
77#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
78#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
79#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
80#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
81#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
82#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
83#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
84#define E1000_CTRL_EXT_EIAME 0x01000000
85#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
86#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
87#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
88#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
89#define E1000_CTRL_EXT_LSECCK 0x00001000
90#define E1000_CTRL_EXT_PHYPDEN 0x00100000
91
92/* Receive Descriptor bit definitions */
93#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
94#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
95#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
96#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
97#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
98#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
99#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
100#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
101#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
102#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
103#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
104#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
105#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
106
107#define E1000_RXDEXT_STATERR_CE 0x01000000
108#define E1000_RXDEXT_STATERR_SE 0x02000000
109#define E1000_RXDEXT_STATERR_SEQ 0x04000000
110#define E1000_RXDEXT_STATERR_CXE 0x10000000
111#define E1000_RXDEXT_STATERR_RXE 0x80000000
112
113/* mask to determine if packets should be dropped due to frame errors */
114#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
115 E1000_RXD_ERR_CE | \
116 E1000_RXD_ERR_SE | \
117 E1000_RXD_ERR_SEQ | \
118 E1000_RXD_ERR_CXE | \
119 E1000_RXD_ERR_RXE)
120
121/* Same mask, but for extended and packet split descriptors */
122#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
123 E1000_RXDEXT_STATERR_CE | \
124 E1000_RXDEXT_STATERR_SE | \
125 E1000_RXDEXT_STATERR_SEQ | \
126 E1000_RXDEXT_STATERR_CXE | \
127 E1000_RXDEXT_STATERR_RXE)
128
129#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
130
131/* Management Control */
132#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
133#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
134#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
135#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
136#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
137/* Enable MAC address filtering */
138#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
139/* Enable MNG packets to host memory */
140#define E1000_MANC_EN_MNG2HOST 0x00200000
141
142#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
143#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
144#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
145#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
146
147/* Receive Control */
148#define E1000_RCTL_EN 0x00000002 /* enable */
149#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
150#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
151#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
152#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
153#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
154#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
155#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
156#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
157#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */
158#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
159#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
160#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
161/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
162#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
163#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */
164#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */
165#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
166/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
167#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */
168#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */
169#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
170#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
171#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
172#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
173#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
174#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
175#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
176
177/*
178 * Use byte values for the following shift parameters
179 * Usage:
180 * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
181 * E1000_PSRCTL_BSIZE0_MASK) |
182 * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
183 * E1000_PSRCTL_BSIZE1_MASK) |
184 * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
185 * E1000_PSRCTL_BSIZE2_MASK) |
186 * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |;
187 * E1000_PSRCTL_BSIZE3_MASK))
188 * where value0 = [128..16256], default=256
189 * value1 = [1024..64512], default=4096
190 * value2 = [0..64512], default=4096
191 * value3 = [0..64512], default=0
192 */
193
194#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
195#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
196#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
197#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
198
199#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
200#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
201#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
202#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
203
204/* SWFW_SYNC Definitions */
205#define E1000_SWFW_EEP_SM 0x1
206#define E1000_SWFW_PHY0_SM 0x2
207#define E1000_SWFW_PHY1_SM 0x4
208#define E1000_SWFW_CSR_SM 0x8
209
210/* Device Control */
211#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
212#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
213#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
214#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
215#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
216#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
217#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
218#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
219#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
220#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
221#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
222#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
223#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
224#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
225#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
226#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
227#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
228#define E1000_CTRL_RST 0x04000000 /* Global reset */
229#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
230#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
231#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
232#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
233
234/*
235 * Bit definitions for the Management Data IO (MDIO) and Management Data
236 * Clock (MDC) pins in the Device Control Register.
237 */
238
239/* Device Status */
240#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
241#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
242#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
243#define E1000_STATUS_FUNC_SHIFT 2
244#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
245#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
246#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
247#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
248#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
249#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
250#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
251#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
252
253/* Constants used to interpret the masked PCI-X bus speed. */
254
255#define HALF_DUPLEX 1
256#define FULL_DUPLEX 2
257
258
259#define ADVERTISE_10_HALF 0x0001
260#define ADVERTISE_10_FULL 0x0002
261#define ADVERTISE_100_HALF 0x0004
262#define ADVERTISE_100_FULL 0x0008
263#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
264#define ADVERTISE_1000_FULL 0x0020
265
266/* 1000/H is not supported, nor spec-compliant. */
267#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
268 ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
269 ADVERTISE_1000_FULL)
270#define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
271 ADVERTISE_100_HALF | ADVERTISE_100_FULL)
272#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
273#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
274#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
275
276#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
277
278/* LED Control */
279#define E1000_PHY_LED0_MODE_MASK 0x00000007
280#define E1000_PHY_LED0_IVRT 0x00000008
281#define E1000_PHY_LED0_MASK 0x0000001F
282
283#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
284#define E1000_LEDCTL_LED0_MODE_SHIFT 0
285#define E1000_LEDCTL_LED0_IVRT 0x00000040
286#define E1000_LEDCTL_LED0_BLINK 0x00000080
287
288#define E1000_LEDCTL_MODE_LINK_UP 0x2
289#define E1000_LEDCTL_MODE_LED_ON 0xE
290#define E1000_LEDCTL_MODE_LED_OFF 0xF
291
292/* Transmit Descriptor bit definitions */
293#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
294#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
295#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
296#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
297#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
298#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
299#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
300#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
301#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
302#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
303#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
304#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
305#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
306#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
307#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
308#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
309#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
310#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
311#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
312
313/* Transmit Control */
314#define E1000_TCTL_EN 0x00000002 /* enable Tx */
315#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
316#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
317#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
318#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
319#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
320
321/* Transmit Arbitration Count */
322
323/* SerDes Control */
324#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
325
326/* Receive Checksum Control */
327#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
328#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
329
330/* Header split receive */
331#define E1000_RFCTL_NFSW_DIS 0x00000040
332#define E1000_RFCTL_NFSR_DIS 0x00000080
333#define E1000_RFCTL_ACK_DIS 0x00001000
334#define E1000_RFCTL_EXTEN 0x00008000
335#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
336#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
337
338/* Collision related configuration parameters */
339#define E1000_COLLISION_THRESHOLD 15
340#define E1000_CT_SHIFT 4
341#define E1000_COLLISION_DISTANCE 63
342#define E1000_COLD_SHIFT 12
343
344/* Default values for the transmit IPG register */
345#define DEFAULT_82543_TIPG_IPGT_COPPER 8
346
347#define E1000_TIPG_IPGT_MASK 0x000003FF
348
349#define DEFAULT_82543_TIPG_IPGR1 8
350#define E1000_TIPG_IPGR1_SHIFT 10
351
352#define DEFAULT_82543_TIPG_IPGR2 6
353#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
354#define E1000_TIPG_IPGR2_SHIFT 20
355
356#define MAX_JUMBO_FRAME_SIZE 0x3F00
357
358/* Extended Configuration Control and Size */
359#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
360#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
361#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
362#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
363#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
364#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
365#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
366#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
367#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
368
369#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
370#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
371#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
372#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
373
374#define E1000_KABGTXD_BGSQLBIAS 0x00050000
375
376/* PBA constants */
377#define E1000_PBA_8K 0x0008 /* 8KB */
378#define E1000_PBA_16K 0x0010 /* 16KB */
379
380#define E1000_PBS_16K E1000_PBA_16K
381
382#define IFS_MAX 80
383#define IFS_MIN 40
384#define IFS_RATIO 4
385#define IFS_STEP 10
386#define MIN_NUM_XMITS 1000
387
388/* SW Semaphore Register */
389#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
390#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
391#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
392
393#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
394
395/* Interrupt Cause Read */
396#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
397#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
398#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
399#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
400#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
401#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
402#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
403#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
404#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
405#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
406#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */
407
408/* PBA ECC Register */
409#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */
410#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */
411#define E1000_PBA_ECC_CORR_EN 0x00000001 /* ECC correction enable */
412#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */
413#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */
414
415/*
416 * This defines the bits that are set in the Interrupt Mask
417 * Set/Read Register. Each bit is documented below:
418 * o RXT0 = Receiver Timer Interrupt (ring 0)
419 * o TXDW = Transmit Descriptor Written Back
420 * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
421 * o RXSEQ = Receive Sequence Error
422 * o LSC = Link Status Change
423 */
424#define IMS_ENABLE_MASK ( \
425 E1000_IMS_RXT0 | \
426 E1000_IMS_TXDW | \
427 E1000_IMS_RXDMT0 | \
428 E1000_IMS_RXSEQ | \
429 E1000_IMS_LSC)
430
431/* Interrupt Mask Set */
432#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
433#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
434#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
435#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
436#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
437#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
438#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
439#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
440#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */
441#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */
442
443/* Interrupt Cause Set */
444#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
445#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
446#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
447
448/* Transmit Descriptor Control */
449#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
450#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
451#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
452#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
453#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
454#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
455/* Enable the counting of desc. still to be processed. */
456#define E1000_TXDCTL_COUNT_DESC 0x00400000
457
458/* Flow Control Constants */
459#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
460#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
461#define FLOW_CONTROL_TYPE 0x8808
462
463/* 802.1q VLAN Packet Size */
464#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
465
466/* Receive Address */
467/*
468 * Number of high/low register pairs in the RAR. The RAR (Receive Address
469 * Registers) holds the directed and multicast addresses that we monitor.
470 * Technically, we have 16 spots. However, we reserve one of these spots
471 * (RAR[15]) for our directed address used by controllers with
472 * manageability enabled, allowing us room for 15 multicast addresses.
473 */
474#define E1000_RAR_ENTRIES 15
475#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
476#define E1000_RAL_MAC_ADDR_LEN 4
477#define E1000_RAH_MAC_ADDR_LEN 2
478
479/* Error Codes */
480#define E1000_ERR_NVM 1
481#define E1000_ERR_PHY 2
482#define E1000_ERR_CONFIG 3
483#define E1000_ERR_PARAM 4
484#define E1000_ERR_MAC_INIT 5
485#define E1000_ERR_PHY_TYPE 6
486#define E1000_ERR_RESET 9
487#define E1000_ERR_MASTER_REQUESTS_PENDING 10
488#define E1000_ERR_HOST_INTERFACE_COMMAND 11
489#define E1000_BLK_PHY_RESET 12
490#define E1000_ERR_SWFW_SYNC 13
491#define E1000_NOT_IMPLEMENTED 14
492#define E1000_ERR_INVALID_ARGUMENT 16
493#define E1000_ERR_NO_SPACE 17
494#define E1000_ERR_NVM_PBA_SECTION 18
495
496/* Loop limit on how long we wait for auto-negotiation to complete */
497#define FIBER_LINK_UP_LIMIT 50
498#define COPPER_LINK_UP_LIMIT 10
499#define PHY_AUTO_NEG_LIMIT 45
500#define PHY_FORCE_LIMIT 20
501/* Number of 100 microseconds we wait for PCI Express master disable */
502#define MASTER_DISABLE_TIMEOUT 800
503/* Number of milliseconds we wait for PHY configuration done after MAC reset */
504#define PHY_CFG_TIMEOUT 100
505/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */
506#define MDIO_OWNERSHIP_TIMEOUT 10
507/* Number of milliseconds for NVM auto read done after MAC reset. */
508#define AUTO_READ_DONE_TIMEOUT 10
509
510/* Flow Control */
511#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
512#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
513#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
514
515/* Transmit Configuration Word */
516#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
517#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
518#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
519#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
520#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
521
522/* Receive Configuration Word */
523#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
524#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
525#define E1000_RXCW_C 0x20000000 /* Receive config */
526#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
527
528/* PCI Express Control */
529#define E1000_GCR_RXD_NO_SNOOP 0x00000001
530#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
531#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
532#define E1000_GCR_TXD_NO_SNOOP 0x00000008
533#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
534#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
535
536#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
537 E1000_GCR_RXDSCW_NO_SNOOP | \
538 E1000_GCR_RXDSCR_NO_SNOOP | \
539 E1000_GCR_TXD_NO_SNOOP | \
540 E1000_GCR_TXDSCW_NO_SNOOP | \
541 E1000_GCR_TXDSCR_NO_SNOOP)
542
543/* PHY Control Register */
544#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
545#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
546#define MII_CR_POWER_DOWN 0x0800 /* Power down */
547#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
548#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
549#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
550#define MII_CR_SPEED_1000 0x0040
551#define MII_CR_SPEED_100 0x2000
552#define MII_CR_SPEED_10 0x0000
553
554/* PHY Status Register */
555#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
556#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
557
558/* Autoneg Advertisement Register */
559#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
560#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
561#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
562#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
563#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
564#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
565
566/* Link Partner Ability Register (Base Page) */
567#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
568#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
569
570/* Autoneg Expansion Register */
571#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
572
573/* 1000BASE-T Control Register */
574#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
575#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
576 /* 0=DTE device */
577#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
578 /* 0=Configure PHY as Slave */
579#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
580 /* 0=Automatic Master/Slave config */
581
582/* 1000BASE-T Status Register */
583#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
584#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
585
586
587/* PHY 1000 MII Register/Bit Definitions */
588/* PHY Registers defined by IEEE */
589#define PHY_CONTROL 0x00 /* Control Register */
590#define PHY_STATUS 0x01 /* Status Register */
591#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
592#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
593#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
594#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
595#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
596#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
597#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
598#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
599
600#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
601
602/* NVM Control */
603#define E1000_EECD_SK 0x00000001 /* NVM Clock */
604#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
605#define E1000_EECD_DI 0x00000004 /* NVM Data In */
606#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
607#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
608#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
609#define E1000_EECD_PRES 0x00000100 /* NVM Present */
610#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
611/* NVM Addressing bits based on type (0-small, 1-large) */
612#define E1000_EECD_ADDR_BITS 0x00000400
613#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
614#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
615#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
616#define E1000_EECD_SIZE_EX_SHIFT 11
617#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
618#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
619#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
620#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
621
622#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */
623#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
624#define E1000_NVM_RW_REG_START 1 /* Start operation */
625#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
626#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
627#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
628#define E1000_FLASH_UPDATES 2000
629
630/* NVM Word Offsets */
631#define NVM_COMPAT 0x0003
632#define NVM_ID_LED_SETTINGS 0x0004
633#define NVM_INIT_CONTROL2_REG 0x000F
634#define NVM_INIT_CONTROL3_PORT_B 0x0014
635#define NVM_INIT_3GIO_3 0x001A
636#define NVM_INIT_CONTROL3_PORT_A 0x0024
637#define NVM_CFG 0x0012
638#define NVM_ALT_MAC_ADDR_PTR 0x0037
639#define NVM_CHECKSUM_REG 0x003F
640
641#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
642
643#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
644#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
645
646/* Mask bits for fields in Word 0x0f of the NVM */
647#define NVM_WORD0F_PAUSE_MASK 0x3000
#define NVM_WORD0F_PAUSE	0x1000
#define NVM_WORD0F_ASM_DIR	0x2000

/* Mask bits for fields in Word 0x1a of the NVM */
#define NVM_WORD1A_ASPM_MASK	0x000C

/* Mask bits for fields in Word 0x03 of the EEPROM */
#define NVM_COMPAT_LOM		0x0800

/* length of string needed to store PBA number */
#define E1000_PBANUM_LENGTH	11

/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM			0xBABA

/* PBA (printed board assembly) number words */
#define NVM_PBA_OFFSET_0	8
#define NVM_PBA_OFFSET_1	9
#define NVM_PBA_PTR_GUARD	0xFAFA
#define NVM_WORD_SIZE_BASE_SHIFT	6

/* NVM Commands - SPI */
#define NVM_MAX_RETRY_SPI	5000	/* Max wait of 5ms, for RDY signal */
#define NVM_READ_OPCODE_SPI	0x03	/* NVM read opcode */
#define NVM_WRITE_OPCODE_SPI	0x02	/* NVM write opcode */
#define NVM_A8_OPCODE_SPI	0x08	/* opcode bit-3 = address bit-8 */
#define NVM_WREN_OPCODE_SPI	0x06	/* NVM set Write Enable latch */
#define NVM_RDSR_OPCODE_SPI	0x05	/* NVM read Status register */

/* SPI NVM Status Register */
#define NVM_STATUS_RDY_SPI	0x01

/* Word definitions for ID LED Settings */
#define ID_LED_RESERVED_0000	0x0000
#define ID_LED_RESERVED_FFFF	0xFFFF
/* Default: LED1 off/on, LED2 off/off, LED3 default (nibbles 3..0) */
#define ID_LED_DEFAULT		((ID_LED_OFF1_ON2 << 12) | \
				 (ID_LED_OFF1_OFF2 << 8) | \
				 (ID_LED_DEF1_DEF2 << 4) | \
				 (ID_LED_DEF1_DEF2))
#define ID_LED_DEF1_DEF2	0x1
#define ID_LED_DEF1_ON2		0x2
#define ID_LED_DEF1_OFF2	0x3
#define ID_LED_ON1_DEF2		0x4
#define ID_LED_ON1_ON2		0x5
#define ID_LED_ON1_OFF2		0x6
#define ID_LED_OFF1_DEF2	0x7
#define ID_LED_OFF1_ON2		0x8
#define ID_LED_OFF1_OFF2	0x9

#define IGP_ACTIVITY_LED_MASK	0xFFFFF0FF
#define IGP_ACTIVITY_LED_ENABLE	0x0300
#define IGP_LED3_MODE		0x07000000

/* PCI/PCI-X/PCI-EX Config space */
#define PCI_HEADER_TYPE_REGISTER	0x0E
#define PCIE_LINK_STATUS		0x12

#define PCI_HEADER_TYPE_MULTIFUNC	0x80
#define PCIE_LINK_WIDTH_MASK		0x3F0
#define PCIE_LINK_WIDTH_SHIFT		4

#define PHY_REVISION_MASK	0xFFFFFFF0
#define MAX_PHY_REG_ADDRESS	0x1F	/* 5 bit address bus (0-0x1F) */
#define MAX_PHY_MULTI_PAGE_REG	0xF

/* Bit definitions for valid PHY IDs. */
/*
 * I = Integrated
 * E = External
 */
#define M88E1000_E_PHY_ID	0x01410C50
#define M88E1000_I_PHY_ID	0x01410C30
#define M88E1011_I_PHY_ID	0x01410C20
#define IGP01E1000_I_PHY_ID	0x02A80380
#define M88E1111_I_PHY_ID	0x01410CC0
#define GG82563_E_PHY_ID	0x01410CA0
#define IGP03E1000_E_PHY_ID	0x02A80390
#define IFE_E_PHY_ID		0x02A80330
#define IFE_PLUS_E_PHY_ID	0x02A80320
#define IFE_C_E_PHY_ID		0x02A80310
#define BME1000_E_PHY_ID	0x01410CB0
#define BME1000_E_PHY_ID_R2	0x01410CB1
#define I82577_E_PHY_ID		0x01540050
#define I82578_E_PHY_ID		0x004DD040
#define I82579_E_PHY_ID		0x01540090

/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL		0x10	/* PHY Specific Control Register */
#define M88E1000_PHY_SPEC_STATUS	0x11	/* PHY Specific Status Register */
#define M88E1000_EXT_PHY_SPEC_CTRL	0x14	/* Extended PHY Specific Control */

#define M88E1000_PHY_PAGE_SELECT	0x1D	/* Reg 29 for page number setting */
#define M88E1000_PHY_GEN_CONTROL	0x1E	/* Its meaning depends on reg 29 */

/* M88E1000 PHY Specific Control Register */
#define M88E1000_PSCR_POLARITY_REVERSAL	0x0002	/* 1=Polarity Reversal enabled */
#define M88E1000_PSCR_MDI_MANUAL_MODE	0x0000	/* MDI Crossover Mode bits 6:5 */
						/* Manual MDI configuration */
#define M88E1000_PSCR_MDIX_MANUAL_MODE	0x0020	/* Manual MDIX configuration */
/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
#define M88E1000_PSCR_AUTO_X_1000T	0x0040
/* Auto crossover enabled all speeds */
#define M88E1000_PSCR_AUTO_X_MODE	0x0060
/*
 * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
 * 0=Normal 10BASE-T Rx Threshold
 */
#define M88E1000_PSCR_ASSERT_CRS_ON_TX	0x0800	/* 1=Assert CRS on Transmit */

/* M88E1000 PHY Specific Status Register */
#define M88E1000_PSSR_REV_POLARITY	0x0002	/* 1=Polarity reversed */
#define M88E1000_PSSR_DOWNSHIFT		0x0020	/* 1=Downshifted */
#define M88E1000_PSSR_MDIX		0x0040	/* 1=MDIX; 0=MDI */
/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */
#define M88E1000_PSSR_CABLE_LENGTH	0x0380
#define M88E1000_PSSR_SPEED		0xC000	/* Speed, bits 14:15 */
#define M88E1000_PSSR_1000MBS		0x8000	/* 10=1000Mbs */

#define M88E1000_PSSR_CABLE_LENGTH_SHIFT	7

/*
 * Number of times we will attempt to autonegotiate before downshifting if we
 * are the master
 */
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK	0x0C00
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X	0x0000
/*
 * Number of times we will attempt to autonegotiate before downshifting if we
 * are the slave
 */
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK	0x0300
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X	0x0100
#define M88E1000_EPSCR_TX_CLK_25		0x0070	/* 25 MHz TX_CLK */

/* M88EC018 Rev 2 specific DownShift settings */
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK	0x0E00
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X	0x0800

#define I82578_EPSCR_DOWNSHIFT_ENABLE		0x0020
#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK	0x001C

/* BME1000 PHY Specific Control Register */
#define BME1000_PSCR_ENABLE_DOWNSHIFT	0x0800	/* 1 = enable downshift */


/* Compose a flat PHY register address from a 5-bit page and 5-bit offset */
#define PHY_PAGE_SHIFT		5
#define PHY_REG(page, reg)	(((page) << PHY_PAGE_SHIFT) | \
				 ((reg) & MAX_PHY_REG_ADDRESS))

/*
 * Bits...
 * 15-5: page
 * 4-0: register offset
 */
#define GG82563_PAGE_SHIFT	5
#define GG82563_REG(page, reg)	\
	(((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
#define GG82563_MIN_ALT_REG	30

/* GG82563 Specific Registers */
#define GG82563_PHY_SPEC_CTRL		\
	GG82563_REG(0, 16)	/* PHY Specific Control */
#define GG82563_PHY_PAGE_SELECT		\
	GG82563_REG(0, 22)	/* Page Select */
#define GG82563_PHY_SPEC_CTRL_2		\
	GG82563_REG(0, 26)	/* PHY Specific Control 2 */
#define GG82563_PHY_PAGE_SELECT_ALT	\
	GG82563_REG(0, 29)	/* Alternate Page Select */

#define GG82563_PHY_MAC_SPEC_CTRL	\
	GG82563_REG(2, 21)	/* MAC Specific Control Register */

#define GG82563_PHY_DSP_DISTANCE	\
	GG82563_REG(5, 26)	/* DSP Distance */

/* Page 193 - Port Control Registers */
#define GG82563_PHY_KMRN_MODE_CTRL	\
	GG82563_REG(193, 16)	/* Kumeran Mode Control */
#define GG82563_PHY_PWR_MGMT_CTRL	\
	GG82563_REG(193, 20)	/* Power Management Control */

/* Page 194 - KMRN Registers */
#define GG82563_PHY_INBAND_CTRL		\
	GG82563_REG(194, 18)	/* Inband Control */

/* MDI Control */
#define E1000_MDIC_REG_SHIFT	16
#define E1000_MDIC_PHY_SHIFT	21
#define E1000_MDIC_OP_WRITE	0x04000000
#define E1000_MDIC_OP_READ	0x08000000
#define E1000_MDIC_READY	0x10000000
#define E1000_MDIC_ERROR	0x40000000

/* SerDes Control */
#define E1000_GEN_POLL_TIMEOUT	640

844#endif /* _E1000_DEFINES_H_ */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
new file mode 100644
index 00000000000..8533ad7f355
--- /dev/null
+++ b/drivers/net/e1000e/e1000.h
@@ -0,0 +1,741 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/* Linux PRO/1000 Ethernet Driver main header file */
30
31#ifndef _E1000_H_
32#define _E1000_H_
33
34#include <linux/bitops.h>
35#include <linux/types.h>
36#include <linux/timer.h>
37#include <linux/workqueue.h>
38#include <linux/io.h>
39#include <linux/netdevice.h>
40#include <linux/pci.h>
41#include <linux/pci-aspm.h>
42#include <linux/crc32.h>
43#include <linux/if_vlan.h>
44
45#include "hw.h"
46
struct e1000_info;

/*
 * Message helpers: e_dbg expects a local 'hw' (struct e1000_hw *) in scope,
 * the others expect a local 'adapter' (struct e1000_adapter *).
 */
#define e_dbg(format, arg...) \
	netdev_dbg(hw->adapter->netdev, format, ## arg)
#define e_err(format, arg...) \
	netdev_err(adapter->netdev, format, ## arg)
#define e_info(format, arg...) \
	netdev_info(adapter->netdev, format, ## arg)
#define e_warn(format, arg...) \
	netdev_warn(adapter->netdev, format, ## arg)
#define e_notice(format, arg...) \
	netdev_notice(adapter->netdev, format, ## arg)


/* Interrupt modes, as used by the IntMode parameter */
#define E1000E_INT_MODE_LEGACY		0
#define E1000E_INT_MODE_MSI		1
#define E1000E_INT_MODE_MSIX		2

/* Tx/Rx descriptor defines */
#define E1000_DEFAULT_TXD		256
#define E1000_MAX_TXD			4096
#define E1000_MIN_TXD			64

#define E1000_DEFAULT_RXD		256
#define E1000_MAX_RXD			4096
#define E1000_MIN_RXD			64

#define E1000_MIN_ITR_USECS		10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS		10000 /* 100 irq/sec */

/* Early Receive defines */
#define E1000_ERT_2048			0x100

#define E1000_FC_PAUSE_TIME		0x0680 /* 858 usec */

/* How many Tx Descriptors do we need to call netif_wake_queue ? */
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define E1000_RX_BUFFER_WRITE		16 /* Must be power of 2 */

#define AUTO_ALL_MODES			0
#define E1000_EEPROM_APME		0x0400

#define E1000_MNG_VLAN_NONE		(-1)

/* Number of packet split data buffers (not including the header buffer) */
#define PS_PAGE_BUFFERS			(MAX_PS_BUFFERS - 1)

#define DEFAULT_JUMBO			9234

/* BM/HV Specific Registers */
#define BM_PORT_CTRL_PAGE		769

/*
 * Encode a (page, reg) pair into a flat BM PHY address; offsets above the
 * 5-bit register space are carried in the bits above PHY_UPPER_SHIFT.
 */
#define PHY_UPPER_SHIFT			21
#define BM_PHY_REG(page, reg) \
	(((reg) & MAX_PHY_REG_ADDRESS) |\
	 (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
	 (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))

/* PHY Wakeup Registers and defines */
#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
#define BM_RCTL         PHY_REG(BM_WUC_PAGE, 0)
#define BM_WUC          PHY_REG(BM_WUC_PAGE, 1)
#define BM_WUFC         PHY_REG(BM_WUC_PAGE, 2)
#define BM_WUS          PHY_REG(BM_WUC_PAGE, 3)
#define BM_RAR_L(_i)    (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
#define BM_RAR_M(_i)    (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
#define BM_RAR_H(_i)    (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
#define BM_MTA(_i)      (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))

#define BM_RCTL_UPE           0x0001 /* Unicast Promiscuous Mode */
#define BM_RCTL_MPE           0x0002 /* Multicast Promiscuous Mode */
#define BM_RCTL_MO_SHIFT      3      /* Multicast Offset Shift */
#define BM_RCTL_MO_MASK       (3 << 3) /* Multicast Offset Mask */
#define BM_RCTL_BAM           0x0020 /* Broadcast Accept Mode */
#define BM_RCTL_PMCF          0x0040 /* Pass MAC Control Frames */
#define BM_RCTL_RFCE          0x0080 /* Rx Flow Control Enable */

/* HV PHY statistics page; counters are split into upper/lower register pairs */
#define HV_STATS_PAGE	778
#define HV_SCC_UPPER	PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */
#define HV_SCC_LOWER	PHY_REG(HV_STATS_PAGE, 17)
#define HV_ECOL_UPPER	PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */
#define HV_ECOL_LOWER	PHY_REG(HV_STATS_PAGE, 19)
#define HV_MCC_UPPER	PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */
#define HV_MCC_LOWER	PHY_REG(HV_STATS_PAGE, 21)
#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */
#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
#define HV_COLC_UPPER	PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */
#define HV_COLC_LOWER	PHY_REG(HV_STATS_PAGE, 26)
#define HV_DC_UPPER	PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
#define HV_DC_LOWER	PHY_REG(HV_STATS_PAGE, 28)
#define HV_TNCRS_UPPER	PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */
#define HV_TNCRS_LOWER	PHY_REG(HV_STATS_PAGE, 30)

#define E1000_FCRTV_PCH	0x05F40 /* PCH Flow Control Refresh Timer Value */

/* BM PHY Copper Specific Status */
#define BM_CS_STATUS                      17
#define BM_CS_STATUS_LINK_UP              0x0400
#define BM_CS_STATUS_RESOLVED             0x0800
#define BM_CS_STATUS_SPEED_MASK           0xC000
#define BM_CS_STATUS_SPEED_1000           0x8000

/* 82577 Mobile Phy Status Register */
#define HV_M_STATUS                       26
#define HV_M_STATUS_AUTONEG_COMPLETE      0x1000
#define HV_M_STATUS_SPEED_MASK            0x0300
#define HV_M_STATUS_SPEED_1000            0x0200
#define HV_M_STATUS_LINK_UP               0x0040

#define E1000_ICH_FWSM_PCIM2PCI		0x01000000 /* ME PCIm-to-PCI active */
#define E1000_ICH_FWSM_PCIM2PCI_COUNT	2000

/* Time to wait before putting the device into D3 if there's no link (in ms). */
#define LINK_TIMEOUT		100

#define DEFAULT_RDTR	0
#define DEFAULT_RADV	8
#define BURST_RDTR	0x20
#define BURST_RADV	0x20

/*
 * in the case of WTHRESH, it appears at least the 82571/2 hardware
 * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
 * WTHRESH=4, and since we want 64 bytes at a time written back, set
 * it to 5
 */
#define E1000_TXDCTL_DMA_BURST_ENABLE                          \
	(E1000_TXDCTL_GRAN | /* set descriptor granularity */  \
	 E1000_TXDCTL_COUNT_DESC |                             \
	 (5 << 16) | /* wthresh must be +1 more than desired */\
	 (1 << 8)  | /* hthresh */                             \
	 0x1f)       /* pthresh */

#define E1000_RXDCTL_DMA_BURST_ENABLE                        \
	(0x01000000 | /* set descriptor granularity */       \
	 (4 << 16)  | /* set writeback threshold    */       \
	 (4 << 8)   | /* set prefetch threshold     */       \
	 0x20)        /* set hthresh                */

#define E1000_TIDV_FPD (1 << 31)
#define E1000_RDTR_FPD (1 << 31)

/* Board index for each supported MAC family; used to select an e1000_info */
enum e1000_boards {
	board_82571,
	board_82572,
	board_82573,
	board_82574,
	board_82583,
	board_80003es2lan,
	board_ich8lan,
	board_ich9lan,
	board_ich10lan,
	board_pchlan,
	board_pch2lan,
};
204
205struct e1000_ps_page {
206 struct page *page;
207 u64 dma; /* must be u64 - written to hw */
208};
209
210/*
211 * wrappers around a pointer to a socket buffer,
212 * so a DMA handle can be stored along with the buffer
213 */
214struct e1000_buffer {
215 dma_addr_t dma;
216 struct sk_buff *skb;
217 union {
218 /* Tx */
219 struct {
220 unsigned long time_stamp;
221 u16 length;
222 u16 next_to_watch;
223 unsigned int segs;
224 unsigned int bytecount;
225 u16 mapped_as_page;
226 };
227 /* Rx */
228 struct {
229 /* arrays of page information for packet split */
230 struct e1000_ps_page *ps_pages;
231 struct page *page;
232 };
233 };
234};
235
236struct e1000_ring {
237 void *desc; /* pointer to ring memory */
238 dma_addr_t dma; /* phys address of ring */
239 unsigned int size; /* length of ring in bytes */
240 unsigned int count; /* number of desc. in ring */
241
242 u16 next_to_use;
243 u16 next_to_clean;
244
245 u16 head;
246 u16 tail;
247
248 /* array of buffer information structs */
249 struct e1000_buffer *buffer_info;
250
251 char name[IFNAMSIZ + 5];
252 u32 ims_val;
253 u32 itr_val;
254 u16 itr_register;
255 int set_itr;
256
257 struct sk_buff *rx_skb_top;
258};
259
260/* PHY register snapshot values */
261struct e1000_phy_regs {
262 u16 bmcr; /* basic mode control register */
263 u16 bmsr; /* basic mode status register */
264 u16 advertise; /* auto-negotiation advertisement */
265 u16 lpa; /* link partner ability register */
266 u16 expansion; /* auto-negotiation expansion reg */
267 u16 ctrl1000; /* 1000BASE-T control register */
268 u16 stat1000; /* 1000BASE-T status register */
269 u16 estatus; /* extended status register */
270};
271
272/* board specific private data structure */
273struct e1000_adapter {
274 struct timer_list watchdog_timer;
275 struct timer_list phy_info_timer;
276 struct timer_list blink_timer;
277
278 struct work_struct reset_task;
279 struct work_struct watchdog_task;
280
281 const struct e1000_info *ei;
282
283 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
284 u32 bd_number;
285 u32 rx_buffer_len;
286 u16 mng_vlan_id;
287 u16 link_speed;
288 u16 link_duplex;
289 u16 eeprom_vers;
290
291 /* track device up/down/testing state */
292 unsigned long state;
293
294 /* Interrupt Throttle Rate */
295 u32 itr;
296 u32 itr_setting;
297 u16 tx_itr;
298 u16 rx_itr;
299
300 /*
301 * Tx
302 */
303 struct e1000_ring *tx_ring /* One per active queue */
304 ____cacheline_aligned_in_smp;
305
306 struct napi_struct napi;
307
308 unsigned int restart_queue;
309 u32 txd_cmd;
310
311 bool detect_tx_hung;
312 u8 tx_timeout_factor;
313
314 u32 tx_int_delay;
315 u32 tx_abs_int_delay;
316
317 unsigned int total_tx_bytes;
318 unsigned int total_tx_packets;
319 unsigned int total_rx_bytes;
320 unsigned int total_rx_packets;
321
322 /* Tx stats */
323 u64 tpt_old;
324 u64 colc_old;
325 u32 gotc;
326 u64 gotc_old;
327 u32 tx_timeout_count;
328 u32 tx_fifo_head;
329 u32 tx_head_addr;
330 u32 tx_fifo_size;
331 u32 tx_dma_failed;
332
333 /*
334 * Rx
335 */
336 bool (*clean_rx) (struct e1000_adapter *adapter,
337 int *work_done, int work_to_do)
338 ____cacheline_aligned_in_smp;
339 void (*alloc_rx_buf) (struct e1000_adapter *adapter,
340 int cleaned_count, gfp_t gfp);
341 struct e1000_ring *rx_ring;
342
343 u32 rx_int_delay;
344 u32 rx_abs_int_delay;
345
346 /* Rx stats */
347 u64 hw_csum_err;
348 u64 hw_csum_good;
349 u64 rx_hdr_split;
350 u32 gorc;
351 u64 gorc_old;
352 u32 alloc_rx_buff_failed;
353 u32 rx_dma_failed;
354
355 unsigned int rx_ps_pages;
356 u16 rx_ps_bsize0;
357 u32 max_frame_size;
358 u32 min_frame_size;
359
360 /* OS defined structs */
361 struct net_device *netdev;
362 struct pci_dev *pdev;
363
364 /* structs defined in e1000_hw.h */
365 struct e1000_hw hw;
366
367 spinlock_t stats64_lock;
368 struct e1000_hw_stats stats;
369 struct e1000_phy_info phy_info;
370 struct e1000_phy_stats phy_stats;
371
372 /* Snapshot of PHY registers */
373 struct e1000_phy_regs phy_regs;
374
375 struct e1000_ring test_tx_ring;
376 struct e1000_ring test_rx_ring;
377 u32 test_icr;
378
379 u32 msg_enable;
380 unsigned int num_vectors;
381 struct msix_entry *msix_entries;
382 int int_mode;
383 u32 eiac_mask;
384
385 u32 eeprom_wol;
386 u32 wol;
387 u32 pba;
388 u32 max_hw_frame_size;
389
390 bool fc_autoneg;
391
392 unsigned int flags;
393 unsigned int flags2;
394 struct work_struct downshift_task;
395 struct work_struct update_phy_task;
396 struct work_struct print_hang_task;
397
398 bool idle_check;
399 int phy_hang_count;
400};
401
402struct e1000_info {
403 enum e1000_mac_type mac;
404 unsigned int flags;
405 unsigned int flags2;
406 u32 pba;
407 u32 max_hw_frame_size;
408 s32 (*get_variants)(struct e1000_adapter *);
409 struct e1000_mac_operations *mac_ops;
410 struct e1000_phy_operations *phy_ops;
411 struct e1000_nvm_operations *nvm_ops;
412};
413
/* hardware capability, feature, and workaround flags */
#define FLAG_HAS_AMT                      (1 << 0)
#define FLAG_HAS_FLASH                    (1 << 1)
#define FLAG_HAS_HW_VLAN_FILTER          (1 << 2)
#define FLAG_HAS_WOL                      (1 << 3)
#define FLAG_HAS_ERT                      (1 << 4)
#define FLAG_HAS_CTRLEXT_ON_LOAD          (1 << 5)
#define FLAG_HAS_SWSM_ON_LOAD             (1 << 6)
#define FLAG_HAS_JUMBO_FRAMES             (1 << 7)
#define FLAG_READ_ONLY_NVM                (1 << 8)
#define FLAG_IS_ICH                       (1 << 9)
#define FLAG_HAS_MSIX                     (1 << 10)
#define FLAG_HAS_SMART_POWER_DOWN         (1 << 11)
#define FLAG_IS_QUAD_PORT_A               (1 << 12)
#define FLAG_IS_QUAD_PORT                 (1 << 13)
#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN   (1 << 14)
#define FLAG_APME_IN_WUC                  (1 << 15)
#define FLAG_APME_IN_CTRL3                (1 << 16)
#define FLAG_APME_CHECK_PORT_B            (1 << 17)
#define FLAG_DISABLE_FC_PAUSE_TIME        (1 << 18)
#define FLAG_NO_WAKE_UCAST                (1 << 19)
#define FLAG_MNG_PT_ENABLED               (1 << 20)
#define FLAG_RESET_OVERWRITES_LAA         (1 << 21)
#define FLAG_TARC_SPEED_MODE_BIT          (1 << 22)
#define FLAG_TARC_SET_BIT_ZERO            (1 << 23)
#define FLAG_RX_NEEDS_RESTART             (1 << 24)
#define FLAG_LSC_GIG_SPEED_DROP           (1 << 25)
#define FLAG_SMART_POWER_DOWN             (1 << 26)
#define FLAG_MSI_ENABLED                  (1 << 27)
#define FLAG_RX_CSUM_ENABLED              (1 << 28)
#define FLAG_TSO_FORCE                    (1 << 29)
#define FLAG_RX_RESTART_NOW               (1 << 30)
#define FLAG_MSI_TEST_FAILED              (1 << 31)

/* CRC Stripping defines */
#define FLAG2_CRC_STRIPPING               (1 << 0)
#define FLAG2_HAS_PHY_WAKEUP              (1 << 1)
#define FLAG2_IS_DISCARDING               (1 << 2)
#define FLAG2_DISABLE_ASPM_L1             (1 << 3)
#define FLAG2_HAS_PHY_STATS               (1 << 4)
#define FLAG2_HAS_EEE                     (1 << 5)
#define FLAG2_DMA_BURST                   (1 << 6)
#define FLAG2_DISABLE_ASPM_L0S            (1 << 7)
#define FLAG2_DISABLE_AIM                 (1 << 8)
#define FLAG2_CHECK_PHY_HANG              (1 << 9)
#define FLAG2_NO_DISABLE_RX               (1 << 10)
#define FLAG2_PCIM2PCI_ARBITER_WA         (1 << 11)

/* Typed accessors for the i-th descriptor of ring R */
#define E1000_RX_DESC_PS(R, i)	    \
	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
#define E1000_GET_DESC(R, i, type)  (&(((struct type *)((R).desc))[i]))
#define E1000_RX_DESC(R, i)	    E1000_GET_DESC(R, i, e1000_rx_desc)
#define E1000_TX_DESC(R, i)	    E1000_GET_DESC(R, i, e1000_tx_desc)
#define E1000_CONTEXT_DESC(R, i)    E1000_GET_DESC(R, i, e1000_context_desc)

/* bit numbers for adapter->state */
enum e1000_state_t {
	__E1000_TESTING,
	__E1000_RESETTING,
	__E1000_DOWN
};

/* interrupt-moderation buckets used by the ITR algorithm */
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
481
482extern char e1000e_driver_name[];
483extern const char e1000e_driver_version[];
484
485extern void e1000e_check_options(struct e1000_adapter *adapter);
486extern void e1000e_set_ethtool_ops(struct net_device *netdev);
487
488extern int e1000e_up(struct e1000_adapter *adapter);
489extern void e1000e_down(struct e1000_adapter *adapter);
490extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
491extern void e1000e_reset(struct e1000_adapter *adapter);
492extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
493extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
494extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
495extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
496extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
497extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
498 struct rtnl_link_stats64
499 *stats);
500extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
501extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
502extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
503extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
504
505extern unsigned int copybreak;
506
507extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw);
508
509extern struct e1000_info e1000_82571_info;
510extern struct e1000_info e1000_82572_info;
511extern struct e1000_info e1000_82573_info;
512extern struct e1000_info e1000_82574_info;
513extern struct e1000_info e1000_82583_info;
514extern struct e1000_info e1000_ich8_info;
515extern struct e1000_info e1000_ich9_info;
516extern struct e1000_info e1000_ich10_info;
517extern struct e1000_info e1000_pch_info;
518extern struct e1000_info e1000_pch2_info;
519extern struct e1000_info e1000_es2_info;
520
521extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
522 u32 pba_num_size);
523
524extern s32 e1000e_commit_phy(struct e1000_hw *hw);
525
526extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
527
528extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
529extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
530
531extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
532extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
533 bool state);
534extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
535extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
536extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
537extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
538extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
539extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
540extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
541
542extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
543extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
544extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
545extern s32 e1000e_setup_led_generic(struct e1000_hw *hw);
546extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
547extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
548extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
549extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
550extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
551extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
552extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
553extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
554extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
555extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
556extern s32 e1000e_id_led_init(struct e1000_hw *hw);
557extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
558extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
559extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
560extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
561extern s32 e1000e_setup_link(struct e1000_hw *hw);
562extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
563extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
564extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
565 u8 *mc_addr_list,
566 u32 mc_addr_count);
567extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
568extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
569extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
570extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
571extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
572extern void e1000e_config_collision_dist(struct e1000_hw *hw);
573extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
574extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
575extern s32 e1000e_blink_led_generic(struct e1000_hw *hw);
576extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
577extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
578extern void e1000e_reset_adaptive(struct e1000_hw *hw);
579extern void e1000e_update_adaptive(struct e1000_hw *hw);
580
581extern s32 e1000e_setup_copper_link(struct e1000_hw *hw);
582extern s32 e1000e_get_phy_id(struct e1000_hw *hw);
583extern void e1000e_put_hw_semaphore(struct e1000_hw *hw);
584extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
585extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
586extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
587extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
588extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
589extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
590extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
591 u16 *data);
592extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
593extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
594extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
595extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
596 u16 data);
597extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
598extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
599extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
600extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
601extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
602extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
603extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
604extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
605extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
606extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
607extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
608extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
609extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
610 u16 *phy_reg);
611extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
612 u16 *phy_reg);
613extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
614extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
615extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
616extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
617extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
618 u16 data);
619extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
620extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
621 u16 *data);
622extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
623 u32 usec_interval, bool *success);
624extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
625extern void e1000_power_up_phy_copper(struct e1000_hw *hw);
626extern void e1000_power_down_phy_copper(struct e1000_hw *hw);
627extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
628extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
629extern s32 e1000e_check_downshift(struct e1000_hw *hw);
630extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
631extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
632 u16 *data);
633extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
634 u16 *data);
635extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
636extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
637 u16 data);
638extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
639 u16 data);
640extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
641extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
642extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
643extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
644extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
645extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
646
647extern s32 e1000_check_polarity_m88(struct e1000_hw *hw);
648extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
649extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
650extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
651extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
652extern bool e1000_check_phy_82574(struct e1000_hw *hw);
653
654static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
655{
656 return hw->phy.ops.reset(hw);
657}
658
659static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
660{
661 return hw->phy.ops.check_reset_block(hw);
662}
663
664static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
665{
666 return hw->phy.ops.read_reg(hw, offset, data);
667}
668
669static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
670{
671 return hw->phy.ops.write_reg(hw, offset, data);
672}
673
674static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
675{
676 return hw->phy.ops.get_cable_length(hw);
677}
678
679extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
680extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
681extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
682extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
683extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
684extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
685extern void e1000e_release_nvm(struct e1000_hw *hw);
686extern void e1000e_reload_nvm(struct e1000_hw *hw);
687extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
688
689static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
690{
691 if (hw->mac.ops.read_mac_addr)
692 return hw->mac.ops.read_mac_addr(hw);
693
694 return e1000_read_mac_addr_generic(hw);
695}
696
697static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
698{
699 return hw->nvm.ops.validate(hw);
700}
701
702static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
703{
704 return hw->nvm.ops.update(hw);
705}
706
707static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
708{
709 return hw->nvm.ops.read(hw, offset, words, data);
710}
711
712static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
713{
714 return hw->nvm.ops.write(hw, offset, words, data);
715}
716
717static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
718{
719 return hw->phy.ops.get_info(hw);
720}
721
722static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
723{
724 return hw->mac.ops.check_mng_mode(hw);
725}
726
727extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
728extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
729extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
730
731static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
732{
733 return readl(hw->hw_addr + reg);
734}
735
736static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
737{
738 writel(val, hw->hw_addr + reg);
739}
740
741#endif /* _E1000_H_ */
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
new file mode 100644
index 00000000000..e4f42257c24
--- /dev/null
+++ b/drivers/net/e1000e/es2lan.c
@@ -0,0 +1,1516 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/*
30 * 80003ES2LAN Gigabit Ethernet Controller (Copper)
31 * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
32 */
33
34#include "e1000.h"
35
36#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
37#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
38#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
39#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
40
41#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
42#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
43#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
44
45#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
46#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
47#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
48
49#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
50#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
51
52#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
53#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
54
55#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
56#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
57
58/* GG82563 PHY Specific Status Register (Page 0, Register 16 */
59#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */
60#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
61#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
62#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
63#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
64
65/* PHY Specific Control Register 2 (Page 0, Register 26) */
66#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000
67 /* 1=Reverse Auto-Negotiation */
68
69/* MAC Specific Control Register (Page 2, Register 21) */
70/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
71#define GG82563_MSCR_TX_CLK_MASK 0x0007
72#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
73#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
74#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
75
76#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
77
78/* DSP Distance Register (Page 5, Register 26) */
79#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M
80 1 = 50-80M
81 2 = 80-110M
82 3 = 110-140M
83 4 = >140M */
84
85/* Kumeran Mode Control Register (Page 193, Register 16) */
86#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
87
88/* Max number of times Kumeran read/write should be validated */
89#define GG82563_MAX_KMRN_RETRY 0x5
90
91/* Power Management Control Register (Page 193, Register 20) */
92#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
93 /* 1=Enable SERDES Electrical Idle */
94
95/* In-Band Control Register (Page 194, Register 18) */
96#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
97
98/*
99 * A table for the GG82563 cable length where the range is defined
100 * with a lower bound at "index" and the upper bound at
101 * "index + 5".
102 */
/* Entries are cable lengths (presumably meters, matching the "<50M" style
 * encoding of the DSPD register above -- TODO confirm); 0xFF terminates. */
static const u16 e1000_gg82563_cable_length_table[] = {
	 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
#define GG82563_CABLE_LENGTH_TABLE_SIZE \
		ARRAY_SIZE(e1000_gg82563_cable_length_table)
107
108static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
109static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
110static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
111static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
112static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
113static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
114static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
115static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
116static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
117 u16 *data);
118static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
119 u16 data);
120static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
121
122/**
123 * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
124 * @hw: pointer to the HW structure
125 **/
126static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
127{
128 struct e1000_phy_info *phy = &hw->phy;
129 s32 ret_val;
130
131 if (hw->phy.media_type != e1000_media_type_copper) {
132 phy->type = e1000_phy_none;
133 return 0;
134 } else {
135 phy->ops.power_up = e1000_power_up_phy_copper;
136 phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
137 }
138
139 phy->addr = 1;
140 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
141 phy->reset_delay_us = 100;
142 phy->type = e1000_phy_gg82563;
143
144 /* This can only be done after all function pointers are setup. */
145 ret_val = e1000e_get_phy_id(hw);
146
147 /* Verify phy id */
148 if (phy->id != GG82563_E_PHY_ID)
149 return -E1000_ERR_PHY;
150
151 return ret_val;
152}
153
154/**
155 * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
156 * @hw: pointer to the HW structure
157 **/
158static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
159{
160 struct e1000_nvm_info *nvm = &hw->nvm;
161 u32 eecd = er32(EECD);
162 u16 size;
163
164 nvm->opcode_bits = 8;
165 nvm->delay_usec = 1;
166 switch (nvm->override) {
167 case e1000_nvm_override_spi_large:
168 nvm->page_size = 32;
169 nvm->address_bits = 16;
170 break;
171 case e1000_nvm_override_spi_small:
172 nvm->page_size = 8;
173 nvm->address_bits = 8;
174 break;
175 default:
176 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
177 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
178 break;
179 }
180
181 nvm->type = e1000_nvm_eeprom_spi;
182
183 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
184 E1000_EECD_SIZE_EX_SHIFT);
185
186 /*
187 * Added to a constant, "size" becomes the left-shift value
188 * for setting word_size.
189 */
190 size += NVM_WORD_SIZE_BASE_SHIFT;
191
192 /* EEPROM access above 16k is unsupported */
193 if (size > 14)
194 size = 14;
195 nvm->word_size = 1 << size;
196
197 return 0;
198}
199
200/**
201 * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
202 * @hw: pointer to the HW structure
203 **/
204static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
205{
206 struct e1000_hw *hw = &adapter->hw;
207 struct e1000_mac_info *mac = &hw->mac;
208 struct e1000_mac_operations *func = &mac->ops;
209
210 /* Set media type */
211 switch (adapter->pdev->device) {
212 case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
213 hw->phy.media_type = e1000_media_type_internal_serdes;
214 break;
215 default:
216 hw->phy.media_type = e1000_media_type_copper;
217 break;
218 }
219
220 /* Set mta register count */
221 mac->mta_reg_count = 128;
222 /* Set rar entry count */
223 mac->rar_entry_count = E1000_RAR_ENTRIES;
224 /* FWSM register */
225 mac->has_fwsm = true;
226 /* ARC supported; valid only if manageability features are enabled. */
227 mac->arc_subsystem_valid =
228 (er32(FWSM) & E1000_FWSM_MODE_MASK)
229 ? true : false;
230 /* Adaptive IFS not supported */
231 mac->adaptive_ifs = false;
232
233 /* check for link */
234 switch (hw->phy.media_type) {
235 case e1000_media_type_copper:
236 func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
237 func->check_for_link = e1000e_check_for_copper_link;
238 break;
239 case e1000_media_type_fiber:
240 func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
241 func->check_for_link = e1000e_check_for_fiber_link;
242 break;
243 case e1000_media_type_internal_serdes:
244 func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
245 func->check_for_link = e1000e_check_for_serdes_link;
246 break;
247 default:
248 return -E1000_ERR_CONFIG;
249 break;
250 }
251
252 /* set lan id for port to determine which phy lock to use */
253 hw->mac.ops.set_lan_id(hw);
254
255 return 0;
256}
257
258static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
259{
260 struct e1000_hw *hw = &adapter->hw;
261 s32 rc;
262
263 rc = e1000_init_mac_params_80003es2lan(adapter);
264 if (rc)
265 return rc;
266
267 rc = e1000_init_nvm_params_80003es2lan(hw);
268 if (rc)
269 return rc;
270
271 rc = e1000_init_phy_params_80003es2lan(hw);
272 if (rc)
273 return rc;
274
275 return 0;
276}
277
278/**
279 * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
280 * @hw: pointer to the HW structure
281 *
282 * A wrapper to acquire access rights to the correct PHY.
283 **/
284static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
285{
286 u16 mask;
287
288 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
289 return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
290}
291
292/**
293 * e1000_release_phy_80003es2lan - Release rights to access PHY
294 * @hw: pointer to the HW structure
295 *
296 * A wrapper to release access rights to the correct PHY.
297 **/
298static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
299{
300 u16 mask;
301
302 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
303 e1000_release_swfw_sync_80003es2lan(hw, mask);
304}
305
306/**
307 * e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register
308 * @hw: pointer to the HW structure
309 *
310 * Acquire the semaphore to access the Kumeran interface.
311 *
312 **/
313static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw)
314{
315 u16 mask;
316
317 mask = E1000_SWFW_CSR_SM;
318
319 return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
320}
321
322/**
323 * e1000_release_mac_csr_80003es2lan - Release rights to access Kumeran Register
324 * @hw: pointer to the HW structure
325 *
326 * Release the semaphore used to access the Kumeran interface
327 **/
328static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
329{
330 u16 mask;
331
332 mask = E1000_SWFW_CSR_SM;
333
334 e1000_release_swfw_sync_80003es2lan(hw, mask);
335}
336
337/**
338 * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
339 * @hw: pointer to the HW structure
340 *
341 * Acquire the semaphore to access the EEPROM.
342 **/
343static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
344{
345 s32 ret_val;
346
347 ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
348 if (ret_val)
349 return ret_val;
350
351 ret_val = e1000e_acquire_nvm(hw);
352
353 if (ret_val)
354 e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
355
356 return ret_val;
357}
358
/**
 * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
 * @hw: pointer to the HW structure
 *
 * Release the semaphore used to access the EEPROM.
 * Releases in reverse acquisition order: the generic NVM lock first,
 * then the SW/FW EEPROM semaphore.
 **/
static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
{
	e1000e_release_nvm(hw);
	e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
}
370
371/**
372 * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
373 * @hw: pointer to the HW structure
374 * @mask: specifies which semaphore to acquire
375 *
376 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
377 * will also specify which port we're acquiring the lock for.
378 **/
379static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
380{
381 u32 swfw_sync;
382 u32 swmask = mask;
383 u32 fwmask = mask << 16;
384 s32 i = 0;
385 s32 timeout = 50;
386
387 while (i < timeout) {
388 if (e1000e_get_hw_semaphore(hw))
389 return -E1000_ERR_SWFW_SYNC;
390
391 swfw_sync = er32(SW_FW_SYNC);
392 if (!(swfw_sync & (fwmask | swmask)))
393 break;
394
395 /*
396 * Firmware currently using resource (fwmask)
397 * or other software thread using resource (swmask)
398 */
399 e1000e_put_hw_semaphore(hw);
400 mdelay(5);
401 i++;
402 }
403
404 if (i == timeout) {
405 e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
406 return -E1000_ERR_SWFW_SYNC;
407 }
408
409 swfw_sync |= swmask;
410 ew32(SW_FW_SYNC, swfw_sync);
411
412 e1000e_put_hw_semaphore(hw);
413
414 return 0;
415}
416
417/**
418 * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
419 * @hw: pointer to the HW structure
420 * @mask: specifies which semaphore to acquire
421 *
422 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
423 * will also specify which port we're releasing the lock for.
424 **/
425static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
426{
427 u32 swfw_sync;
428
429 while (e1000e_get_hw_semaphore(hw) != 0)
430 ; /* Empty */
431
432 swfw_sync = er32(SW_FW_SYNC);
433 swfw_sync &= ~mask;
434 ew32(SW_FW_SYNC, swfw_sync);
435
436 e1000e_put_hw_semaphore(hw);
437}
438
439/**
440 * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register
441 * @hw: pointer to the HW structure
442 * @offset: offset of the register to read
443 * @data: pointer to the data returned from the operation
444 *
445 * Read the GG82563 PHY register.
446 **/
447static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
448 u32 offset, u16 *data)
449{
450 s32 ret_val;
451 u32 page_select;
452 u16 temp;
453
454 ret_val = e1000_acquire_phy_80003es2lan(hw);
455 if (ret_val)
456 return ret_val;
457
458 /* Select Configuration Page */
459 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
460 page_select = GG82563_PHY_PAGE_SELECT;
461 } else {
462 /*
463 * Use Alternative Page Select register to access
464 * registers 30 and 31
465 */
466 page_select = GG82563_PHY_PAGE_SELECT_ALT;
467 }
468
469 temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
470 ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
471 if (ret_val) {
472 e1000_release_phy_80003es2lan(hw);
473 return ret_val;
474 }
475
476 if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) {
477 /*
478 * The "ready" bit in the MDIC register may be incorrectly set
479 * before the device has completed the "Page Select" MDI
480 * transaction. So we wait 200us after each MDI command...
481 */
482 udelay(200);
483
484 /* ...and verify the command was successful. */
485 ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
486
487 if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
488 ret_val = -E1000_ERR_PHY;
489 e1000_release_phy_80003es2lan(hw);
490 return ret_val;
491 }
492
493 udelay(200);
494
495 ret_val = e1000e_read_phy_reg_mdic(hw,
496 MAX_PHY_REG_ADDRESS & offset,
497 data);
498
499 udelay(200);
500 } else {
501 ret_val = e1000e_read_phy_reg_mdic(hw,
502 MAX_PHY_REG_ADDRESS & offset,
503 data);
504 }
505
506 e1000_release_phy_80003es2lan(hw);
507
508 return ret_val;
509}
510
511/**
512 * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register
513 * @hw: pointer to the HW structure
514 * @offset: offset of the register to read
515 * @data: value to write to the register
516 *
517 * Write to the GG82563 PHY register.
518 **/
519static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
520 u32 offset, u16 data)
521{
522 s32 ret_val;
523 u32 page_select;
524 u16 temp;
525
526 ret_val = e1000_acquire_phy_80003es2lan(hw);
527 if (ret_val)
528 return ret_val;
529
530 /* Select Configuration Page */
531 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
532 page_select = GG82563_PHY_PAGE_SELECT;
533 } else {
534 /*
535 * Use Alternative Page Select register to access
536 * registers 30 and 31
537 */
538 page_select = GG82563_PHY_PAGE_SELECT_ALT;
539 }
540
541 temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
542 ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
543 if (ret_val) {
544 e1000_release_phy_80003es2lan(hw);
545 return ret_val;
546 }
547
548 if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) {
549 /*
550 * The "ready" bit in the MDIC register may be incorrectly set
551 * before the device has completed the "Page Select" MDI
552 * transaction. So we wait 200us after each MDI command...
553 */
554 udelay(200);
555
556 /* ...and verify the command was successful. */
557 ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
558
559 if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
560 e1000_release_phy_80003es2lan(hw);
561 return -E1000_ERR_PHY;
562 }
563
564 udelay(200);
565
566 ret_val = e1000e_write_phy_reg_mdic(hw,
567 MAX_PHY_REG_ADDRESS & offset,
568 data);
569
570 udelay(200);
571 } else {
572 ret_val = e1000e_write_phy_reg_mdic(hw,
573 MAX_PHY_REG_ADDRESS & offset,
574 data);
575 }
576
577 e1000_release_phy_80003es2lan(hw);
578
579 return ret_val;
580}
581
/**
 * e1000_write_nvm_80003es2lan - Write to ESB2 NVM
 * @hw: pointer to the HW structure
 * @offset: offset of the register to read
 * @words: number of words to write
 * @data: buffer of data to write to the NVM
 *
 * Write "words" of data to the ESB2 NVM.  Thin delegate to the generic
 * SPI EEPROM write path; exists to match the NVM ops-vector signature.
 **/
static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
				       u16 words, u16 *data)
{
	return e1000e_write_nvm_spi(hw, offset, words, data);
}
596
597/**
598 * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete
599 * @hw: pointer to the HW structure
600 *
601 * Wait a specific amount of time for manageability processes to complete.
602 * This is a function pointer entry point called by the phy module.
603 **/
604static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
605{
606 s32 timeout = PHY_CFG_TIMEOUT;
607 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
608
609 if (hw->bus.func == 1)
610 mask = E1000_NVM_CFG_DONE_PORT_1;
611
612 while (timeout) {
613 if (er32(EEMNGCTL) & mask)
614 break;
615 usleep_range(1000, 2000);
616 timeout--;
617 }
618 if (!timeout) {
619 e_dbg("MNG configuration cycle has not completed.\n");
620 return -E1000_ERR_RESET;
621 }
622
623 return 0;
624}
625
626/**
627 * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex
628 * @hw: pointer to the HW structure
629 *
630 * Force the speed and duplex settings onto the PHY. This is a
631 * function pointer entry point called by the phy module.
632 **/
633static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
634{
635 s32 ret_val;
636 u16 phy_data;
637 bool link;
638
639 /*
640 * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
641 * forced whenever speed and duplex are forced.
642 */
643 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
644 if (ret_val)
645 return ret_val;
646
647 phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO;
648 ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data);
649 if (ret_val)
650 return ret_val;
651
652 e_dbg("GG82563 PSCR: %X\n", phy_data);
653
654 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
655 if (ret_val)
656 return ret_val;
657
658 e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
659
660 /* Reset the phy to commit changes. */
661 phy_data |= MII_CR_RESET;
662
663 ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
664 if (ret_val)
665 return ret_val;
666
667 udelay(1);
668
669 if (hw->phy.autoneg_wait_to_complete) {
670 e_dbg("Waiting for forced speed/duplex link "
671 "on GG82563 phy.\n");
672
673 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
674 100000, &link);
675 if (ret_val)
676 return ret_val;
677
678 if (!link) {
679 /*
680 * We didn't get link.
681 * Reset the DSP and cross our fingers.
682 */
683 ret_val = e1000e_phy_reset_dsp(hw);
684 if (ret_val)
685 return ret_val;
686 }
687
688 /* Try once more */
689 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
690 100000, &link);
691 if (ret_val)
692 return ret_val;
693 }
694
695 ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
696 if (ret_val)
697 return ret_val;
698
699 /*
700 * Resetting the phy means we need to verify the TX_CLK corresponds
701 * to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
702 */
703 phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
704 if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED)
705 phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5;
706 else
707 phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
708
709 /*
710 * In addition, we must re-enable CRS on Tx for both half and full
711 * duplex.
712 */
713 phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
714 ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
715
716 return ret_val;
717}
718
719/**
720 * e1000_get_cable_length_80003es2lan - Set approximate cable length
721 * @hw: pointer to the HW structure
722 *
723 * Find the approximate cable length as measured by the GG82563 PHY.
724 * This is a function pointer entry point called by the phy module.
725 **/
726static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
727{
728 struct e1000_phy_info *phy = &hw->phy;
729 s32 ret_val = 0;
730 u16 phy_data, index;
731
732 ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
733 if (ret_val)
734 goto out;
735
736 index = phy_data & GG82563_DSPD_CABLE_LENGTH;
737
738 if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) {
739 ret_val = -E1000_ERR_PHY;
740 goto out;
741 }
742
743 phy->min_cable_length = e1000_gg82563_cable_length_table[index];
744 phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5];
745
746 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
747
748out:
749 return ret_val;
750}
751
752/**
753 * e1000_get_link_up_info_80003es2lan - Report speed and duplex
754 * @hw: pointer to the HW structure
755 * @speed: pointer to speed buffer
756 * @duplex: pointer to duplex buffer
757 *
758 * Retrieve the current speed and duplex configuration.
759 **/
760static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
761 u16 *duplex)
762{
763 s32 ret_val;
764
765 if (hw->phy.media_type == e1000_media_type_copper) {
766 ret_val = e1000e_get_speed_and_duplex_copper(hw,
767 speed,
768 duplex);
769 hw->phy.ops.cfg_on_link_up(hw);
770 } else {
771 ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
772 speed,
773 duplex);
774 }
775
776 return ret_val;
777}
778
/**
 * e1000_reset_hw_80003es2lan - Reset the ESB2 controller
 * @hw: pointer to the HW structure
 *
 * Perform a global reset to the ESB2 controller: quiesce PCIe and DMA,
 * mask interrupts, pulse CTRL.RST under the PHY semaphore, then wait
 * for the post-reset auto-read of NVM to finish.
 **/
static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000e_disable_pcie_master(hw);
	if (ret_val)
		e_dbg("PCI-E Master disable polling has failed.\n");

	e_dbg("Masking off all interrupts\n");
	ew32(IMC, 0xffffffff);

	/* Stop Rx/Tx and let any in-flight transmit packets drain. */
	ew32(RCTL, 0);
	ew32(TCTL, E1000_TCTL_PSP);
	e1e_flush();

	usleep_range(10000, 20000);

	ctrl = er32(CTRL);

	/*
	 * NOTE(review): the acquire result below is ignored and then
	 * overwritten; if the PHY semaphore could not be taken, the reset
	 * still proceeds and the release frees a lock that may not be
	 * held -- confirm this is intentional.
	 */
	ret_val = e1000_acquire_phy_80003es2lan(hw);
	e_dbg("Issuing a global reset to MAC\n");
	ew32(CTRL, ctrl | E1000_CTRL_RST);
	e1000_release_phy_80003es2lan(hw);

	ret_val = e1000e_get_auto_rd_done(hw);
	if (ret_val)
		/* We don't want to continue accessing MAC registers. */
		return ret_val;

	/* Clear any pending interrupt events. */
	ew32(IMC, 0xffffffff);
	er32(ICR);

	/* Honor an alternate MAC address from NVM, if present. */
	ret_val = e1000_check_alt_mac_addr_generic(hw);

	return ret_val;
}
827
/**
 * e1000_init_hw_80003es2lan - Initialize the ESB2 controller
 * @hw: pointer to the HW structure
 *
 * Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
 **/
static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 reg_data;
	s32 ret_val;
	u16 kum_reg_data;
	u16 i;

	e1000_initialize_hw_bits_80003es2lan(hw);

	/* Initialize identification LED */
	ret_val = e1000e_id_led_init(hw);
	if (ret_val)
		e_dbg("Error initializing identification LED\n");
	/* This is not fatal and we should not stop init due to this */

	/* Disabling VLAN filtering */
	e_dbg("Initializing the IEEE VLAN\n");
	mac->ops.clear_vfta(hw);

	/* Setup the receive address. */
	e1000e_init_rx_addrs(hw, mac->rar_entry_count);

	/* Zero out the Multicast HASH table */
	e_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* Setup link and flow control */
	ret_val = e1000e_setup_link(hw);

	/* Disable IBIST slave mode (far-end loopback) */
	e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
					&kum_reg_data);
	kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
	e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
					 kum_reg_data);

	/* Set the transmit descriptor write-back policy */
	reg_data = er32(TXDCTL(0));
	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
		   E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
	ew32(TXDCTL(0), reg_data);

	/* ...for both queues. */
	reg_data = er32(TXDCTL(1));
	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
		   E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
	ew32(TXDCTL(1), reg_data);

	/* Enable retransmit on late collisions */
	reg_data = er32(TCTL);
	reg_data |= E1000_TCTL_RTLC;
	ew32(TCTL, reg_data);

	/* Configure Gigabit Carry Extend Padding */
	reg_data = er32(TCTL_EXT);
	reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
	reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN;
	ew32(TCTL_EXT, reg_data);

	/* Configure Transmit Inter-Packet Gap */
	reg_data = er32(TIPG);
	reg_data &= ~E1000_TIPG_IPGT_MASK;
	reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
	ew32(TIPG, reg_data);

	/* Clears bit 20 of FFLT[1]; purpose not documented here --
	 * presumably a flexible-filter quirk fix (TODO confirm). */
	reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001);
	reg_data &= ~0x00100000;
	E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);

	/* default to true to enable the MDIC W/A */
	hw->dev_spec.e80003es2lan.mdic_wa_enable = true;

	/* The workaround is unnecessary when Kumeran is in in-band
	 * MDIO mode.
	 * NOTE(review): this read's ret_val overwrites any error from
	 * e1000e_setup_link() above -- confirm a setup_link failure is
	 * meant to be non-fatal here.
	 */
	ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
						  E1000_KMRNCTRLSTA_OFFSET >>
						  E1000_KMRNCTRLSTA_OFFSET_SHIFT,
						  &i);
	if (!ret_val) {
		if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
		     E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
			hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
	}

	/*
	 * Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_80003es2lan(hw);

	return ret_val;
}
928
/**
 * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2
 * @hw: pointer to the HW structure
 *
 * Initializes required hardware-dependent bits needed for normal operation.
 * The individual bit meanings are per the ESB2 datasheet; they are set
 * blindly here (read-modify-write) before the rest of HW init runs.
 **/
static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
{
	u32 reg;

	/* Transmit Descriptor Control 0 */
	reg = er32(TXDCTL(0));
	reg |= (1 << 22);	/* bit 22 -- meaning per datasheet (TODO confirm) */
	ew32(TXDCTL(0), reg);

	/* Transmit Descriptor Control 1 */
	reg = er32(TXDCTL(1));
	reg |= (1 << 22);	/* same bit for the second Tx queue */
	ew32(TXDCTL(1), reg);

	/* Transmit Arbitration Control 0 */
	reg = er32(TARC(0));
	reg &= ~(0xF << 27); /* 30:27 */
	if (hw->phy.media_type != e1000_media_type_copper)
		reg &= ~(1 << 20);	/* only cleared for fiber/serdes */
	ew32(TARC(0), reg);

	/* Transmit Arbitration Control 1 */
	reg = er32(TARC(1));
	/* Bit 28 tracks the inverse of TCTL multiple-request support. */
	if (er32(TCTL) & E1000_TCTL_MULR)
		reg &= ~(1 << 28);
	else
		reg |= (1 << 28);
	ew32(TARC(1), reg);
}
964
/**
 * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link
 * @hw: pointer to the HW structure
 *
 * Setup some GG82563 PHY registers for obtaining link: CRS/Tx clock,
 * MDI crossover and polarity options, a PHY soft reset, Kumeran FIFO
 * bypass and electrical-idle setup, and a Kumeran padding workaround.
 * The register sequence is order-sensitive; do not reorder.
 **/
static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u32 ctrl_ext;
	u16 data;

	/* Assert CRS on Tx and select the Tx clock in MAC-specific ctrl. */
	ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
	if (ret_val)
		return ret_val;

	data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
	/* Use 25MHz for both link down and 1000Base-T for Tx clock. */
	data |= GG82563_MSCR_TX_CLK_1000MBPS_25;

	ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data);
	if (ret_val)
		return ret_val;

	/*
	 * Options:
	 *   MDI/MDI-X = 0 (default)
	 *   0 - Auto for all speeds
	 *   1 - MDI mode
	 *   2 - MDI-X mode
	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
	 */
	ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data);
	if (ret_val)
		return ret_val;

	data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;

	switch (phy->mdix) {
	case 1:
		data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
		break;
	case 2:
		data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
		break;
	case 0:
	default:
		data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
		break;
	}

	/*
	 * Options:
	 *   disable_polarity_correction = 0 (default)
	 *       Automatic Correction for Reversed Cable Polarity
	 *   0 - Disabled
	 *   1 - Enabled
	 */
	data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
	if (phy->disable_polarity_correction)
		data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;

	ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data);
	if (ret_val)
		return ret_val;

	/* SW Reset the PHY so all changes take effect */
	ret_val = e1000e_commit_phy(hw);
	if (ret_val) {
		e_dbg("Error Resetting the PHY\n");
		return ret_val;
	}

	/* Bypass Rx and Tx FIFO's */
	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
					E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
					E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
					E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
	if (ret_val)
		return ret_val;

	/* Enable electrical idle on the Kumeran MAC-to-PHY op mode. */
	ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
					E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
					&data);
	if (ret_val)
		return ret_val;
	data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
					E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
					data);
	if (ret_val)
		return ret_val;

	/* Clear reverse auto-negotiation. */
	ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data);
	if (ret_val)
		return ret_val;

	data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
	ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data);
	if (ret_val)
		return ret_val;

	/* Clear the MAC's link-mode selection bits. */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
	ew32(CTRL_EXT, ctrl_ext);

	ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
	if (ret_val)
		return ret_val;

	/*
	 * Do not init these registers when the HW is in IAMT mode, since the
	 * firmware will have already initialized them.  We only initialize
	 * them if the HW is not in IAMT mode.
	 */
	if (!e1000e_check_mng_mode(hw)) {
		/* Enable Electrical Idle on the PHY */
		data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
		ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data);
		if (ret_val)
			return ret_val;

		/* Stop passing false carrier over the Kumeran link. */
		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data);
		if (ret_val)
			return ret_val;

		data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
		ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
		if (ret_val)
			return ret_val;
	}

	/*
	 * Workaround: Disable padding in Kumeran interface in the MAC
	 * and in the PHY to avoid CRC errors.
	 */
	ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
	if (ret_val)
		return ret_val;

	data |= GG82563_ICR_DIS_PADDING;
	ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data);
	if (ret_val)
		return ret_val;

	return 0;
}
1113
1114/**
1115 * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
1116 * @hw: pointer to the HW structure
1117 *
1118 * Essentially a wrapper for setting up all things "copper" related.
1119 * This is a function pointer entry point called by the mac module.
1120 **/
1121static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
1122{
1123 u32 ctrl;
1124 s32 ret_val;
1125 u16 reg_data;
1126
1127 ctrl = er32(CTRL);
1128 ctrl |= E1000_CTRL_SLU;
1129 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1130 ew32(CTRL, ctrl);
1131
1132 /*
1133 * Set the mac to wait the maximum time between each
1134 * iteration and increase the max iterations when
1135 * polling the phy; this fixes erroneous timeouts at 10Mbps.
1136 */
1137 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
1138 0xFFFF);
1139 if (ret_val)
1140 return ret_val;
1141 ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
1142 &reg_data);
1143 if (ret_val)
1144 return ret_val;
1145 reg_data |= 0x3F;
1146 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
1147 reg_data);
1148 if (ret_val)
1149 return ret_val;
1150 ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
1151 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
1152 &reg_data);
1153 if (ret_val)
1154 return ret_val;
1155 reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
1156 ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
1157 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
1158 reg_data);
1159 if (ret_val)
1160 return ret_val;
1161
1162 ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
1163 if (ret_val)
1164 return ret_val;
1165
1166 ret_val = e1000e_setup_copper_link(hw);
1167
1168 return 0;
1169}
1170
/**
 * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
 * @hw: pointer to the HW structure
 *
 * Called once link is established.  For copper links this reads the
 * negotiated speed/duplex and applies the matching KMRN quirks: the
 * gigabit fixups at 1000 Mbps, otherwise the 10/100 fixups (which
 * also need the duplex setting).  Non-copper media are left alone.
 **/
static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 speed;
	u16 duplex;

	if (hw->phy.media_type == e1000_media_type_copper) {
		ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
							     &duplex);
		if (ret_val)
			return ret_val;

		if (speed == SPEED_1000)
			ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
		else
			ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex);
	}

	return ret_val;
}
1199
1200/**
1201 * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
1202 * @hw: pointer to the HW structure
1203 * @duplex: current duplex setting
1204 *
1205 * Configure the KMRN interface by applying last minute quirks for
1206 * 10/100 operation.
1207 **/
1208static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
1209{
1210 s32 ret_val;
1211 u32 tipg;
1212 u32 i = 0;
1213 u16 reg_data, reg_data2;
1214
1215 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
1216 ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
1217 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
1218 reg_data);
1219 if (ret_val)
1220 return ret_val;
1221
1222 /* Configure Transmit Inter-Packet Gap */
1223 tipg = er32(TIPG);
1224 tipg &= ~E1000_TIPG_IPGT_MASK;
1225 tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
1226 ew32(TIPG, tipg);
1227
1228 do {
1229 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
1230 if (ret_val)
1231 return ret_val;
1232
1233 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
1234 if (ret_val)
1235 return ret_val;
1236 i++;
1237 } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
1238
1239 if (duplex == HALF_DUPLEX)
1240 reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
1241 else
1242 reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
1243
1244 ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
1245
1246 return 0;
1247}
1248
1249/**
1250 * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
1251 * @hw: pointer to the HW structure
1252 *
1253 * Configure the KMRN interface by applying last minute quirks for
1254 * gigabit operation.
1255 **/
1256static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
1257{
1258 s32 ret_val;
1259 u16 reg_data, reg_data2;
1260 u32 tipg;
1261 u32 i = 0;
1262
1263 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
1264 ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
1265 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
1266 reg_data);
1267 if (ret_val)
1268 return ret_val;
1269
1270 /* Configure Transmit Inter-Packet Gap */
1271 tipg = er32(TIPG);
1272 tipg &= ~E1000_TIPG_IPGT_MASK;
1273 tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
1274 ew32(TIPG, tipg);
1275
1276 do {
1277 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
1278 if (ret_val)
1279 return ret_val;
1280
1281 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
1282 if (ret_val)
1283 return ret_val;
1284 i++;
1285 } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
1286
1287 reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
1288 ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
1289
1290 return ret_val;
1291}
1292
1293/**
1294 * e1000_read_kmrn_reg_80003es2lan - Read kumeran register
1295 * @hw: pointer to the HW structure
1296 * @offset: register offset to be read
1297 * @data: pointer to the read data
1298 *
1299 * Acquire semaphore, then read the PHY register at offset
1300 * using the kumeran interface. The information retrieved is stored in data.
1301 * Release the semaphore before exiting.
1302 **/
1303static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
1304 u16 *data)
1305{
1306 u32 kmrnctrlsta;
1307 s32 ret_val = 0;
1308
1309 ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
1310 if (ret_val)
1311 return ret_val;
1312
1313 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
1314 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
1315 ew32(KMRNCTRLSTA, kmrnctrlsta);
1316 e1e_flush();
1317
1318 udelay(2);
1319
1320 kmrnctrlsta = er32(KMRNCTRLSTA);
1321 *data = (u16)kmrnctrlsta;
1322
1323 e1000_release_mac_csr_80003es2lan(hw);
1324
1325 return ret_val;
1326}
1327
1328/**
1329 * e1000_write_kmrn_reg_80003es2lan - Write kumeran register
1330 * @hw: pointer to the HW structure
1331 * @offset: register offset to write to
1332 * @data: data to write at register offset
1333 *
1334 * Acquire semaphore, then write the data to PHY register
1335 * at the offset using the kumeran interface. Release semaphore
1336 * before exiting.
1337 **/
1338static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
1339 u16 data)
1340{
1341 u32 kmrnctrlsta;
1342 s32 ret_val = 0;
1343
1344 ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
1345 if (ret_val)
1346 return ret_val;
1347
1348 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
1349 E1000_KMRNCTRLSTA_OFFSET) | data;
1350 ew32(KMRNCTRLSTA, kmrnctrlsta);
1351 e1e_flush();
1352
1353 udelay(2);
1354
1355 e1000_release_mac_csr_80003es2lan(hw);
1356
1357 return ret_val;
1358}
1359
1360/**
1361 * e1000_read_mac_addr_80003es2lan - Read device MAC address
1362 * @hw: pointer to the HW structure
1363 **/
1364static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
1365{
1366 s32 ret_val = 0;
1367
1368 /*
1369 * If there's an alternate MAC address place it in RAR0
1370 * so that it will override the Si installed default perm
1371 * address.
1372 */
1373 ret_val = e1000_check_alt_mac_addr_generic(hw);
1374 if (ret_val)
1375 goto out;
1376
1377 ret_val = e1000_read_mac_addr_generic(hw);
1378
1379out:
1380 return ret_val;
1381}
1382
1383/**
1384 * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
1385 * @hw: pointer to the HW structure
1386 *
1387 * In the case of a PHY power down to save power, or to turn off link during a
1388 * driver unload, or wake on lan is not enabled, remove the link.
1389 **/
1390static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
1391{
1392 /* If the management interface is not enabled, then power down */
1393 if (!(hw->mac.ops.check_mng_mode(hw) ||
1394 hw->phy.ops.check_reset_block(hw)))
1395 e1000_power_down_phy_copper(hw);
1396}
1397
/**
 * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the hardware counters by reading the counter registers.
 * The statistics registers are clear-on-read, so each read (value
 * discarded) resets that counter to zero.
 **/
static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
{
	/* Counters common to all parts are cleared by the generic helper */
	e1000e_clear_hw_cntrs_base(hw);

	/* Packet-size histogram counters: Rx (PRC*) then Tx (PTC*) */
	er32(PRC64);
	er32(PRC127);
	er32(PRC255);
	er32(PRC511);
	er32(PRC1023);
	er32(PRC1522);
	er32(PTC64);
	er32(PTC127);
	er32(PTC255);
	er32(PTC511);
	er32(PTC1023);
	er32(PTC1522);

	/* Error and TCP-segmentation counters */
	er32(ALGNERRC);
	er32(RXERRC);
	er32(TNCRS);
	er32(CEXTERR);
	er32(TSCTC);
	er32(TSCTFC);

	/* Management packet counters */
	er32(MGTPRC);
	er32(MGTPDC);
	er32(MGTPTC);

	/* Interrupt assertion / cause counters */
	er32(IAC);
	er32(ICRXOC);

	er32(ICRXPTC);
	er32(ICRXATC);
	er32(ICTXPTC);
	er32(ICTXATC);
	er32(ICTXQEC);
	er32(ICTXQMTC);
	er32(ICRXDMTC);
}
1443
/*
 * MAC operations for the 80003es2lan.  Generic e1000e helpers are used
 * wherever this part needs no special handling; media-type-dependent
 * hooks (check_for_link, setup_physical_interface) are filled in at
 * init time, as the inline comments note.
 */
static struct e1000_mac_operations es2_mac_ops = {
	.read_mac_addr		= e1000_read_mac_addr_80003es2lan,
	.id_led_init		= e1000e_id_led_init,
	.blink_led		= e1000e_blink_led_generic,
	.check_mng_mode		= e1000e_check_mng_mode_generic,
	/* check_for_link dependent on media type */
	.cleanup_led		= e1000e_cleanup_led_generic,
	.clear_hw_cntrs		= e1000_clear_hw_cntrs_80003es2lan,
	.get_bus_info		= e1000e_get_bus_info_pcie,
	.set_lan_id		= e1000_set_lan_id_multi_port_pcie,
	.get_link_up_info	= e1000_get_link_up_info_80003es2lan,
	.led_on			= e1000e_led_on_generic,
	.led_off		= e1000e_led_off_generic,
	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
	.write_vfta		= e1000_write_vfta_generic,
	.clear_vfta		= e1000_clear_vfta_generic,
	.reset_hw		= e1000_reset_hw_80003es2lan,
	.init_hw		= e1000_init_hw_80003es2lan,
	.setup_link		= e1000e_setup_link,
	/* setup_physical_interface dependent on media type */
	.setup_led		= e1000e_setup_led_generic,
};
1466
/*
 * PHY operations for the GG82563 PHY on the 80003es2lan; register
 * access goes through the part-specific read/write wrappers.
 */
static struct e1000_phy_operations es2_phy_ops = {
	.acquire		= e1000_acquire_phy_80003es2lan,
	.check_polarity		= e1000_check_polarity_m88,
	.check_reset_block	= e1000e_check_reset_block_generic,
	.commit			= e1000e_phy_sw_reset,
	.force_speed_duplex	= e1000_phy_force_speed_duplex_80003es2lan,
	.get_cfg_done		= e1000_get_cfg_done_80003es2lan,
	.get_cable_length	= e1000_get_cable_length_80003es2lan,
	.get_info		= e1000e_get_phy_info_m88,
	.read_reg		= e1000_read_phy_reg_gg82563_80003es2lan,
	.release		= e1000_release_phy_80003es2lan,
	.reset			= e1000e_phy_hw_reset_generic,
	.set_d0_lplu_state	= NULL,
	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
	.write_reg		= e1000_write_phy_reg_gg82563_80003es2lan,
	.cfg_on_link_up		= e1000_cfg_on_link_up_80003es2lan,
};
1484
/* NVM (EEPROM) operations for the 80003es2lan */
static struct e1000_nvm_operations es2_nvm_ops = {
	.acquire		= e1000_acquire_nvm_80003es2lan,
	.read			= e1000e_read_nvm_eerd,
	.release		= e1000_release_nvm_80003es2lan,
	.update			= e1000e_update_nvm_checksum_generic,
	.valid_led_default	= e1000e_valid_led_default,
	.validate		= e1000e_validate_nvm_checksum_generic,
	.write			= e1000_write_nvm_80003es2lan,
};
1494
/*
 * Board description for the 80003es2lan MAC; consumed by the probe
 * path to select feature flags, buffer allocation and the operation
 * tables above.  Several flags work around documented silicon errata.
 */
struct e1000_info e1000_es2_info = {
	.mac			= e1000_80003es2lan,
	.flags			= FLAG_HAS_HW_VLAN_FILTER
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_HAS_WOL
				  | FLAG_APME_IN_CTRL3
				  | FLAG_RX_CSUM_ENABLED
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_RX_NEEDS_RESTART /* errata */
				  | FLAG_TARC_SET_BIT_ZERO /* errata */
				  | FLAG_APME_CHECK_PORT_B
				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
				  | FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
	.flags2			= FLAG2_DMA_BURST,
	.pba			= 38,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_80003es2lan,
	.mac_ops		= &es2_mac_ops,
	.phy_ops		= &es2_phy_ops,
	.nvm_ops		= &es2_nvm_ops,
};
1516
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
new file mode 100644
index 00000000000..6a0526a59a8
--- /dev/null
+++ b/drivers/net/e1000e/ethtool.c
@@ -0,0 +1,2082 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/* ethtool support for e1000 */
30
31#include <linux/netdevice.h>
32#include <linux/interrupt.h>
33#include <linux/ethtool.h>
34#include <linux/pci.h>
35#include <linux/slab.h>
36#include <linux/delay.h>
37
38#include "e1000.h"
39
/* Where a statistic's backing field lives: netdev stats or adapter stats */
enum {NETDEV_STATS, E1000_STATS};

/* Describes one "ethtool -S" statistic: display name plus the type,
 * size and offset of the field it is read from. */
struct e1000_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name reported to ethtool */
	int type;		/* NETDEV_STATS or E1000_STATS */
	int sizeof_stat;	/* size in bytes of the backing field */
	int stat_offset;	/* byte offset within the source struct */
};

/* Statistic backed by a field of struct e1000_adapter */
#define E1000_STAT(str, m) { \
		.stat_string = str, \
		.type = E1000_STATS, \
		.sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
		.stat_offset = offsetof(struct e1000_adapter, m) }
/* Statistic backed by a field of struct rtnl_link_stats64 */
#define E1000_NETDEV_STAT(str, m) { \
		.stat_string = str, \
		.type = NETDEV_STATS, \
		.sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
		.stat_offset = offsetof(struct rtnl_link_stats64, m) }
59
/*
 * Master table of statistics exported via "ethtool -S".  Order here
 * defines the order of both the strings and the values reported.
 * Note some entries intentionally alias the same counter (e.g.
 * rx_bytes / rx_long_byte_count both map to stats.gorc).
 */
static const struct e1000_stats e1000_gstrings_stats[] = {
	E1000_STAT("rx_packets", stats.gprc),
	E1000_STAT("tx_packets", stats.gptc),
	E1000_STAT("rx_bytes", stats.gorc),
	E1000_STAT("tx_bytes", stats.gotc),
	E1000_STAT("rx_broadcast", stats.bprc),
	E1000_STAT("tx_broadcast", stats.bptc),
	E1000_STAT("rx_multicast", stats.mprc),
	E1000_STAT("tx_multicast", stats.mptc),
	E1000_NETDEV_STAT("rx_errors", rx_errors),
	E1000_NETDEV_STAT("tx_errors", tx_errors),
	E1000_NETDEV_STAT("tx_dropped", tx_dropped),
	E1000_STAT("multicast", stats.mprc),
	E1000_STAT("collisions", stats.colc),
	E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
	E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
	E1000_STAT("rx_crc_errors", stats.crcerrs),
	E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
	E1000_STAT("rx_no_buffer_count", stats.rnbc),
	E1000_STAT("rx_missed_errors", stats.mpc),
	E1000_STAT("tx_aborted_errors", stats.ecol),
	E1000_STAT("tx_carrier_errors", stats.tncrs),
	E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
	E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
	E1000_STAT("tx_window_errors", stats.latecol),
	E1000_STAT("tx_abort_late_coll", stats.latecol),
	E1000_STAT("tx_deferred_ok", stats.dc),
	E1000_STAT("tx_single_coll_ok", stats.scc),
	E1000_STAT("tx_multi_coll_ok", stats.mcc),
	E1000_STAT("tx_timeout_count", tx_timeout_count),
	E1000_STAT("tx_restart_queue", restart_queue),
	E1000_STAT("rx_long_length_errors", stats.roc),
	E1000_STAT("rx_short_length_errors", stats.ruc),
	E1000_STAT("rx_align_errors", stats.algnerrc),
	E1000_STAT("tx_tcp_seg_good", stats.tsctc),
	E1000_STAT("tx_tcp_seg_failed", stats.tsctfc),
	E1000_STAT("rx_flow_control_xon", stats.xonrxc),
	E1000_STAT("rx_flow_control_xoff", stats.xoffrxc),
	E1000_STAT("tx_flow_control_xon", stats.xontxc),
	E1000_STAT("tx_flow_control_xoff", stats.xofftxc),
	E1000_STAT("rx_long_byte_count", stats.gorc),
	E1000_STAT("rx_csum_offload_good", hw_csum_good),
	E1000_STAT("rx_csum_offload_errors", hw_csum_err),
	E1000_STAT("rx_header_split", rx_hdr_split),
	E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
	E1000_STAT("tx_smbus", stats.mgptc),
	E1000_STAT("rx_smbus", stats.mgprc),
	E1000_STAT("dropped_smbus", stats.mgpdc),
	E1000_STAT("rx_dma_failed", rx_dma_failed),
	E1000_STAT("tx_dma_failed", tx_dma_failed),
};

#define E1000_GLOBAL_STATS_LEN	ARRAY_SIZE(e1000_gstrings_stats)
#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN)
/* Names of the self-tests, in the order they are run and reported */
static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
	"Link test   (on/offline)"
};
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
120
/*
 * Report current link settings (ethtool .get_settings hook): supported
 * and advertised modes, port type, and — when determinable — current
 * speed and duplex.
 */
static int e1000_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 speed;

	if (hw->phy.media_type == e1000_media_type_copper) {

		ecmd->supported = (SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_1000baseT_Full |
				   SUPPORTED_Autoneg |
				   SUPPORTED_TP);
		/* ife PHYs have no 1000BASE-T support */
		if (hw->phy.type == e1000_phy_ife)
			ecmd->supported &= ~SUPPORTED_1000baseT_Full;
		ecmd->advertising = ADVERTISED_TP;

		if (hw->mac.autoneg == 1) {
			ecmd->advertising |= ADVERTISED_Autoneg;
			/* the e1000 autoneg seems to match ethtool nicely */
			ecmd->advertising |= hw->phy.autoneg_advertised;
		}

		ecmd->port = PORT_TP;
		ecmd->phy_address = hw->phy.addr;
		ecmd->transceiver = XCVR_INTERNAL;

	} else {
		/* Fiber/serdes: gigabit full duplex only */
		ecmd->supported   = (SUPPORTED_1000baseT_Full |
				     SUPPORTED_FIBRE |
				     SUPPORTED_Autoneg);

		ecmd->advertising = (ADVERTISED_1000baseT_Full |
				     ADVERTISED_FIBRE |
				     ADVERTISED_Autoneg);

		ecmd->port = PORT_FIBRE;
		ecmd->transceiver = XCVR_EXTERNAL;
	}

	/* -1 encodes "unknown" for both fields until proven otherwise */
	speed = -1;
	ecmd->duplex = -1;

	if (netif_running(netdev)) {
		if (netif_carrier_ok(netdev)) {
			speed = adapter->link_speed;
			/* link_duplex appears offset by one from the
			 * ethtool DUPLEX_* encoding — note the -1 */
			ecmd->duplex = adapter->link_duplex - 1;
		}
	} else {
		/* Interface down: derive link state from the STATUS reg */
		u32 status = er32(STATUS);
		if (status & E1000_STATUS_LU) {
			if (status & E1000_STATUS_SPEED_1000)
				speed = SPEED_1000;
			else if (status & E1000_STATUS_SPEED_100)
				speed = SPEED_100;
			else
				speed = SPEED_10;

			if (status & E1000_STATUS_FD)
				ecmd->duplex = DUPLEX_FULL;
			else
				ecmd->duplex = DUPLEX_HALF;
		}
	}

	ethtool_cmd_speed_set(ecmd, speed);
	/* Fiber always reports autoneg enabled */
	ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
			 hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	/* MDI-X => 2; MDI =>1; Invalid =>0 */
	if ((hw->phy.media_type == e1000_media_type_copper) &&
	    netif_carrier_ok(netdev))
		ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
		                                      ETH_TP_MDI;
	else
		ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;

	return 0;
}
203
204static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
205{
206 struct e1000_mac_info *mac = &adapter->hw.mac;
207
208 mac->autoneg = 0;
209
210 /* Make sure dplx is at most 1 bit and lsb of speed is not set
211 * for the switch() below to work */
212 if ((spd & 1) || (dplx & ~1))
213 goto err_inval;
214
215 /* Fiber NICs only allow 1000 gbps Full duplex */
216 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
217 spd != SPEED_1000 &&
218 dplx != DUPLEX_FULL) {
219 goto err_inval;
220 }
221
222 switch (spd + dplx) {
223 case SPEED_10 + DUPLEX_HALF:
224 mac->forced_speed_duplex = ADVERTISE_10_HALF;
225 break;
226 case SPEED_10 + DUPLEX_FULL:
227 mac->forced_speed_duplex = ADVERTISE_10_FULL;
228 break;
229 case SPEED_100 + DUPLEX_HALF:
230 mac->forced_speed_duplex = ADVERTISE_100_HALF;
231 break;
232 case SPEED_100 + DUPLEX_FULL:
233 mac->forced_speed_duplex = ADVERTISE_100_FULL;
234 break;
235 case SPEED_1000 + DUPLEX_FULL:
236 mac->autoneg = 1;
237 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
238 break;
239 case SPEED_1000 + DUPLEX_HALF: /* not supported */
240 default:
241 goto err_inval;
242 }
243 return 0;
244
245err_inval:
246 e_err("Unsupported Speed/Duplex configuration\n");
247 return -EINVAL;
248}
249
/*
 * Apply new link settings (ethtool .set_settings hook): either enable
 * autoneg with the requested advertisement, or force a fixed
 * speed/duplex, then reset the link so the change takes effect.
 */
static int e1000_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * When SoL/IDER sessions are active, autoneg/speed/duplex
	 * cannot be changed
	 */
	if (e1000_check_reset_block(hw)) {
		e_err("Cannot change link characteristics when SoL/IDER is "
		      "active.\n");
		return -EINVAL;
	}

	/* Serialize against any in-progress reset */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		hw->mac.autoneg = 1;
		if (hw->phy.media_type == e1000_media_type_fiber)
			hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
						     ADVERTISED_FIBRE |
						     ADVERTISED_Autoneg;
		else
			hw->phy.autoneg_advertised = ecmd->advertising |
						     ADVERTISED_TP |
						     ADVERTISED_Autoneg;
		/* echo back what will actually be advertised */
		ecmd->advertising = hw->phy.autoneg_advertised;
		if (adapter->fc_autoneg)
			hw->fc.requested_mode = e1000_fc_default;
	} else {
		u32 speed = ethtool_cmd_speed(ecmd);
		/* drop the reset lock before bailing on a bad combination */
		if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
			clear_bit(__E1000_RESETTING, &adapter->state);
			return -EINVAL;
		}
	}

	/* reset the link */

	if (netif_running(adapter->netdev)) {
		e1000e_down(adapter);
		e1000e_up(adapter);
	} else {
		e1000e_reset(adapter);
	}

	clear_bit(__E1000_RESETTING, &adapter->state);
	return 0;
}
302
303static void e1000_get_pauseparam(struct net_device *netdev,
304 struct ethtool_pauseparam *pause)
305{
306 struct e1000_adapter *adapter = netdev_priv(netdev);
307 struct e1000_hw *hw = &adapter->hw;
308
309 pause->autoneg =
310 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
311
312 if (hw->fc.current_mode == e1000_fc_rx_pause) {
313 pause->rx_pause = 1;
314 } else if (hw->fc.current_mode == e1000_fc_tx_pause) {
315 pause->tx_pause = 1;
316 } else if (hw->fc.current_mode == e1000_fc_full) {
317 pause->rx_pause = 1;
318 pause->tx_pause = 1;
319 }
320}
321
/*
 * Apply flow-control settings (ethtool .set_pauseparam hook).  With
 * autoneg enabled the link is reset so flow control renegotiates;
 * otherwise the requested mode is forced directly into the MAC.
 */
static int e1000_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int retval = 0;

	adapter->fc_autoneg = pause->autoneg;

	/* Serialize against any in-progress reset */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
		hw->fc.requested_mode = e1000_fc_default;
		if (netif_running(adapter->netdev)) {
			e1000e_down(adapter);
			e1000e_up(adapter);
		} else {
			e1000e_reset(adapter);
		}
	} else {
		/* map the rx/tx pause flag pair onto a forced fc mode */
		if (pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_full;
		else if (pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_rx_pause;
		else if (!pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_tx_pause;
		else if (!pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_none;

		hw->fc.current_mode = hw->fc.requested_mode;

		if (hw->phy.media_type == e1000_media_type_fiber) {
			retval = hw->mac.ops.setup_link(hw);
			/* implicit goto out */
		} else {
			retval = e1000e_force_mac_fc(hw);
			if (retval)
				goto out;
			e1000e_set_fc_watermarks(hw);
		}
	}

out:
	clear_bit(__E1000_RESETTING, &adapter->state);
	return retval;
}
369
370static u32 e1000_get_rx_csum(struct net_device *netdev)
371{
372 struct e1000_adapter *adapter = netdev_priv(netdev);
373 return adapter->flags & FLAG_RX_CSUM_ENABLED;
374}
375
376static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
377{
378 struct e1000_adapter *adapter = netdev_priv(netdev);
379
380 if (data)
381 adapter->flags |= FLAG_RX_CSUM_ENABLED;
382 else
383 adapter->flags &= ~FLAG_RX_CSUM_ENABLED;
384
385 if (netif_running(netdev))
386 e1000e_reinit_locked(adapter);
387 else
388 e1000e_reset(adapter);
389 return 0;
390}
391
392static u32 e1000_get_tx_csum(struct net_device *netdev)
393{
394 return (netdev->features & NETIF_F_HW_CSUM) != 0;
395}
396
397static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
398{
399 if (data)
400 netdev->features |= NETIF_F_HW_CSUM;
401 else
402 netdev->features &= ~NETIF_F_HW_CSUM;
403
404 return 0;
405}
406
407static int e1000_set_tso(struct net_device *netdev, u32 data)
408{
409 struct e1000_adapter *adapter = netdev_priv(netdev);
410
411 if (data) {
412 netdev->features |= NETIF_F_TSO;
413 netdev->features |= NETIF_F_TSO6;
414 } else {
415 netdev->features &= ~NETIF_F_TSO;
416 netdev->features &= ~NETIF_F_TSO6;
417 }
418
419 adapter->flags |= FLAG_TSO_FORCE;
420 return 0;
421}
422
423static u32 e1000_get_msglevel(struct net_device *netdev)
424{
425 struct e1000_adapter *adapter = netdev_priv(netdev);
426 return adapter->msg_enable;
427}
428
429static void e1000_set_msglevel(struct net_device *netdev, u32 data)
430{
431 struct e1000_adapter *adapter = netdev_priv(netdev);
432 adapter->msg_enable = data;
433}
434
/* Size in bytes of the register dump produced by e1000_get_regs() */
static int e1000_get_regs_len(struct net_device *netdev)
{
#define E1000_REGS_LEN 32 /* overestimate */
	return E1000_REGS_LEN * sizeof(u32);
}
440
/*
 * Dump a fixed set of MAC and PHY registers (ethtool .get_regs hook).
 * Slot assignments mirror the legacy e1000 register dump so existing
 * user-space tools keep working.
 */
static void e1000_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u16 phy_data;

	memset(p, 0, E1000_REGS_LEN * sizeof(u32));

	/* version: 1.<pdev revision>.<pdev device id> */
	regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
			adapter->pdev->device;

	regs_buff[0]  = er32(CTRL);
	regs_buff[1]  = er32(STATUS);

	/* Rx ring registers */
	regs_buff[2]  = er32(RCTL);
	regs_buff[3]  = er32(RDLEN);
	regs_buff[4]  = er32(RDH);
	regs_buff[5]  = er32(RDT);
	regs_buff[6]  = er32(RDTR);

	/* Tx ring registers */
	regs_buff[7]  = er32(TCTL);
	regs_buff[8]  = er32(TDLEN);
	regs_buff[9]  = er32(TDH);
	regs_buff[10] = er32(TDT);
	regs_buff[11] = er32(TIDV);

	regs_buff[12] = adapter->hw.phy.type;  /* PHY type (IGP=1, M88=0) */

	/* ethtool doesn't use anything past this point, so all this
	 * code is likely legacy junk for apps that may or may not
	 * exist */
	if (hw->phy.type == e1000_phy_m88) {
		e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
		regs_buff[13] = (u32)phy_data; /* cable length */
		regs_buff[14] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[15] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[16] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
		regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
		regs_buff[18] = regs_buff[13]; /* cable polarity */
		regs_buff[19] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[20] = regs_buff[17]; /* polarity correction */
		/* phy receive errors */
		regs_buff[22] = adapter->phy_stats.receive_errors;
		regs_buff[23] = regs_buff[13]; /* mdix mode */
	}
	/* slot 21 is filled regardless of PHY type */
	regs_buff[21] = 0; /* was idle_errors */
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
	regs_buff[24] = (u32)phy_data;  /* phy local receiver status */
	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
}
494
495static int e1000_get_eeprom_len(struct net_device *netdev)
496{
497 struct e1000_adapter *adapter = netdev_priv(netdev);
498 return adapter->hw.nvm.word_size * 2;
499}
500
/*
 * Read an arbitrary byte range from the EEPROM (ethtool .get_eeprom
 * hook).  The NVM is word (16-bit) addressable, so the byte range is
 * widened to whole words and the odd leading byte, if any, is skipped
 * when copying back to the caller.
 */
static int e1000_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word;
	int last_word;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	/* identify the device so user space can sanity-check the dump */
	eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16);

	/* convert the byte range into an inclusive word range */
	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;

	eeprom_buff = kmalloc(sizeof(u16) *
			      (last_word - first_word + 1), GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
		/* SPI parts support a single bulk read */
		ret_val = e1000_read_nvm(hw, first_word,
					 last_word - first_word + 1,
					 eeprom_buff);
	} else {
		/* otherwise read one word at a time */
		for (i = 0; i < last_word - first_word + 1; i++) {
			ret_val = e1000_read_nvm(hw, first_word + i, 1,
						 &eeprom_buff[i]);
			if (ret_val)
				break;
		}
	}

	if (ret_val) {
		/* a read error occurred, throw away the result */
		memset(eeprom_buff, 0xff, sizeof(u16) *
		       (last_word - first_word + 1));
	} else {
		/* Device's eeprom is always little-endian, word addressable */
		for (i = 0; i < last_word - first_word + 1; i++)
			le16_to_cpus(&eeprom_buff[i]);
	}

	/* skip the unwanted leading byte when the offset was odd */
	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
553
/*
 * Write an arbitrary byte range to the EEPROM (ethtool .set_eeprom
 * hook).  Since the NVM is word addressable, partially-covered first
 * and last words are read, modified and written back.  The NVM
 * checksum is refreshed afterwards where required.
 */
static int e1000_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len;
	int first_word;
	int last_word;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EOPNOTSUPP;

	/* the caller must echo back the magic from get_eeprom */
	if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16)))
		return -EFAULT;

	if (adapter->flags & FLAG_READ_ONLY_NVM)
		return -EINVAL;

	max_len = hw->nvm.word_size * 2;

	/* convert the byte range into an inclusive word range */
	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = (void *)eeprom_buff;

	if (eeprom->offset & 1) {
		/* need read/modify/write of first changed EEPROM word */
		/* only the second byte of the word is being modified */
		ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]);
		/* NOTE: arithmetic on void * is a GCC extension (kernel ok) */
		ptr++;
	}
	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0))
		/* need read/modify/write of last changed EEPROM word */
		/* only the first byte of the word is being modified */
		ret_val = e1000_read_nvm(hw, last_word, 1,
					 &eeprom_buff[last_word - first_word]);

	if (ret_val)
		goto out;

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	/* splice the caller's bytes over the (byte-aligned) buffer */
	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);

	ret_val = e1000_write_nvm(hw, first_word,
				  last_word - first_word + 1, eeprom_buff);

	if (ret_val)
		goto out;

	/*
	 * Update the checksum over the first part of the EEPROM if needed
	 * and flush shadow RAM for applicable controllers
	 */
	if ((first_word <= NVM_CHECKSUM_REG) ||
	    (hw->mac.type == e1000_82583) ||
	    (hw->mac.type == e1000_82574) ||
	    (hw->mac.type == e1000_82573))
		ret_val = e1000e_update_nvm_checksum(hw);

out:
	kfree(eeprom_buff);
	return ret_val;
}
630
/*
 * e1000_get_drvinfo - report driver identification to ethtool
 *
 * The EEPROM image version is reported as the "firmware" version since
 * these PCIe parts carry no separate firmware revision.
 * NOTE(review): strncpy() with size - 1 does not write the final byte;
 * this assumes the ethtool core hands us a zeroed struct — confirm
 * against the caller.
 */
static void e1000_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	char firmware_version[32];

	strncpy(drvinfo->driver, e1000e_driver_name,
		sizeof(drvinfo->driver) - 1);
	strncpy(drvinfo->version, e1000e_driver_version,
		sizeof(drvinfo->version) - 1);

	/*
	 * EEPROM image version # is reported as firmware version # for
	 * PCI-E controllers
	 */
	snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
		 (adapter->eeprom_vers & 0xF000) >> 12,
		 (adapter->eeprom_vers & 0x0FF0) >> 4,
		 (adapter->eeprom_vers & 0x000F));

	strncpy(drvinfo->fw_version, firmware_version,
		sizeof(drvinfo->fw_version) - 1);
	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info) - 1);
	drvinfo->regdump_len = e1000_get_regs_len(netdev);
	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
658
659static void e1000_get_ringparam(struct net_device *netdev,
660 struct ethtool_ringparam *ring)
661{
662 struct e1000_adapter *adapter = netdev_priv(netdev);
663 struct e1000_ring *tx_ring = adapter->tx_ring;
664 struct e1000_ring *rx_ring = adapter->rx_ring;
665
666 ring->rx_max_pending = E1000_MAX_RXD;
667 ring->tx_max_pending = E1000_MAX_TXD;
668 ring->rx_mini_max_pending = 0;
669 ring->rx_jumbo_max_pending = 0;
670 ring->rx_pending = rx_ring->count;
671 ring->tx_pending = tx_ring->count;
672 ring->rx_mini_pending = 0;
673 ring->rx_jumbo_pending = 0;
674}
675
676static int e1000_set_ringparam(struct net_device *netdev,
677 struct ethtool_ringparam *ring)
678{
679 struct e1000_adapter *adapter = netdev_priv(netdev);
680 struct e1000_ring *tx_ring, *tx_old;
681 struct e1000_ring *rx_ring, *rx_old;
682 int err;
683
684 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
685 return -EINVAL;
686
687 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
688 usleep_range(1000, 2000);
689
690 if (netif_running(adapter->netdev))
691 e1000e_down(adapter);
692
693 tx_old = adapter->tx_ring;
694 rx_old = adapter->rx_ring;
695
696 err = -ENOMEM;
697 tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
698 if (!tx_ring)
699 goto err_alloc_tx;
700
701 rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
702 if (!rx_ring)
703 goto err_alloc_rx;
704
705 adapter->tx_ring = tx_ring;
706 adapter->rx_ring = rx_ring;
707
708 rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD);
709 rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD));
710 rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE);
711
712 tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD);
713 tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD));
714 tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);
715
716 if (netif_running(adapter->netdev)) {
717 /* Try to get new resources before deleting old */
718 err = e1000e_setup_rx_resources(adapter);
719 if (err)
720 goto err_setup_rx;
721 err = e1000e_setup_tx_resources(adapter);
722 if (err)
723 goto err_setup_tx;
724
725 /*
726 * restore the old in order to free it,
727 * then add in the new
728 */
729 adapter->rx_ring = rx_old;
730 adapter->tx_ring = tx_old;
731 e1000e_free_rx_resources(adapter);
732 e1000e_free_tx_resources(adapter);
733 kfree(tx_old);
734 kfree(rx_old);
735 adapter->rx_ring = rx_ring;
736 adapter->tx_ring = tx_ring;
737 err = e1000e_up(adapter);
738 if (err)
739 goto err_setup;
740 }
741
742 clear_bit(__E1000_RESETTING, &adapter->state);
743 return 0;
744err_setup_tx:
745 e1000e_free_rx_resources(adapter);
746err_setup_rx:
747 adapter->rx_ring = rx_old;
748 adapter->tx_ring = tx_old;
749 kfree(rx_ring);
750err_alloc_rx:
751 kfree(tx_ring);
752err_alloc_tx:
753 e1000e_up(adapter);
754err_setup:
755 clear_bit(__E1000_RESETTING, &adapter->state);
756 return err;
757}
758
759static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
760 int reg, int offset, u32 mask, u32 write)
761{
762 u32 pat, val;
763 static const u32 test[] = {
764 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
765 for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
766 E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
767 (test[pat] & write));
768 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
769 if (val != (test[pat] & write & mask)) {
770 e_err("pattern test reg %04X failed: got 0x%08X "
771 "expected 0x%08X\n", reg + offset, val,
772 (test[pat] & write & mask));
773 *data = reg;
774 return 1;
775 }
776 }
777 return 0;
778}
779
780static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
781 int reg, u32 mask, u32 write)
782{
783 u32 val;
784 __ew32(&adapter->hw, reg, write & mask);
785 val = __er32(&adapter->hw, reg);
786 if ((write & mask) != (val & mask)) {
787 e_err("set/check reg %04X test failed: got 0x%08X "
788 "expected 0x%08X\n", reg, (val & mask), (write & mask));
789 *data = reg;
790 return 1;
791 }
792 return 0;
793}
/*
 * Wrappers used by e1000_reg_test: the helper functions above record
 * the failing register in *data, so on failure these macros simply
 * return 1 from the *calling* function (hence the bare "return 1").
 */
#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write)                       \
	do {                                                                   \
		if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
			return 1;                                              \
	} while (0)
#define REG_PATTERN_TEST(reg, mask, write)                                     \
	REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)

#define REG_SET_AND_CHECK(reg, mask, write)                                    \
	do {                                                                   \
		if (reg_set_and_check(adapter, data, reg, mask, write))        \
			return 1;                                              \
	} while (0)
807
/*
 * e1000_reg_test - exercise MAC registers with pattern and set/check writes
 * @adapter: board private structure
 * @data: set to the offset of the first failing register (0 on pass)
 *
 * Returns 0 when every register passes.  The REG_* macros return 1 from
 * this function on the first failure, with *data already filled in.
 */
static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	u32 value;
	u32 before;
	u32 after;
	u32 i;
	u32 toggle;
	u32 mask;

	/*
	 * The status register is Read Only, so a write should fail.
	 * Some bits that get toggled are ignored.
	 */
	switch (mac->type) {
	/* there are several bits on newer hardware that are r/w */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		toggle = 0x7FFFF3FF;
		break;
	default:
		toggle = 0x7FFFF033;
		break;
	}

	before = er32(STATUS);
	value = (er32(STATUS) & toggle);
	ew32(STATUS, toggle);
	after = er32(STATUS) & toggle;
	if (value != after) {
		e_err("failed STATUS register test got: 0x%08X expected: "
		      "0x%08X\n", after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ew32(STATUS, before);

	/* flow-control and VLAN ethertype registers are absent on ICH parts */
	if (!(adapter->flags & FLAG_IS_ICH)) {
		REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF);
	}

	REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF);
	REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
	REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
	REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF);

	REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);

	/* RCTL writable-bit mask differs between ICH and other families */
	before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE);
	REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
	REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);

	REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
	if (!(adapter->flags & FLAG_IS_ICH))
		REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
	/* receive-address high words: bit 18 is additionally writable on
	 * ICH10/PCH parts */
	mask = 0x8003FFFF;
	switch (mac->type) {
	case e1000_ich10lan:
	case e1000_pchlan:
	case e1000_pch2lan:
		mask |= (1 << 18);
		break;
	default:
		break;
	}
	for (i = 0; i < mac->rar_entry_count; i++)
		REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
				       mask, 0xFFFFFFFF);

	for (i = 0; i < mac->mta_reg_count; i++)
		REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);

	*data = 0;
	return 0;
}
898
899static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
900{
901 u16 temp;
902 u16 checksum = 0;
903 u16 i;
904
905 *data = 0;
906 /* Read and add up the contents of the EEPROM */
907 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
908 if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
909 *data = 1;
910 return *data;
911 }
912 checksum += temp;
913 }
914
915 /* If Checksum is not Correct return error else test passed */
916 if ((checksum != (u16) NVM_SUM) && !(*data))
917 *data = 2;
918
919 return *data;
920}
921
922static irqreturn_t e1000_test_intr(int irq, void *data)
923{
924 struct net_device *netdev = (struct net_device *) data;
925 struct e1000_adapter *adapter = netdev_priv(netdev);
926 struct e1000_hw *hw = &adapter->hw;
927
928 adapter->test_icr |= er32(ICR);
929
930 return IRQ_HANDLED;
931}
932
/*
 * e1000_intr_test - verify the interrupt cause/mask logic
 * @adapter: board private structure
 * @data: diagnostic result (0 = pass; 1/3/4/5 identify the failure mode)
 *
 * Temporarily drops to legacy interrupt mode (MSI/MSI-X is not tested),
 * installs e1000_test_intr, then for each of the low 10 cause bits
 * checks that a masked interrupt does NOT fire, an enabled one DOES,
 * and (for an unshared line) that no other cause leaks through.
 * Returns 0 on success, -1 if the test IRQ could not be hooked.
 */
static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 mask;
	u32 shared_int = 1;
	u32 irq = adapter->pdev->irq;
	int i;
	int ret_val = 0;
	int int_mode = E1000E_INT_MODE_LEGACY;

	*data = 0;

	/* NOTE: we don't test MSI/MSI-X interrupts here, yet */
	if (adapter->int_mode == E1000E_INT_MODE_MSIX) {
		/* remember the original mode so it is restored at "out:" */
		int_mode = adapter->int_mode;
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
		e1000e_set_interrupt_capability(adapter);
	}
	/* Hook up test interrupt handler just for this test */
	if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
			 netdev)) {
		/* got the line exclusively */
		shared_int = 0;
	} else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
		   netdev->name, netdev)) {
		*data = 1;
		ret_val = -1;
		goto out;
	}
	e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));

	/* Disable all the interrupts */
	ew32(IMC, 0xFFFFFFFF);
	e1e_flush();
	usleep_range(10000, 20000);

	/* Test each interrupt */
	for (i = 0; i < 10; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		/* some cause bits do not exist / misbehave on ICH parts */
		if (adapter->flags & FLAG_IS_ICH) {
			switch (mask) {
			case E1000_ICR_RXSEQ:
				continue;
			case 0x00000100:
				if (adapter->hw.mac.type == e1000_ich8lan ||
				    adapter->hw.mac.type == e1000_ich9lan)
					continue;
				break;
			default:
				break;
			}
		}

		if (!shared_int) {
			/*
			 * Disable the interrupt to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			ew32(IMC, mask);
			ew32(ICS, mask);
			e1e_flush();
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in
		 * the cause register and then force the same
		 * interrupt and see if one gets posted.  If
		 * an interrupt was not posted to the bus, the
		 * test failed.
		 */
		adapter->test_icr = 0;
		ew32(IMS, mask);
		ew32(ICS, mask);
		e1e_flush();
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			ew32(IMC, ~mask & 0x00007FFF);
			ew32(ICS, ~mask & 0x00007FFF);
			e1e_flush();
			usleep_range(10000, 20000);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	ew32(IMC, 0xFFFFFFFF);
	e1e_flush();
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

out:
	/* restore MSI-X if that is what the driver was using */
	if (int_mode == E1000E_INT_MODE_MSIX) {
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = int_mode;
		e1000e_set_interrupt_capability(adapter);
	}

	return ret_val;
}
1065
1066static void e1000_free_desc_rings(struct e1000_adapter *adapter)
1067{
1068 struct e1000_ring *tx_ring = &adapter->test_tx_ring;
1069 struct e1000_ring *rx_ring = &adapter->test_rx_ring;
1070 struct pci_dev *pdev = adapter->pdev;
1071 int i;
1072
1073 if (tx_ring->desc && tx_ring->buffer_info) {
1074 for (i = 0; i < tx_ring->count; i++) {
1075 if (tx_ring->buffer_info[i].dma)
1076 dma_unmap_single(&pdev->dev,
1077 tx_ring->buffer_info[i].dma,
1078 tx_ring->buffer_info[i].length,
1079 DMA_TO_DEVICE);
1080 if (tx_ring->buffer_info[i].skb)
1081 dev_kfree_skb(tx_ring->buffer_info[i].skb);
1082 }
1083 }
1084
1085 if (rx_ring->desc && rx_ring->buffer_info) {
1086 for (i = 0; i < rx_ring->count; i++) {
1087 if (rx_ring->buffer_info[i].dma)
1088 dma_unmap_single(&pdev->dev,
1089 rx_ring->buffer_info[i].dma,
1090 2048, DMA_FROM_DEVICE);
1091 if (rx_ring->buffer_info[i].skb)
1092 dev_kfree_skb(rx_ring->buffer_info[i].skb);
1093 }
1094 }
1095
1096 if (tx_ring->desc) {
1097 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1098 tx_ring->dma);
1099 tx_ring->desc = NULL;
1100 }
1101 if (rx_ring->desc) {
1102 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1103 rx_ring->dma);
1104 rx_ring->desc = NULL;
1105 }
1106
1107 kfree(tx_ring->buffer_info);
1108 tx_ring->buffer_info = NULL;
1109 kfree(rx_ring->buffer_info);
1110 rx_ring->buffer_info = NULL;
1111}
1112
/*
 * e1000_setup_desc_rings - build small Tx/Rx rings for the loopback test
 * @adapter: board private structure
 *
 * Allocates dedicated test rings (test_tx_ring/test_rx_ring), maps a
 * 1024-byte skb per Tx descriptor and a 2048-byte skb per Rx
 * descriptor, and programs the hardware ring registers directly.
 * Returns 0 on success or a small positive code (1-8) identifying the
 * allocation/mapping step that failed; on failure everything already
 * built is released via e1000_free_desc_rings().
 */
static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = &adapter->test_tx_ring;
	struct e1000_ring *rx_ring = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	int i;
	int ret_val;

	/* Setup Tx descriptor ring and Tx buffers */

	if (!tx_ring->count)
		tx_ring->count = E1000_DEFAULT_TXD;

	tx_ring->buffer_info = kcalloc(tx_ring->count,
				       sizeof(struct e1000_buffer),
				       GFP_KERNEL);
	if (!(tx_ring->buffer_info)) {
		ret_val = 1;
		goto err_nomem;
	}

	/* descriptor area must be 4 KiB aligned for the hardware */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		ret_val = 2;
		goto err_nomem;
	}
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	/* point the hardware at the test ring and enable transmit */
	ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
	ew32(TDBAH, ((u64) tx_ring->dma >> 32));
	ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc));
	ew32(TDH, 0);
	ew32(TDT, 0);
	ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
	     E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
	     E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);

	for (i = 0; i < tx_ring->count; i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
		struct sk_buff *skb;
		unsigned int skb_size = 1024;

		skb = alloc_skb(skb_size, GFP_KERNEL);
		if (!skb) {
			ret_val = 3;
			goto err_nomem;
		}
		skb_put(skb, skb_size);
		tx_ring->buffer_info[i].skb = skb;
		tx_ring->buffer_info[i].length = skb->len;
		tx_ring->buffer_info[i].dma =
			dma_map_single(&pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev,
				      tx_ring->buffer_info[i].dma)) {
			ret_val = 4;
			goto err_nomem;
		}
		/* every descriptor is a standalone end-of-packet frame
		 * with status reporting so the test can poll completion */
		tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
		tx_desc->lower.data = cpu_to_le32(skb->len);
		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
						   E1000_TXD_CMD_IFCS |
						   E1000_TXD_CMD_RS);
		tx_desc->upper.data = 0;
	}

	/* Setup Rx descriptor ring and Rx buffers */

	if (!rx_ring->count)
		rx_ring->count = E1000_DEFAULT_RXD;

	rx_ring->buffer_info = kcalloc(rx_ring->count,
				       sizeof(struct e1000_buffer),
				       GFP_KERNEL);
	if (!(rx_ring->buffer_info)) {
		ret_val = 5;
		goto err_nomem;
	}

	rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc) {
		ret_val = 6;
		goto err_nomem;
	}
	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	/* quiesce the receiver while the ring registers are reprogrammed */
	rctl = er32(RCTL);
	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
		ew32(RCTL, rctl & ~E1000_RCTL_EN);
	ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
	ew32(RDBAH, ((u64) rx_ring->dma >> 32));
	ew32(RDLEN, rx_ring->size);
	ew32(RDH, 0);
	ew32(RDT, 0);
	/* promiscuous receive of 2 KiB buffers with CRC stripped, no
	 * loopback mode set here (the loopback setup routines do that) */
	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
		E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
		E1000_RCTL_SBP | E1000_RCTL_SECRC |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
	ew32(RCTL, rctl);

	for (i = 0; i < rx_ring->count; i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
		struct sk_buff *skb;

		skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL);
		if (!skb) {
			ret_val = 7;
			goto err_nomem;
		}
		skb_reserve(skb, NET_IP_ALIGN);
		rx_ring->buffer_info[i].skb = skb;
		rx_ring->buffer_info[i].dma =
			dma_map_single(&pdev->dev, skb->data, 2048,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev,
				      rx_ring->buffer_info[i].dma)) {
			ret_val = 8;
			goto err_nomem;
		}
		rx_desc->buffer_addr =
			cpu_to_le64(rx_ring->buffer_info[i].dma);
		memset(skb->data, 0x00, skb->len);
	}

	return 0;

err_nomem:
	e1000_free_desc_rings(adapter);
	return ret_val;
}
1253
1254static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
1255{
1256 /* Write out to PHY registers 29 and 30 to disable the Receiver. */
1257 e1e_wphy(&adapter->hw, 29, 0x001F);
1258 e1e_wphy(&adapter->hw, 30, 0x8FFC);
1259 e1e_wphy(&adapter->hw, 29, 0x001A);
1260 e1e_wphy(&adapter->hw, 30, 0x8FF0);
1261}
1262
/*
 * e1000_integrated_phy_loopback - put the integrated PHY into loopback
 * @adapter: board private structure
 *
 * Disables autoneg, applies PHY-family-specific loopback workarounds,
 * forces 1000/FD (100/FD on IFE PHYs) and mirrors that speed/duplex in
 * the MAC CTRL register.  Returns 0 on success or a PHY-access error.
 */
static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_reg = 0;
	u16 phy_reg = 0;
	s32 ret_val = 0;

	hw->mac.autoneg = 0;

	/* IFE PHYs only support 10/100, so take a separate 100 Mb/s path */
	if (hw->phy.type == e1000_phy_ife) {
		/* force 100, set loopback */
		e1e_wphy(hw, PHY_CONTROL, 0x6100);

		/* Now set up the MAC to the same speed/duplex as the PHY. */
		ctrl_reg = er32(CTRL);
		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
			     E1000_CTRL_SPD_100 |/* Force Speed to 100 */
			     E1000_CTRL_FD);	 /* Force Duplex to FULL */

		ew32(CTRL, ctrl_reg);
		e1e_flush();
		udelay(500);

		return 0;
	}

	/* Specific PHY configuration for loopback */
	switch (hw->phy.type) {
	case e1000_phy_m88:
		/* Auto-MDI/MDIX Off */
		e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
		/* reset to update Auto-MDI/MDIX */
		e1e_wphy(hw, PHY_CONTROL, 0x9140);
		/* autoneg off */
		e1e_wphy(hw, PHY_CONTROL, 0x8140);
		break;
	case e1000_phy_gg82563:
		e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
		break;
	case e1000_phy_bm:
		/* Set Default MAC Interface speed to 1GB */
		e1e_rphy(hw, PHY_REG(2, 21), &phy_reg);
		phy_reg &= ~0x0007;
		phy_reg |= 0x006;
		e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
		/* Assert SW reset for above settings to take effect */
		e1000e_commit_phy(hw);
		mdelay(1);
		/* Force Full Duplex */
		e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
		e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
		/* Set Link Up (in force link) */
		e1e_rphy(hw, PHY_REG(776, 16), &phy_reg);
		e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040);
		/* Force Link */
		e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
		e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040);
		/* Set Early Link Enable */
		e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
		e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
		break;
	case e1000_phy_82577:
	case e1000_phy_82578:
		/* Workaround: K1 must be disabled for stable 1Gbps operation */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val) {
			e_err("Cannot setup 1Gbps loopback.\n");
			return ret_val;
		}
		e1000_configure_k1_ich8lan(hw, false);
		hw->phy.ops.release(hw);
		break;
	case e1000_phy_82579:
		/* Disable PHY energy detect power down */
		e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
		e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
		/* Disable full chip energy detect */
		e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
		e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
		/* Enable loopback on the PHY */
#define I82577_PHY_LBK_CTRL	19
		e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
		break;
	default:
		break;
	}

	/* force 1000, set loopback */
	e1e_wphy(hw, PHY_CONTROL, 0x4140);
	mdelay(250);

	/* Now set up the MAC to the same speed/duplex as the PHY. */
	ctrl_reg = er32(CTRL);
	ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
	ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
		     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
		     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
		     E1000_CTRL_FD);	 /* Force Duplex to FULL */

	if (adapter->flags & FLAG_IS_ICH)
		ctrl_reg |= E1000_CTRL_SLU;	/* Set Link Up */

	if (hw->phy.media_type == e1000_media_type_copper &&
	    hw->phy.type == e1000_phy_m88) {
		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
	} else {
		/*
		 * Set the ILOS bit on the fiber Nic if half duplex link is
		 * detected.
		 */
		if ((er32(STATUS) & E1000_STATUS_FD) == 0)
			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
	}

	ew32(CTRL, ctrl_reg);

	/*
	 * Disable the receiver on the PHY so when a cable is plugged in, the
	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
	 */
	if (hw->phy.type == e1000_phy_m88)
		e1000_phy_disable_receiver(adapter);

	udelay(500);

	return 0;
}
1392
1393static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
1394{
1395 struct e1000_hw *hw = &adapter->hw;
1396 u32 ctrl = er32(CTRL);
1397 int link = 0;
1398
1399 /* special requirements for 82571/82572 fiber adapters */
1400
1401 /*
1402 * jump through hoops to make sure link is up because serdes
1403 * link is hardwired up
1404 */
1405 ctrl |= E1000_CTRL_SLU;
1406 ew32(CTRL, ctrl);
1407
1408 /* disable autoneg */
1409 ctrl = er32(TXCW);
1410 ctrl &= ~(1 << 31);
1411 ew32(TXCW, ctrl);
1412
1413 link = (er32(STATUS) & E1000_STATUS_LU);
1414
1415 if (!link) {
1416 /* set invert loss of signal */
1417 ctrl = er32(CTRL);
1418 ctrl |= E1000_CTRL_ILOS;
1419 ew32(CTRL, ctrl);
1420 }
1421
1422 /*
1423 * special write to serdes control register to enable SerDes analog
1424 * loopback
1425 */
1426#define E1000_SERDES_LB_ON 0x410
1427 ew32(SCTL, E1000_SERDES_LB_ON);
1428 e1e_flush();
1429 usleep_range(10000, 20000);
1430
1431 return 0;
1432}
1433
/* only call this for fiber/serdes connections to es2lan */
/*
 * e1000_set_es2lan_mac_loopback - MAC-level loopback for 80003es2lan
 * @adapter: board private structure
 *
 * Takes the device out of SerDes mode, forces 1000/FD with link up,
 * sets RCTL MAC loopback and programs the KMRN interface for GMII
 * 1GB/FD test operation.  Always returns 0.
 */
static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrlext = er32(CTRL_EXT);
	u32 ctrl = er32(CTRL);

	/*
	 * save CTRL_EXT to restore later, reuse an empty variable (unused
	 * on mac_type 80003es2lan)
	 */
	adapter->tx_fifo_head = ctrlext;

	/* clear the serdes mode bits, putting the device into mac loopback */
	ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
	ew32(CTRL_EXT, ctrlext);

	/* force speed to 1000/FD, link up */
	ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
	ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX |
		 E1000_CTRL_SPD_1000 | E1000_CTRL_FD);
	ew32(CTRL, ctrl);

	/* set mac loopback */
	ctrl = er32(RCTL);
	ctrl |= E1000_RCTL_LBM_MAC;
	ew32(RCTL, ctrl);

	/* set testing mode parameters (no need to reset later) */
#define KMRNCTRLSTA_OPMODE (0x1F << 16)
#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582
	ew32(KMRNCTRLSTA,
	     (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));

	return 0;
}
1470
1471static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
1472{
1473 struct e1000_hw *hw = &adapter->hw;
1474 u32 rctl;
1475
1476 if (hw->phy.media_type == e1000_media_type_fiber ||
1477 hw->phy.media_type == e1000_media_type_internal_serdes) {
1478 switch (hw->mac.type) {
1479 case e1000_80003es2lan:
1480 return e1000_set_es2lan_mac_loopback(adapter);
1481 break;
1482 case e1000_82571:
1483 case e1000_82572:
1484 return e1000_set_82571_fiber_loopback(adapter);
1485 break;
1486 default:
1487 rctl = er32(RCTL);
1488 rctl |= E1000_RCTL_LBM_TCVR;
1489 ew32(RCTL, rctl);
1490 return 0;
1491 }
1492 } else if (hw->phy.media_type == e1000_media_type_copper) {
1493 return e1000_integrated_phy_loopback(adapter);
1494 }
1495
1496 return 7;
1497}
1498
/*
 * e1000_loopback_cleanup - undo whatever e1000_setup_loopback_test() did
 * @adapter: board private structure
 *
 * Clears the RCTL loopback bits, restores SerDes/CTRL_EXT state on the
 * fiber/serdes parts, re-enables autoneg and takes the PHY out of
 * loopback.  Note the deliberate switch fallthroughs: es2lan falls
 * into the 82571/82572 case, and both fall into the default PHY
 * cleanup when not running fiber/serdes.
 */
static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	u16 phy_reg;

	rctl = er32(RCTL);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
	ew32(RCTL, rctl);

	switch (hw->mac.type) {
	case e1000_80003es2lan:
		if (hw->phy.media_type == e1000_media_type_fiber ||
		    hw->phy.media_type == e1000_media_type_internal_serdes) {
			/* restore CTRL_EXT, stealing space from tx_fifo_head */
			ew32(CTRL_EXT, adapter->tx_fifo_head);
			adapter->tx_fifo_head = 0;
		}
		/* fall through */
	case e1000_82571:
	case e1000_82572:
		if (hw->phy.media_type == e1000_media_type_fiber ||
		    hw->phy.media_type == e1000_media_type_internal_serdes) {
#define E1000_SERDES_LB_OFF 0x400
			ew32(SCTL, E1000_SERDES_LB_OFF);
			e1e_flush();
			usleep_range(10000, 20000);
			break;
		}
		/* Fall Through */
	default:
		hw->mac.autoneg = 1;
		if (hw->phy.type == e1000_phy_gg82563)
			e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180);
		e1e_rphy(hw, PHY_CONTROL, &phy_reg);
		if (phy_reg & MII_CR_LOOPBACK) {
			phy_reg &= ~MII_CR_LOOPBACK;
			e1e_wphy(hw, PHY_CONTROL, phy_reg);
			/* reset the PHY so the loopback exit takes effect */
			e1000e_commit_phy(hw);
		}
		break;
	}
}
1542
1543static void e1000_create_lbtest_frame(struct sk_buff *skb,
1544 unsigned int frame_size)
1545{
1546 memset(skb->data, 0xFF, frame_size);
1547 frame_size &= ~1;
1548 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
1549 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
1550 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
1551}
1552
1553static int e1000_check_lbtest_frame(struct sk_buff *skb,
1554 unsigned int frame_size)
1555{
1556 frame_size &= ~1;
1557 if (*(skb->data + 3) == 0xFF)
1558 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
1559 (*(skb->data + frame_size / 2 + 12) == 0xAF))
1560 return 0;
1561 return 13;
1562}
1563
1564static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1565{
1566 struct e1000_ring *tx_ring = &adapter->test_tx_ring;
1567 struct e1000_ring *rx_ring = &adapter->test_rx_ring;
1568 struct pci_dev *pdev = adapter->pdev;
1569 struct e1000_hw *hw = &adapter->hw;
1570 int i, j, k, l;
1571 int lc;
1572 int good_cnt;
1573 int ret_val = 0;
1574 unsigned long time;
1575
1576 ew32(RDT, rx_ring->count - 1);
1577
1578 /*
1579 * Calculate the loop count based on the largest descriptor ring
1580 * The idea is to wrap the largest ring a number of times using 64
1581 * send/receive pairs during each loop
1582 */
1583
1584 if (rx_ring->count <= tx_ring->count)
1585 lc = ((tx_ring->count / 64) * 2) + 1;
1586 else
1587 lc = ((rx_ring->count / 64) * 2) + 1;
1588
1589 k = 0;
1590 l = 0;
1591 for (j = 0; j <= lc; j++) { /* loop count loop */
1592 for (i = 0; i < 64; i++) { /* send the packets */
1593 e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1594 1024);
1595 dma_sync_single_for_device(&pdev->dev,
1596 tx_ring->buffer_info[k].dma,
1597 tx_ring->buffer_info[k].length,
1598 DMA_TO_DEVICE);
1599 k++;
1600 if (k == tx_ring->count)
1601 k = 0;
1602 }
1603 ew32(TDT, k);
1604 e1e_flush();
1605 msleep(200);
1606 time = jiffies; /* set the start time for the receive */
1607 good_cnt = 0;
1608 do { /* receive the sent packets */
1609 dma_sync_single_for_cpu(&pdev->dev,
1610 rx_ring->buffer_info[l].dma, 2048,
1611 DMA_FROM_DEVICE);
1612
1613 ret_val = e1000_check_lbtest_frame(
1614 rx_ring->buffer_info[l].skb, 1024);
1615 if (!ret_val)
1616 good_cnt++;
1617 l++;
1618 if (l == rx_ring->count)
1619 l = 0;
1620 /*
1621 * time + 20 msecs (200 msecs on 2.4) is more than
1622 * enough time to complete the receives, if it's
1623 * exceeded, break and error off
1624 */
1625 } while ((good_cnt < 64) && !time_after(jiffies, time + 20));
1626 if (good_cnt != 64) {
1627 ret_val = 13; /* ret_val is the same as mis-compare */
1628 break;
1629 }
1630 if (jiffies >= (time + 20)) {
1631 ret_val = 14; /* error code for time out error */
1632 break;
1633 }
1634 } /* end loop count loop */
1635 return ret_val;
1636}
1637
/*
 * e1000_loopback_test - run the full loopback diagnostic
 * @adapter: board private structure
 * @data: result of the first failing stage (0 = pass), also returned
 *
 * Sets up the test descriptor rings, enables loopback, runs the frame
 * test and tears everything down again in reverse order.
 */
static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
{
	/*
	 * PHY loopback cannot be performed if SoL/IDER
	 * sessions are active
	 */
	if (e1000_check_reset_block(&adapter->hw)) {
		e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
		/* reported as a pass so manageability sessions survive */
		*data = 0;
		goto out;
	}

	*data = e1000_setup_desc_rings(adapter);
	if (*data)
		goto out;

	*data = e1000_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;

	*data = e1000_run_loopback_test(adapter);
	e1000_loopback_cleanup(adapter);

err_loopback:
	e1000_free_desc_rings(adapter);
out:
	return *data;
}
1666
/*
 * e1000_link_test - check whether link is established
 * @adapter: board private structure
 * @data: 0 when link is up, 1 otherwise; also returned
 *
 * SerDes parts are polled in a loop (blade backplanes can take
 * minutes); copper parts with autoneg get a single fixed 5 s grace
 * period before the STATUS link-up bit is sampled.
 */
static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;

	*data = 0;
	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
		int i = 0;
		hw->mac.serdes_has_link = false;

		/*
		 * On some blade server designs, link establishment
		 * could take as long as 2-3 minutes
		 */
		do {
			hw->mac.ops.check_for_link(hw);
			if (hw->mac.serdes_has_link)
				return *data;
			msleep(20);
		} while (i++ < 3750);	/* 3750 * 20 ms = 75 s of sleeps */

		*data = 1;
	} else {
		hw->mac.ops.check_for_link(hw);
		if (hw->mac.autoneg)
			/*
			 * On some Phy/switch combinations, link establishment
			 * can take a few seconds more than expected.
			 */
			msleep(5000);

		if (!(er32(STATUS) & E1000_STATUS_LU))
			*data = 1;
	}
	return *data;
}
1702
1703static int e1000e_get_sset_count(struct net_device *netdev, int sset)
1704{
1705 switch (sset) {
1706 case ETH_SS_TEST:
1707 return E1000_TEST_LEN;
1708 case ETH_SS_STATS:
1709 return E1000_STATS_LEN;
1710 default:
1711 return -EOPNOTSUPP;
1712 }
1713}
1714
/*
 * ethtool self-test entry point (ETHTOOL_TEST).
 *
 * Offline mode runs the register, eeprom, interrupt, loopback and link
 * tests (closing the interface for the duration); online mode runs only
 * the link test. Each result lands in data[0..4] (non-zero = failure)
 * and any failure sets ETH_TEST_FL_FAILED in eth_test->flags.
 */
static void e1000_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	u16 autoneg_advertised;
	u8 forced_speed_duplex;
	u8 autoneg;
	bool if_running = netif_running(netdev);

	set_bit(__E1000_TESTING, &adapter->state);

	if (!if_running) {
		/* Get control of and reset hardware */
		if (adapter->flags & FLAG_HAS_AMT)
			e1000e_get_hw_control(adapter);

		e1000e_power_up_phy(adapter);

		/* reset synchronously so link state is settled below */
		adapter->hw.phy.autoneg_wait_to_complete = 1;
		e1000e_reset(adapter);
		adapter->hw.phy.autoneg_wait_to_complete = 0;
	}

	/* NOTE(review): '==' assumes userspace sets only the OFFLINE
	 * flag when requesting an offline test; '&' would be more
	 * defensive — confirm against the ethtool core's contract. */
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		/* save speed, duplex, autoneg settings */
		autoneg_advertised = adapter->hw.phy.autoneg_advertised;
		forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
		autoneg = adapter->hw.mac.autoneg;

		e_info("offline testing starting\n");

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);

		/* each destructive test is bracketed by a fresh reset */
		if (e1000_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000e_reset(adapter);
		if (e1000_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000e_reset(adapter);
		if (e1000_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000e_reset(adapter);
		if (e1000_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* force this routine to wait until autoneg complete/timeout */
		adapter->hw.phy.autoneg_wait_to_complete = 1;
		e1000e_reset(adapter);
		adapter->hw.phy.autoneg_wait_to_complete = 0;

		if (e1000_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* restore speed, duplex, autoneg settings */
		adapter->hw.phy.autoneg_advertised = autoneg_advertised;
		adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
		adapter->hw.mac.autoneg = autoneg;
		e1000e_reset(adapter);

		clear_bit(__E1000_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		/* Online tests */

		e_info("online testing starting\n");

		/* register, eeprom, intr and loopback tests not run online */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		if (e1000_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		clear_bit(__E1000_TESTING, &adapter->state);
	}

	if (!if_running) {
		e1000e_reset(adapter);

		if (adapter->flags & FLAG_HAS_AMT)
			e1000e_release_hw_control(adapter);
	}

	/* presumably gives the link time to settle before returning to
	 * userspace — TODO(review): confirm intent of the 4s delay */
	msleep_interruptible(4 * 1000);
}
1810
1811static void e1000_get_wol(struct net_device *netdev,
1812 struct ethtool_wolinfo *wol)
1813{
1814 struct e1000_adapter *adapter = netdev_priv(netdev);
1815
1816 wol->supported = 0;
1817 wol->wolopts = 0;
1818
1819 if (!(adapter->flags & FLAG_HAS_WOL) ||
1820 !device_can_wakeup(&adapter->pdev->dev))
1821 return;
1822
1823 wol->supported = WAKE_UCAST | WAKE_MCAST |
1824 WAKE_BCAST | WAKE_MAGIC | WAKE_PHY;
1825
1826 /* apply any specific unsupported masks here */
1827 if (adapter->flags & FLAG_NO_WAKE_UCAST) {
1828 wol->supported &= ~WAKE_UCAST;
1829
1830 if (adapter->wol & E1000_WUFC_EX)
1831 e_err("Interface does not support directed (unicast) "
1832 "frame wake-up packets\n");
1833 }
1834
1835 if (adapter->wol & E1000_WUFC_EX)
1836 wol->wolopts |= WAKE_UCAST;
1837 if (adapter->wol & E1000_WUFC_MC)
1838 wol->wolopts |= WAKE_MCAST;
1839 if (adapter->wol & E1000_WUFC_BC)
1840 wol->wolopts |= WAKE_BCAST;
1841 if (adapter->wol & E1000_WUFC_MAG)
1842 wol->wolopts |= WAKE_MAGIC;
1843 if (adapter->wol & E1000_WUFC_LNKC)
1844 wol->wolopts |= WAKE_PHY;
1845}
1846
1847static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1848{
1849 struct e1000_adapter *adapter = netdev_priv(netdev);
1850
1851 if (!(adapter->flags & FLAG_HAS_WOL) ||
1852 !device_can_wakeup(&adapter->pdev->dev) ||
1853 (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1854 WAKE_MAGIC | WAKE_PHY)))
1855 return -EOPNOTSUPP;
1856
1857 /* these settings will always override what we currently have */
1858 adapter->wol = 0;
1859
1860 if (wol->wolopts & WAKE_UCAST)
1861 adapter->wol |= E1000_WUFC_EX;
1862 if (wol->wolopts & WAKE_MCAST)
1863 adapter->wol |= E1000_WUFC_MC;
1864 if (wol->wolopts & WAKE_BCAST)
1865 adapter->wol |= E1000_WUFC_BC;
1866 if (wol->wolopts & WAKE_MAGIC)
1867 adapter->wol |= E1000_WUFC_MAG;
1868 if (wol->wolopts & WAKE_PHY)
1869 adapter->wol |= E1000_WUFC_LNKC;
1870
1871 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1872
1873 return 0;
1874}
1875
1876static int e1000_set_phys_id(struct net_device *netdev,
1877 enum ethtool_phys_id_state state)
1878{
1879 struct e1000_adapter *adapter = netdev_priv(netdev);
1880 struct e1000_hw *hw = &adapter->hw;
1881
1882 switch (state) {
1883 case ETHTOOL_ID_ACTIVE:
1884 if (!hw->mac.ops.blink_led)
1885 return 2; /* cycle on/off twice per second */
1886
1887 hw->mac.ops.blink_led(hw);
1888 break;
1889
1890 case ETHTOOL_ID_INACTIVE:
1891 if (hw->phy.type == e1000_phy_ife)
1892 e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
1893 hw->mac.ops.led_off(hw);
1894 hw->mac.ops.cleanup_led(hw);
1895 break;
1896
1897 case ETHTOOL_ID_ON:
1898 adapter->hw.mac.ops.led_on(&adapter->hw);
1899 break;
1900
1901 case ETHTOOL_ID_OFF:
1902 adapter->hw.mac.ops.led_off(&adapter->hw);
1903 break;
1904 }
1905 return 0;
1906}
1907
1908static int e1000_get_coalesce(struct net_device *netdev,
1909 struct ethtool_coalesce *ec)
1910{
1911 struct e1000_adapter *adapter = netdev_priv(netdev);
1912
1913 if (adapter->itr_setting <= 4)
1914 ec->rx_coalesce_usecs = adapter->itr_setting;
1915 else
1916 ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
1917
1918 return 0;
1919}
1920
1921static int e1000_set_coalesce(struct net_device *netdev,
1922 struct ethtool_coalesce *ec)
1923{
1924 struct e1000_adapter *adapter = netdev_priv(netdev);
1925 struct e1000_hw *hw = &adapter->hw;
1926
1927 if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
1928 ((ec->rx_coalesce_usecs > 4) &&
1929 (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
1930 (ec->rx_coalesce_usecs == 2))
1931 return -EINVAL;
1932
1933 if (ec->rx_coalesce_usecs == 4) {
1934 adapter->itr = adapter->itr_setting = 4;
1935 } else if (ec->rx_coalesce_usecs <= 3) {
1936 adapter->itr = 20000;
1937 adapter->itr_setting = ec->rx_coalesce_usecs;
1938 } else {
1939 adapter->itr = (1000000 / ec->rx_coalesce_usecs);
1940 adapter->itr_setting = adapter->itr & ~3;
1941 }
1942
1943 if (adapter->itr_setting != 0)
1944 ew32(ITR, 1000000000 / (adapter->itr * 256));
1945 else
1946 ew32(ITR, 0);
1947
1948 return 0;
1949}
1950
1951static int e1000_nway_reset(struct net_device *netdev)
1952{
1953 struct e1000_adapter *adapter = netdev_priv(netdev);
1954
1955 if (!netif_running(netdev))
1956 return -EAGAIN;
1957
1958 if (!adapter->hw.mac.autoneg)
1959 return -EINVAL;
1960
1961 e1000e_reinit_locked(adapter);
1962
1963 return 0;
1964}
1965
/*
 * Fill 'data' with one u64 per entry of e1000_gstrings_stats (order
 * matches e1000_get_strings). Each entry records whether its value
 * lives at a byte offset inside the rtnl_link_stats64 snapshot or
 * inside the adapter struct, plus the field's storage size.
 */
static void e1000_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats,
				    u64 *data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 net_stats;
	int i;
	char *p = NULL;

	e1000e_get_stats64(netdev, &net_stats);
	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
		/* pick the base object the stat_offset is relative to */
		switch (e1000_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) &net_stats +
					e1000_gstrings_stats[i].stat_offset;
			break;
		case E1000_STATS:
			p = (char *) adapter +
					e1000_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		/* widen 32-bit counters; copy 64-bit ones directly */
		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
}
1995
1996static void e1000_get_strings(struct net_device *netdev, u32 stringset,
1997 u8 *data)
1998{
1999 u8 *p = data;
2000 int i;
2001
2002 switch (stringset) {
2003 case ETH_SS_TEST:
2004 memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
2005 break;
2006 case ETH_SS_STATS:
2007 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
2008 memcpy(p, e1000_gstrings_stats[i].stat_string,
2009 ETH_GSTRING_LEN);
2010 p += ETH_GSTRING_LEN;
2011 }
2012 break;
2013 }
2014}
2015
2016static int e1000e_set_flags(struct net_device *netdev, u32 data)
2017{
2018 struct e1000_adapter *adapter = netdev_priv(netdev);
2019 bool need_reset = false;
2020 int rc;
2021
2022 need_reset = (data & ETH_FLAG_RXVLAN) !=
2023 (netdev->features & NETIF_F_HW_VLAN_RX);
2024
2025 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN |
2026 ETH_FLAG_TXVLAN);
2027
2028 if (rc)
2029 return rc;
2030
2031 if (need_reset) {
2032 if (netif_running(netdev))
2033 e1000e_reinit_locked(adapter);
2034 else
2035 e1000e_reset(adapter);
2036 }
2037
2038 return 0;
2039}
2040
/* ethtool operations table; installed by e1000e_set_ethtool_ops(). */
static const struct ethtool_ops e1000_ethtool_ops = {
	.get_settings		= e1000_get_settings,
	.set_settings		= e1000_set_settings,
	.get_drvinfo		= e1000_get_drvinfo,
	.get_regs_len		= e1000_get_regs_len,
	.get_regs		= e1000_get_regs,
	.get_wol		= e1000_get_wol,
	.set_wol		= e1000_set_wol,
	.get_msglevel		= e1000_get_msglevel,
	.set_msglevel		= e1000_set_msglevel,
	.nway_reset		= e1000_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= e1000_get_eeprom_len,
	.get_eeprom		= e1000_get_eeprom,
	.set_eeprom		= e1000_set_eeprom,
	.get_ringparam		= e1000_get_ringparam,
	.set_ringparam		= e1000_set_ringparam,
	.get_pauseparam		= e1000_get_pauseparam,
	.set_pauseparam		= e1000_set_pauseparam,
	.get_rx_csum		= e1000_get_rx_csum,
	.set_rx_csum		= e1000_set_rx_csum,
	.get_tx_csum		= e1000_get_tx_csum,
	.set_tx_csum		= e1000_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= e1000_set_tso,
	.self_test		= e1000_diag_test,
	.get_strings		= e1000_get_strings,
	.set_phys_id		= e1000_set_phys_id,
	.get_ethtool_stats	= e1000_get_ethtool_stats,
	.get_sset_count		= e1000e_get_sset_count,
	.get_coalesce		= e1000_get_coalesce,
	.set_coalesce		= e1000_set_coalesce,
	.get_flags		= ethtool_op_get_flags,
	.set_flags		= e1000e_set_flags,
};
2078
/* Attach the e1000e ethtool operations table to a net_device. */
void e1000e_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
}
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
new file mode 100644
index 00000000000..29670397079
--- /dev/null
+++ b/drivers/net/e1000e/hw.h
@@ -0,0 +1,984 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
#ifndef _E1000_HW_H_
#define _E1000_HW_H_

#include <linux/types.h>

/* Forward declarations; the full definitions live elsewhere. */
struct e1000_hw;
struct e1000_adapter;

#include "defines.h"

/*
 * CSR accessors: the short register name expands to the E1000_<reg>
 * offset declared below. Both macros reference a variable named 'hw'
 * (struct e1000_hw *) that must be in scope at the call site.
 */
#define er32(reg)	__er32(hw, E1000_##reg)
#define ew32(reg,val)	__ew32(hw, E1000_##reg, (val))
/* a read of STATUS pushes out any posted MMIO writes */
#define e1e_flush()	er32(STATUS)

/* Array-register accessors: 'offset' indexes 32-bit (4-byte) entries. */
#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \
	(writel((value), ((a)->hw_addr + reg + ((offset) << 2))))

#define E1000_READ_REG_ARRAY(a, reg, offset) \
	(readl((a)->hw_addr + reg + ((offset) << 2)))
48
/*
 * CSR register offsets (bytes from the mapped BAR base), used via the
 * er32()/ew32() macros above. Values come straight from the hardware
 * register map — do not reorder or renumber.
 */
enum e1e_registers {
	E1000_CTRL     = 0x00000, /* Device Control - RW */
	E1000_STATUS   = 0x00008, /* Device Status - RO */
	E1000_EECD     = 0x00010, /* EEPROM/Flash Control - RW */
	E1000_EERD     = 0x00014, /* EEPROM Read - RW */
	E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */
	E1000_FLA      = 0x0001C, /* Flash Access - RW */
	E1000_MDIC     = 0x00020, /* MDI Control - RW */
	E1000_SCTL     = 0x00024, /* SerDes Control - RW */
	E1000_FCAL     = 0x00028, /* Flow Control Address Low - RW */
	E1000_FCAH     = 0x0002C, /* Flow Control Address High -RW */
	/* NOTE(review): FEXTNVM4/FEXTNVM intentionally alias SCTL/FCAL
	 * offsets — these registers are family-specific overlays. */
	E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
	E1000_FEXTNVM  = 0x00028, /* Future Extended NVM - RW */
	E1000_FCT      = 0x00030, /* Flow Control Type - RW */
	E1000_VET      = 0x00038, /* VLAN Ether Type - RW */
	E1000_ICR      = 0x000C0, /* Interrupt Cause Read - R/clr */
	E1000_ITR      = 0x000C4, /* Interrupt Throttling Rate - RW */
	E1000_ICS      = 0x000C8, /* Interrupt Cause Set - WO */
	E1000_IMS      = 0x000D0, /* Interrupt Mask Set - RW */
	E1000_IMC      = 0x000D8, /* Interrupt Mask Clear - WO */
	E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */
	E1000_IAM      = 0x000E0, /* Interrupt Acknowledge Auto Mask */
	E1000_IVAR     = 0x000E4, /* Interrupt Vector Allocation - RW */
	E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */
#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2))
	E1000_RCTL     = 0x00100, /* Rx Control - RW */
	E1000_FCTTV    = 0x00170, /* Flow Control Transmit Timer Value - RW */
	E1000_TXCW     = 0x00178, /* Tx Configuration Word - RW */
	E1000_RXCW     = 0x00180, /* Rx Configuration Word - RO */
	E1000_TCTL     = 0x00400, /* Tx Control - RW */
	E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */
	E1000_TIPG     = 0x00410, /* Tx Inter-packet gap -RW */
	E1000_AIT      = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */
	E1000_LEDCTL   = 0x00E00, /* LED Control - RW */
	E1000_EXTCNF_CTRL  = 0x00F00, /* Extended Configuration Control */
	E1000_EXTCNF_SIZE  = 0x00F08, /* Extended Configuration Size */
	E1000_PHY_CTRL     = 0x00F10, /* PHY Control Register in CSR */
#define E1000_POEMB	E1000_PHY_CTRL	/* PHY OEM Bits */
	E1000_PBA      = 0x01000, /* Packet Buffer Allocation - RW */
	E1000_PBS      = 0x01008, /* Packet Buffer Size */
	E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
	E1000_EEWR     = 0x0102C, /* EEPROM Write Register - RW */
	E1000_FLOP     = 0x0103C, /* FLASH Opcode Register */
	E1000_PBA_ECC  = 0x01100, /* PBA ECC Register */
	E1000_ERT      = 0x02008, /* Early Rx Threshold - RW */
	E1000_FCRTL    = 0x02160, /* Flow Control Receive Threshold Low - RW */
	E1000_FCRTH    = 0x02168, /* Flow Control Receive Threshold High - RW */
	E1000_PSRCTL   = 0x02170, /* Packet Split Receive Control - RW */
	E1000_RDBAL    = 0x02800, /* Rx Descriptor Base Address Low - RW */
	E1000_RDBAH    = 0x02804, /* Rx Descriptor Base Address High - RW */
	E1000_RDLEN    = 0x02808, /* Rx Descriptor Length - RW */
	E1000_RDH      = 0x02810, /* Rx Descriptor Head - RW */
	E1000_RDT      = 0x02818, /* Rx Descriptor Tail - RW */
	E1000_RDTR     = 0x02820, /* Rx Delay Timer - RW */
	E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
#define E1000_RXDCTL(_n)   (E1000_RXDCTL_BASE + (_n << 8))
	E1000_RADV     = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */

/* Convenience macros
 *
 * Note: "_n" is the queue number of the register to be written to.
 *
 * Example usage:
 * E1000_RDBAL_REG(current_rx_queue)
 *
 */
#define E1000_RDBAL_REG(_n)   (E1000_RDBAL + (_n << 8))
	E1000_KABGTXD  = 0x03004, /* AFE Band Gap Transmit Ref Data */
	E1000_TDBAL    = 0x03800, /* Tx Descriptor Base Address Low - RW */
	E1000_TDBAH    = 0x03804, /* Tx Descriptor Base Address High - RW */
	E1000_TDLEN    = 0x03808, /* Tx Descriptor Length - RW */
	E1000_TDH      = 0x03810, /* Tx Descriptor Head - RW */
	E1000_TDT      = 0x03818, /* Tx Descriptor Tail - RW */
	E1000_TIDV     = 0x03820, /* Tx Interrupt Delay Value - RW */
	E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
#define E1000_TXDCTL(_n)   (E1000_TXDCTL_BASE + (_n << 8))
	E1000_TADV     = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */
	E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */
#define E1000_TARC(_n)   (E1000_TARC_BASE + (_n << 8))
	/* Statistics registers; all are read-to-clear counters. */
	E1000_CRCERRS  = 0x04000, /* CRC Error Count - R/clr */
	E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
	E1000_SYMERRS  = 0x04008, /* Symbol Error Count - R/clr */
	E1000_RXERRC   = 0x0400C, /* Receive Error Count - R/clr */
	E1000_MPC      = 0x04010, /* Missed Packet Count - R/clr */
	E1000_SCC      = 0x04014, /* Single Collision Count - R/clr */
	E1000_ECOL     = 0x04018, /* Excessive Collision Count - R/clr */
	E1000_MCC      = 0x0401C, /* Multiple Collision Count - R/clr */
	E1000_LATECOL  = 0x04020, /* Late Collision Count - R/clr */
	E1000_COLC     = 0x04028, /* Collision Count - R/clr */
	E1000_DC       = 0x04030, /* Defer Count - R/clr */
	E1000_TNCRS    = 0x04034, /* Tx-No CRS - R/clr */
	E1000_SEC      = 0x04038, /* Sequence Error Count - R/clr */
	E1000_CEXTERR  = 0x0403C, /* Carrier Extension Error Count - R/clr */
	E1000_RLEC     = 0x04040, /* Receive Length Error Count - R/clr */
	E1000_XONRXC   = 0x04048, /* XON Rx Count - R/clr */
	E1000_XONTXC   = 0x0404C, /* XON Tx Count - R/clr */
	E1000_XOFFRXC  = 0x04050, /* XOFF Rx Count - R/clr */
	E1000_XOFFTXC  = 0x04054, /* XOFF Tx Count - R/clr */
	E1000_FCRUC    = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */
	E1000_PRC64    = 0x0405C, /* Packets Rx (64 bytes) - R/clr */
	E1000_PRC127   = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */
	E1000_PRC255   = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */
	E1000_PRC511   = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */
	E1000_PRC1023  = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */
	E1000_PRC1522  = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */
	E1000_GPRC     = 0x04074, /* Good Packets Rx Count - R/clr */
	E1000_BPRC     = 0x04078, /* Broadcast Packets Rx Count - R/clr */
	E1000_MPRC     = 0x0407C, /* Multicast Packets Rx Count - R/clr */
	E1000_GPTC     = 0x04080, /* Good Packets Tx Count - R/clr */
	E1000_GORCL    = 0x04088, /* Good Octets Rx Count Low - R/clr */
	E1000_GORCH    = 0x0408C, /* Good Octets Rx Count High - R/clr */
	E1000_GOTCL    = 0x04090, /* Good Octets Tx Count Low - R/clr */
	E1000_GOTCH    = 0x04094, /* Good Octets Tx Count High - R/clr */
	E1000_RNBC     = 0x040A0, /* Rx No Buffers Count - R/clr */
	E1000_RUC      = 0x040A4, /* Rx Undersize Count - R/clr */
	E1000_RFC      = 0x040A8, /* Rx Fragment Count - R/clr */
	E1000_ROC      = 0x040AC, /* Rx Oversize Count - R/clr */
	E1000_RJC      = 0x040B0, /* Rx Jabber Count - R/clr */
	E1000_MGTPRC   = 0x040B4, /* Management Packets Rx Count - R/clr */
	E1000_MGTPDC   = 0x040B8, /* Management Packets Dropped Count - R/clr */
	E1000_MGTPTC   = 0x040BC, /* Management Packets Tx Count - R/clr */
	E1000_TORL     = 0x040C0, /* Total Octets Rx Low - R/clr */
	E1000_TORH     = 0x040C4, /* Total Octets Rx High - R/clr */
	E1000_TOTL     = 0x040C8, /* Total Octets Tx Low - R/clr */
	E1000_TOTH     = 0x040CC, /* Total Octets Tx High - R/clr */
	E1000_TPR      = 0x040D0, /* Total Packets Rx - R/clr */
	E1000_TPT      = 0x040D4, /* Total Packets Tx - R/clr */
	E1000_PTC64    = 0x040D8, /* Packets Tx (64 bytes) - R/clr */
	E1000_PTC127   = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */
	E1000_PTC255   = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */
	E1000_PTC511   = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */
	E1000_PTC1023  = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */
	E1000_PTC1522  = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */
	E1000_MPTC     = 0x040F0, /* Multicast Packets Tx Count - R/clr */
	E1000_BPTC     = 0x040F4, /* Broadcast Packets Tx Count - R/clr */
	E1000_TSCTC    = 0x040F8, /* TCP Segmentation Context Tx - R/clr */
	E1000_TSCTFC   = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */
	E1000_IAC      = 0x04100, /* Interrupt Assertion Count */
	E1000_ICRXPTC  = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */
	E1000_ICRXATC  = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */
	E1000_ICTXPTC  = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */
	E1000_ICTXATC  = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */
	E1000_ICTXQEC  = 0x04118, /* Irq Cause Tx Queue Empty Count */
	E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */
	E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
	E1000_ICRXOC   = 0x04124, /* Irq Cause Receiver Overrun Count */
	E1000_RXCSUM   = 0x05000, /* Rx Checksum Control - RW */
	E1000_RFCTL    = 0x05008, /* Receive Filter Control */
	E1000_MTA      = 0x05200, /* Multicast Table Array - RW Array */
	E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */
#define E1000_RAL(_n)   (E1000_RAL_BASE + ((_n) * 8))
#define E1000_RA        (E1000_RAL(0))
	E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
#define E1000_RAH(_n)   (E1000_RAH_BASE + ((_n) * 8))
	E1000_VFTA     = 0x05600, /* VLAN Filter Table Array - RW Array */
	E1000_WUC      = 0x05800, /* Wakeup Control - RW */
	E1000_WUFC     = 0x05808, /* Wakeup Filter Control - RW */
	E1000_WUS      = 0x05810, /* Wakeup Status - RO */
	E1000_MANC     = 0x05820, /* Management Control - RW */
	E1000_FFLT     = 0x05F00, /* Flexible Filter Length Table - RW Array */
	E1000_HOST_IF  = 0x08800, /* Host Interface */

	E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
	E1000_MANC2H    = 0x05860, /* Management Control To Host - RW */
	E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */
#define E1000_MDEF(_n)   (E1000_MDEF_BASE + ((_n) * 4))
	E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
	E1000_GCR	= 0x05B00, /* PCI-Ex Control */
	E1000_GCR2      = 0x05B64, /* PCI-Ex Control #2 */
	E1000_FACTPS    = 0x05B30, /* Function Active and Power State to MNG */
	E1000_SWSM      = 0x05B50, /* SW Semaphore */
	E1000_FWSM      = 0x05B54, /* FW Semaphore */
	E1000_SWSM2     = 0x05B58, /* Driver-only SW semaphore */
	E1000_FFLT_DBG  = 0x05F04, /* Debug Register */
	E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
#define E1000_PCH_RAICC(_n)	(E1000_PCH_RAICC_BASE + ((_n) * 4))
#define E1000_CRC_OFFSET	E1000_PCH_RAICC_BASE
	E1000_HICR      = 0x08F00, /* Host Interface Control */
};
228
#define E1000_MAX_PHY_ADDR		4

/* IGP01E1000 Specific Registers */
#define IGP01E1000_PHY_PORT_CONFIG	0x10 /* Port Config */
#define IGP01E1000_PHY_PORT_STATUS	0x11 /* Status */
#define IGP01E1000_PHY_PORT_CTRL	0x12 /* Control */
#define IGP01E1000_PHY_LINK_HEALTH	0x13 /* PHY Link Health */
#define IGP02E1000_PHY_POWER_MGMT	0x19 /* Power Management */
#define IGP01E1000_PHY_PAGE_SELECT	0x1F /* Page Select */
#define BM_PHY_PAGE_SELECT		22   /* Page Select for BM */
#define IGP_PAGE_SHIFT			5
#define PHY_REG_MASK			0x1F

/* BM/HV PHY wakeup-control page registers and bits */
#define BM_WUC_PAGE			800
#define BM_WUC_ADDRESS_OPCODE		0x11
#define BM_WUC_DATA_OPCODE		0x12
#define BM_WUC_ENABLE_PAGE		769
#define BM_WUC_ENABLE_REG		17
#define BM_WUC_ENABLE_BIT		(1 << 2)
#define BM_WUC_HOST_WU_BIT		(1 << 4)
#define BM_WUC_ME_WU_BIT		(1 << 5)

#define BM_WUC	PHY_REG(BM_WUC_PAGE, 1)
#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
#define BM_WUS	PHY_REG(BM_WUC_PAGE, 3)

#define IGP01E1000_PHY_PCS_INIT_REG	0x00B4
#define IGP01E1000_PHY_POLARITY_MASK	0x0078

#define IGP01E1000_PSCR_AUTO_MDIX	0x1000
#define IGP01E1000_PSCR_FORCE_MDI_MDIX	0x2000 /* 0=MDI, 1=MDIX */

#define IGP01E1000_PSCFR_SMART_SPEED	0x0080

#define IGP02E1000_PM_SPD		0x0001 /* Smart Power Down */
#define IGP02E1000_PM_D0_LPLU		0x0002 /* For D0a states */
#define IGP02E1000_PM_D3_LPLU		0x0004 /* For all other states */

#define IGP01E1000_PLHR_SS_DOWNGRADE	0x8000

#define IGP01E1000_PSSR_POLARITY_REVERSED	0x0002
#define IGP01E1000_PSSR_MDIX		0x0800
#define IGP01E1000_PSSR_SPEED_MASK	0xC000
#define IGP01E1000_PSSR_SPEED_1000MBPS	0xC000

#define IGP02E1000_PHY_CHANNEL_NUM	4
#define IGP02E1000_PHY_AGC_A		0x11B1
#define IGP02E1000_PHY_AGC_B		0x12B1
#define IGP02E1000_PHY_AGC_C		0x14B1
#define IGP02E1000_PHY_AGC_D		0x18B1

#define IGP02E1000_AGC_LENGTH_SHIFT	9 /* Course - 15:13, Fine - 12:9 */
#define IGP02E1000_AGC_LENGTH_MASK	0x7F
#define IGP02E1000_AGC_RANGE		15

/* manage.c */
#define E1000_VFTA_ENTRY_SHIFT		5
#define E1000_VFTA_ENTRY_MASK		0x7F
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK	0x1F

#define E1000_HICR_EN			0x01  /* Enable bit - RO */
/* Driver sets this bit when done to put command in RAM */
#define E1000_HICR_C			0x02
#define E1000_HICR_FW_RESET_ENABLE	0x40
#define E1000_HICR_FW_RESET		0x80

#define E1000_FWSM_MODE_MASK		0xE
#define E1000_FWSM_MODE_SHIFT		1

#define E1000_MNG_IAMT_MODE		0x3
#define E1000_MNG_DHCP_COOKIE_LENGTH	0x10
#define E1000_MNG_DHCP_COOKIE_OFFSET	0x6F0
#define E1000_MNG_DHCP_COMMAND_TIMEOUT	10
#define E1000_MNG_DHCP_TX_PAYLOAD_CMD	64
#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING	0x1
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN	0x2

/* nvm.c */
#define E1000_STM_OPCODE  0xDB00

/* Kumeran (MAC-PHY interconnect) control/status fields */
#define E1000_KMRNCTRLSTA_OFFSET	0x001F0000
#define E1000_KMRNCTRLSTA_OFFSET_SHIFT	16
#define E1000_KMRNCTRLSTA_REN		0x00200000
#define E1000_KMRNCTRLSTA_CTRL_OFFSET	0x1    /* Kumeran Control */
#define E1000_KMRNCTRLSTA_DIAG_OFFSET	0x3    /* Kumeran Diagnostic */
#define E1000_KMRNCTRLSTA_TIMEOUTS	0x4    /* Kumeran Timeouts */
#define E1000_KMRNCTRLSTA_INBAND_PARAM	0x9    /* Kumeran InBand Parameters */
#define E1000_KMRNCTRLSTA_IBIST_DISABLE	0x0200 /* Kumeran IBIST Disable */
#define E1000_KMRNCTRLSTA_DIAG_NELPBK	0x1000 /* Nearend Loopback mode */
#define E1000_KMRNCTRLSTA_K1_CONFIG	0x7
#define E1000_KMRNCTRLSTA_K1_ENABLE	0x0002
#define E1000_KMRNCTRLSTA_HD_CTRL	0x10   /* Kumeran HD Control */

#define IFE_PHY_EXTENDED_STATUS_CONTROL	0x10
#define IFE_PHY_SPECIAL_CONTROL		0x11 /* 100BaseTx PHY Special Control */
#define IFE_PHY_SPECIAL_CONTROL_LED	0x1B /* PHY Special and LED Control */
#define IFE_PHY_MDIX_CONTROL		0x1C /* MDI/MDI-X Control */

/* IFE PHY Extended Status Control */
#define IFE_PESC_POLARITY_REVERSED	0x0100

/* IFE PHY Special Control */
#define IFE_PSC_AUTO_POLARITY_DISABLE	0x0010
#define IFE_PSC_FORCE_POLARITY		0x0020

/* IFE PHY Special Control and LED Control */
#define IFE_PSCL_PROBE_MODE		0x0020
#define IFE_PSCL_PROBE_LEDS_OFF		0x0006 /* Force LEDs 0 and 2 off */
#define IFE_PSCL_PROBE_LEDS_ON		0x0007 /* Force LEDs 0 and 2 on */

/* IFE PHY MDIX Control */
#define IFE_PMC_MDIX_STATUS	0x0020 /* 1=MDI-X, 0=MDI */
#define IFE_PMC_FORCE_MDIX	0x0040 /* 1=force MDI-X, 0=force MDI */
#define IFE_PMC_AUTO_MDIX	0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */

#define E1000_CABLE_LENGTH_UNDEFINED	0xFF

/* PCI device IDs for the parts this driver supports */
#define E1000_DEV_ID_82571EB_COPPER		0x105E
#define E1000_DEV_ID_82571EB_FIBER		0x105F
#define E1000_DEV_ID_82571EB_SERDES		0x1060
#define E1000_DEV_ID_82571EB_QUAD_COPPER	0x10A4
#define E1000_DEV_ID_82571PT_QUAD_COPPER	0x10D5
#define E1000_DEV_ID_82571EB_QUAD_FIBER		0x10A5
#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP	0x10BC
#define E1000_DEV_ID_82571EB_SERDES_DUAL	0x10D9
#define E1000_DEV_ID_82571EB_SERDES_QUAD	0x10DA
#define E1000_DEV_ID_82572EI_COPPER		0x107D
#define E1000_DEV_ID_82572EI_FIBER		0x107E
#define E1000_DEV_ID_82572EI_SERDES		0x107F
#define E1000_DEV_ID_82572EI			0x10B9
#define E1000_DEV_ID_82573E			0x108B
#define E1000_DEV_ID_82573E_IAMT		0x108C
#define E1000_DEV_ID_82573L			0x109A
#define E1000_DEV_ID_82574L			0x10D3
#define E1000_DEV_ID_82574LA			0x10F6
#define E1000_DEV_ID_82583V			0x150C

#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT	0x1096
#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT	0x1098
#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT	0x10BA
#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT	0x10BB

#define E1000_DEV_ID_ICH8_82567V_3		0x1501
#define E1000_DEV_ID_ICH8_IGP_M_AMT		0x1049
#define E1000_DEV_ID_ICH8_IGP_AMT		0x104A
#define E1000_DEV_ID_ICH8_IGP_C			0x104B
#define E1000_DEV_ID_ICH8_IFE			0x104C
#define E1000_DEV_ID_ICH8_IFE_GT		0x10C4
#define E1000_DEV_ID_ICH8_IFE_G			0x10C5
#define E1000_DEV_ID_ICH8_IGP_M			0x104D
#define E1000_DEV_ID_ICH9_IGP_AMT		0x10BD
#define E1000_DEV_ID_ICH9_BM			0x10E5
#define E1000_DEV_ID_ICH9_IGP_M_AMT		0x10F5
#define E1000_DEV_ID_ICH9_IGP_M			0x10BF
#define E1000_DEV_ID_ICH9_IGP_M_V		0x10CB
#define E1000_DEV_ID_ICH9_IGP_C			0x294C
#define E1000_DEV_ID_ICH9_IFE			0x10C0
#define E1000_DEV_ID_ICH9_IFE_GT		0x10C3
#define E1000_DEV_ID_ICH9_IFE_G			0x10C2
#define E1000_DEV_ID_ICH10_R_BM_LM		0x10CC
#define E1000_DEV_ID_ICH10_R_BM_LF		0x10CD
#define E1000_DEV_ID_ICH10_R_BM_V		0x10CE
#define E1000_DEV_ID_ICH10_D_BM_LM		0x10DE
#define E1000_DEV_ID_ICH10_D_BM_LF		0x10DF
#define E1000_DEV_ID_ICH10_D_BM_V		0x1525
#define E1000_DEV_ID_PCH_M_HV_LM		0x10EA
#define E1000_DEV_ID_PCH_M_HV_LC		0x10EB
#define E1000_DEV_ID_PCH_D_HV_DM		0x10EF
#define E1000_DEV_ID_PCH_D_HV_DC		0x10F0
#define E1000_DEV_ID_PCH2_LV_LM			0x1502
#define E1000_DEV_ID_PCH2_LV_V			0x1503

#define E1000_REVISION_4 4

#define E1000_FUNC_1 1

/* word offsets of the per-LAN alternate MAC address in the NVM */
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0   0
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1   3
407
/* MAC generations supported by this driver. */
enum e1000_mac_type {
	e1000_82571,
	e1000_82572,
	e1000_82573,
	e1000_82574,
	e1000_82583,
	e1000_80003es2lan,
	e1000_ich8lan,
	e1000_ich9lan,
	e1000_ich10lan,
	e1000_pchlan,
	e1000_pch2lan,
};

/* Physical-layer media attached to the MAC. */
enum e1000_media_type {
	e1000_media_type_unknown = 0,
	e1000_media_type_copper = 1,
	e1000_media_type_fiber = 2,
	e1000_media_type_internal_serdes = 3,
	e1000_num_media_types
};

/* Backing store for the non-volatile configuration (NVM). */
enum e1000_nvm_type {
	e1000_nvm_unknown = 0,
	e1000_nvm_none,
	e1000_nvm_eeprom_spi,
	e1000_nvm_flash_hw,
	e1000_nvm_flash_sw
};

enum e1000_nvm_override {
	e1000_nvm_override_none = 0,
	e1000_nvm_override_spi_small,
	e1000_nvm_override_spi_large
};

/* PHY variants the driver knows how to drive. */
enum e1000_phy_type {
	e1000_phy_unknown = 0,
	e1000_phy_none,
	e1000_phy_m88,
	e1000_phy_igp,
	e1000_phy_igp_2,
	e1000_phy_gg82563,
	e1000_phy_igp_3,
	e1000_phy_ife,
	e1000_phy_bm,
	e1000_phy_82578,
	e1000_phy_82577,
	e1000_phy_82579,
};

enum e1000_bus_width {
	e1000_bus_width_unknown = 0,
	e1000_bus_width_pcie_x1,
	e1000_bus_width_pcie_x2,
	e1000_bus_width_pcie_x4 = 4,
	e1000_bus_width_32,
	e1000_bus_width_64,
	e1000_bus_width_reserved
};

enum e1000_1000t_rx_status {
	e1000_1000t_rx_status_not_ok = 0,
	e1000_1000t_rx_status_ok,
	e1000_1000t_rx_status_undefined = 0xFF
};

enum e1000_rev_polarity{
	e1000_rev_polarity_normal = 0,
	e1000_rev_polarity_reversed,
	e1000_rev_polarity_undefined = 0xFF
};

/* 802.3x flow-control configuration. */
enum e1000_fc_mode {
	e1000_fc_none = 0,
	e1000_fc_rx_pause,
	e1000_fc_tx_pause,
	e1000_fc_full,
	e1000_fc_default = 0xFF
};

/* 1000BASE-T master/slave resolution preference. */
enum e1000_ms_type {
	e1000_ms_hw_default = 0,
	e1000_ms_force_master,
	e1000_ms_force_slave,
	e1000_ms_auto
};

enum e1000_smart_speed {
	e1000_smart_speed_default = 0,
	e1000_smart_speed_on,
	e1000_smart_speed_off
};

/* States of the internal serdes link bring-up state machine. */
enum e1000_serdes_link_state {
	e1000_serdes_link_down = 0,
	e1000_serdes_link_autoneg_progress,
	e1000_serdes_link_autoneg_complete,
	e1000_serdes_link_forced_up
};
508
/*
 * Receive Descriptor - legacy format.
 * Layout is defined by the hardware: fields are little-endian and must not
 * be reordered or resized.
 */
struct e1000_rx_desc {
	__le64 buffer_addr; /* Address of the descriptor's data buffer */
	__le16 length;      /* Length of data DMAed into data buffer */
	__le16 csum;        /* Packet checksum */
	u8  status;         /* Descriptor status */
	u8  errors;         /* Descriptor Errors */
	__le16 special;
};

/*
 * Receive Descriptor - Extended.
 * Software writes the 'read' view (buffer address); hardware overwrites the
 * same memory with the 'wb' (writeback) view on packet completion.
 */
union e1000_rx_desc_extended {
	struct {
		__le64 buffer_addr;
		__le64 reserved;
	} read;
	struct {
		struct {
			__le32 mrq;	/* Multiple Rx Queues */
			union {
				__le32 rss;	/* RSS Hash */
				struct {
					__le16 ip_id;	/* IP id */
					__le16 csum;	/* Packet Checksum */
				} csum_ip;
			} hi_dword;
		} lower;
		struct {
			__le32 status_error;	/* ext status/error */
			__le16 length;
			__le16 vlan;	/* VLAN tag */
		} upper;
	} wb;	/* writeback */
};
543
/* Buffers per packet-split descriptor: one header buffer + three data. */
#define MAX_PS_BUFFERS 4
/*
 * Receive Descriptor - Packet Split.
 * Same read/writeback aliasing as the extended descriptor, but with four
 * buffer addresses and per-buffer lengths in the writeback.
 */
union e1000_rx_desc_packet_split {
	struct {
		/* one buffer for protocol header(s), three data buffers */
		__le64 buffer_addr[MAX_PS_BUFFERS];
	} read;
	struct {
		struct {
			__le32 mrq;	/* Multiple Rx Queues */
			union {
				__le32 rss;	/* RSS Hash */
				struct {
					__le16 ip_id;	/* IP id */
					__le16 csum;	/* Packet Checksum */
				} csum_ip;
			} hi_dword;
		} lower;
		struct {
			__le32 status_error;	/* ext status/error */
			__le16 length0;	/* length of buffer 0 */
			__le16 vlan;	/* VLAN tag */
		} middle;
		struct {
			__le16 header_status;
			__le16 length[3];	/* length of buffers 1-3 */
		} upper;
		__le64 reserved;
	} wb;	/* writeback */
};
574
/*
 * Transmit Descriptor - legacy format.
 * 'lower'/'upper' unions allow whole-dword access ('data') or per-field
 * access ('flags'/'fields'); layout is hardware-defined.
 */
struct e1000_tx_desc {
	__le64 buffer_addr;	/* Address of the descriptor's data buffer */
	union {
		__le32 data;
		struct {
			__le16 length;	/* Data buffer length */
			u8 cso;	/* Checksum offset */
			u8 cmd;	/* Descriptor control */
		} flags;
	} lower;
	union {
		__le32 data;
		struct {
			u8 status;	/* Descriptor status */
			u8 css;	/* Checksum start */
			__le16 special;
		} fields;
	} upper;
};
595
/*
 * Offload Context Descriptor - programs the hardware checksum/TSO engine
 * (IP/TCP checksum start/offset/end, MSS, header length) for subsequent
 * data descriptors. Layout is hardware-defined.
 */
struct e1000_context_desc {
	union {
		__le32 ip_config;
		struct {
			u8 ipcss;	/* IP checksum start */
			u8 ipcso;	/* IP checksum offset */
			__le16 ipcse;	/* IP checksum end */
		} ip_fields;
	} lower_setup;
	union {
		__le32 tcp_config;
		struct {
			u8 tucss;	/* TCP checksum start */
			u8 tucso;	/* TCP checksum offset */
			__le16 tucse;	/* TCP checksum end */
		} tcp_fields;
	} upper_setup;
	__le32 cmd_and_length;
	union {
		__le32 data;
		struct {
			u8 status;	/* Descriptor status */
			u8 hdr_len;	/* Header length */
			__le16 mss;	/* Maximum segment size */
		} fields;
	} tcp_seg_setup;
};
624
/*
 * Offload data descriptor - transmit descriptor variant used together with
 * a context descriptor for checksum/TSO offload. Layout is hardware-defined.
 */
struct e1000_data_desc {
	__le64 buffer_addr;	/* Address of the descriptor's buffer address */
	union {
		__le32 data;
		struct {
			__le16 length;	/* Data buffer length */
			u8 typ_len_ext;
			u8 cmd;
		} flags;
	} lower;
	union {
		__le32 data;
		struct {
			u8 status;	/* Descriptor status */
			u8 popts;	/* Packet Options */
			__le16 special;
		} fields;
	} upper;
};
645
/*
 * Statistics counters collected by the MAC.
 * Field names mirror the hardware statistics register mnemonics; each u64
 * accumulates the corresponding 32-bit hardware counter across reads.
 * (Per-counter semantics follow the Intel datasheet register of the same
 * name — not restated here to avoid drift.)
 */
struct e1000_hw_stats {
	u64 crcerrs;
	u64 algnerrc;
	u64 symerrs;
	u64 rxerrc;
	u64 mpc;
	u64 scc;
	u64 ecol;
	u64 mcc;
	u64 latecol;
	u64 colc;
	u64 dc;
	u64 tncrs;
	u64 sec;
	u64 cexterr;
	u64 rlec;
	u64 xonrxc;
	u64 xontxc;
	u64 xoffrxc;
	u64 xofftxc;
	u64 fcruc;
	/* prc64..prc1522: rx packet counts bucketed by frame size */
	u64 prc64;
	u64 prc127;
	u64 prc255;
	u64 prc511;
	u64 prc1023;
	u64 prc1522;
	u64 gprc;
	u64 bprc;
	u64 mprc;
	u64 gptc;
	u64 gorc;
	u64 gotc;
	u64 rnbc;
	u64 ruc;
	u64 rfc;
	u64 roc;
	u64 rjc;
	u64 mgprc;
	u64 mgpdc;
	u64 mgptc;
	u64 tor;
	u64 tot;
	u64 tpr;
	u64 tpt;
	/* ptc64..ptc1522: tx packet counts bucketed by frame size */
	u64 ptc64;
	u64 ptc127;
	u64 ptc255;
	u64 ptc511;
	u64 ptc1023;
	u64 ptc1522;
	u64 mptc;
	u64 bptc;
	u64 tsctc;
	u64 tsctfc;
	u64 iac;
	u64 icrxptc;
	u64 icrxatc;
	u64 ictxptc;
	u64 ictxatc;
	u64 ictxqec;
	u64 ictxqmtc;
	u64 icrxdmtc;
	u64 icrxoc;
};
712
/* Error counters read from the PHY rather than the MAC. */
struct e1000_phy_stats {
	u32 idle_errors;
	u32 receive_errors;
};

/*
 * DHCP cookie exchanged with the management firmware; layout (including
 * reserved padding and trailing checksum) is fixed by the host interface.
 */
struct e1000_host_mng_dhcp_cookie {
	u32 signature;
	u8  status;
	u8  reserved0;
	u16 vlan_id;
	u32 reserved1;
	u16 reserved2;
	u8  reserved3;
	u8  checksum;
};

/* Host Interface "Rev 1" */
struct e1000_host_command_header {
	u8 command_id;
	u8 command_length;
	u8 command_options;
	u8 checksum;
};

/* Maximum payload for a Rev 1 host interface command */
#define E1000_HI_MAX_DATA_LENGTH	252
struct e1000_host_command_info {
	struct e1000_host_command_header command_header;
	u8 command_data[E1000_HI_MAX_DATA_LENGTH];
};

/* Host Interface "Rev 2" */
struct e1000_host_mng_command_header {
	u8  command_id;
	u8  checksum;
	u16 reserved1;
	u16 reserved2;
	u16 command_length;
};

/* Maximum payload for a Rev 2 host interface command */
#define E1000_HI_MAX_MNG_DATA_LENGTH	0x6F8
struct e1000_host_mng_command_info {
	struct e1000_host_mng_command_header command_header;
	u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
};
757
/*
 * Function pointers and static data for the MAC.
 * Filled in by the family-specific init_mac_params routines; entries a
 * family does not support are left NULL, so callers must check before use.
 */
struct e1000_mac_operations {
	s32  (*id_led_init)(struct e1000_hw *);
	s32  (*blink_led)(struct e1000_hw *);
	bool (*check_mng_mode)(struct e1000_hw *);
	s32  (*check_for_link)(struct e1000_hw *);
	s32  (*cleanup_led)(struct e1000_hw *);
	void (*clear_hw_cntrs)(struct e1000_hw *);
	void (*clear_vfta)(struct e1000_hw *);
	s32  (*get_bus_info)(struct e1000_hw *);
	void (*set_lan_id)(struct e1000_hw *);
	s32  (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
	s32  (*led_on)(struct e1000_hw *);
	s32  (*led_off)(struct e1000_hw *);
	void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
	s32  (*reset_hw)(struct e1000_hw *);
	s32  (*init_hw)(struct e1000_hw *);
	s32  (*setup_link)(struct e1000_hw *);
	s32  (*setup_physical_interface)(struct e1000_hw *);
	s32  (*setup_led)(struct e1000_hw *);
	void (*write_vfta)(struct e1000_hw *, u32, u32);
	s32  (*read_mac_addr)(struct e1000_hw *);
};
781
/*
 * When to use various PHY register access functions:
 *
 *                 Func   Caller
 *   Function      Does   Does    When to use
 *   ~~~~~~~~~~~~  ~~~~~  ~~~~~~  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *   X_reg         L,P,A  n/a     for simple PHY reg accesses
 *   X_reg_locked  P,A    L       for multiple accesses of different regs
 *                                on different pages
 *   X_reg_page    A      L,P     for multiple accesses of different regs
 *                                on the same page
 *
 * Where X=[read|write], L=locking, P=sets page, A=register access
 *
 */
struct e1000_phy_operations {
	s32  (*acquire)(struct e1000_hw *);
	s32  (*cfg_on_link_up)(struct e1000_hw *);
	s32  (*check_polarity)(struct e1000_hw *);
	s32  (*check_reset_block)(struct e1000_hw *);
	s32  (*commit)(struct e1000_hw *);
	s32  (*force_speed_duplex)(struct e1000_hw *);
	s32  (*get_cfg_done)(struct e1000_hw *hw);
	s32  (*get_cable_length)(struct e1000_hw *);
	s32  (*get_info)(struct e1000_hw *);
	s32  (*set_page)(struct e1000_hw *, u16);
	s32  (*read_reg)(struct e1000_hw *, u32, u16 *);
	s32  (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
	s32  (*read_reg_page)(struct e1000_hw *, u32, u16 *);
	void (*release)(struct e1000_hw *);
	s32  (*reset)(struct e1000_hw *);
	s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
	s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
	s32  (*write_reg)(struct e1000_hw *, u32, u16);
	s32  (*write_reg_locked)(struct e1000_hw *, u32, u16);
	s32  (*write_reg_page)(struct e1000_hw *, u32, u16);
	void (*power_up)(struct e1000_hw *);
	void (*power_down)(struct e1000_hw *);
};
821
/* Function pointers for the NVM.  Set per-family; may be NULL if unused. */
struct e1000_nvm_operations {
	s32  (*acquire)(struct e1000_hw *);
	s32  (*read)(struct e1000_hw *, u16, u16, u16 *);
	void (*release)(struct e1000_hw *);
	s32  (*update)(struct e1000_hw *);
	s32  (*valid_led_default)(struct e1000_hw *, u16 *);
	s32  (*validate)(struct e1000_hw *);
	s32  (*write)(struct e1000_hw *, u16, u16, u16 *);
};
832
/* Per-device MAC state: ops table, addresses, link and IFS bookkeeping. */
struct e1000_mac_info {
	struct e1000_mac_operations ops;
	u8 addr[ETH_ALEN];	/* currently programmed MAC address */
	u8 perm_addr[ETH_ALEN];	/* permanent (factory) MAC address */

	enum e1000_mac_type type;

	u32 collision_delta;
	u32 ledctl_default;
	u32 ledctl_mode1;
	u32 ledctl_mode2;
	u32 mc_filter_type;
	u32 tx_packet_delta;
	u32 txcw;

	/* adaptive inter-frame spacing state */
	u16 current_ifs_val;
	u16 ifs_max_val;
	u16 ifs_min_val;
	u16 ifs_ratio;
	u16 ifs_step_size;
	u16 mta_reg_count;

	/* Maximum size of the MTA register table in all supported adapters */
	#define MAX_MTA_REG 128
	u32 mta_shadow[MAX_MTA_REG];	/* software copy of the MTA */
	u16 rar_entry_count;	/* number of receive-address registers */

	u8  forced_speed_duplex;

	bool adaptive_ifs;
	bool has_fwsm;
	bool arc_subsystem_valid;
	bool autoneg;
	bool autoneg_failed;
	bool get_link_status;
	bool in_ifs_mode;
	bool serdes_has_link;
	bool tx_pkt_filtering;
	enum e1000_serdes_link_state serdes_link_state;
};
873
/* Per-device PHY state: ops table, identity, link and cable parameters. */
struct e1000_phy_info {
	struct e1000_phy_operations ops;

	enum e1000_phy_type type;

	enum e1000_1000t_rx_status local_rx;
	enum e1000_1000t_rx_status remote_rx;
	enum e1000_ms_type ms_type;
	enum e1000_ms_type original_ms_type;
	enum e1000_rev_polarity cable_polarity;
	enum e1000_smart_speed smart_speed;

	u32 addr;		/* MDIO address of the PHY */
	u32 id;			/* PHY identifier read from the device */
	u32 reset_delay_us;	/* in usec */
	u32 revision;

	enum e1000_media_type media_type;

	u16 autoneg_advertised;
	u16 autoneg_mask;
	u16 cable_length;
	u16 max_cable_length;
	u16 min_cable_length;

	u8 mdix;

	bool disable_polarity_correction;
	bool is_mdix;
	bool polarity_correction;
	bool speed_downgraded;
	bool autoneg_wait_to_complete;
};
907
/* Per-device NVM state: ops table plus flash/EEPROM geometry. */
struct e1000_nvm_info {
	struct e1000_nvm_operations ops;

	enum e1000_nvm_type type;
	enum e1000_nvm_override override;

	u32 flash_bank_size;	/* per-bank size, in 16-bit words */
	u32 flash_base_addr;	/* byte offset of NVM within the flash */

	u16 word_size;		/* NVM size, in 16-bit words */
	u16 delay_usec;
	u16 address_bits;
	u16 opcode_bits;
	u16 page_size;
};

/* PCI(e) bus attributes of the device. */
struct e1000_bus_info {
	enum e1000_bus_width width;

	u16 func;	/* PCI function number */
};
929
/* Flow control configuration and current state. */
struct e1000_fc_info {
	u32 high_water;          /* Flow control high-water mark */
	u32 low_water;           /* Flow control low-water mark */
	u16 pause_time;          /* Flow control pause timer */
	u16 refresh_time;        /* Flow control refresh timer */
	bool send_xon;           /* Flow control send XON */
	bool strict_ieee;        /* Strict IEEE mode */
	enum e1000_fc_mode current_mode; /* FC mode in effect */
	enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
};

/* 82571-family private state (selected via hw->dev_spec union). */
struct e1000_dev_spec_82571 {
	bool laa_is_present;	/* locally-administered address in use */
	u32 smb_counter;
};

/* 80003ES2LAN private state. */
struct e1000_dev_spec_80003es2lan {
	bool mdic_wa_enable;	/* MDIC workaround enabled */
};

/* One cached NVM word plus its dirty flag (see shadow_ram[] below). */
struct e1000_shadow_ram {
	u16 value;
	bool modified;
};
954
/* Number of 16-bit words cached in the ICH8 NVM shadow RAM */
#define E1000_ICH8_SHADOW_RAM_WORDS		2048

/* ICH8/9/10 and PCH family private state. */
struct e1000_dev_spec_ich8lan {
	bool kmrn_lock_loss_workaround_enabled;
	struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
	bool nvm_k1_enabled;
	bool eee_disable;
};

/*
 * Top-level hardware abstraction: register mappings plus per-subsystem
 * state. dev_spec is a union because exactly one family's private data
 * applies to a given device.
 */
struct e1000_hw {
	struct e1000_adapter *adapter;

	u8 __iomem *hw_addr;		/* mapped device register space */
	u8 __iomem *flash_address;	/* mapped flash register space (ICH) */

	struct e1000_mac_info  mac;
	struct e1000_fc_info   fc;
	struct e1000_phy_info  phy;
	struct e1000_nvm_info  nvm;
	struct e1000_bus_info  bus;
	struct e1000_host_mng_dhcp_cookie mng_cookie;

	union {
		struct e1000_dev_spec_82571 e82571;
		struct e1000_dev_spec_80003es2lan e80003es2lan;
		struct e1000_dev_spec_ich8lan ich8lan;
	} dev_spec;
};

#endif
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
new file mode 100644
index 00000000000..54add27c8f7
--- /dev/null
+++ b/drivers/net/e1000e/ich8lan.c
@@ -0,0 +1,4150 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/*
30 * 82562G 10/100 Network Connection
31 * 82562G-2 10/100 Network Connection
32 * 82562GT 10/100 Network Connection
33 * 82562GT-2 10/100 Network Connection
34 * 82562V 10/100 Network Connection
35 * 82562V-2 10/100 Network Connection
36 * 82566DC-2 Gigabit Network Connection
37 * 82566DC Gigabit Network Connection
38 * 82566DM-2 Gigabit Network Connection
39 * 82566DM Gigabit Network Connection
40 * 82566MC Gigabit Network Connection
41 * 82566MM Gigabit Network Connection
42 * 82567LM Gigabit Network Connection
43 * 82567LF Gigabit Network Connection
44 * 82567V Gigabit Network Connection
45 * 82567LM-2 Gigabit Network Connection
46 * 82567LF-2 Gigabit Network Connection
47 * 82567V-2 Gigabit Network Connection
48 * 82567LF-3 Gigabit Network Connection
49 * 82567LM-3 Gigabit Network Connection
50 * 82567LM-4 Gigabit Network Connection
51 * 82577LM Gigabit Network Connection
52 * 82577LC Gigabit Network Connection
53 * 82578DM Gigabit Network Connection
54 * 82578DC Gigabit Network Connection
55 * 82579LM Gigabit Network Connection
56 * 82579V Gigabit Network Connection
57 */
58
59#include "e1000.h"
60
/* Byte offsets into the mapped flash register space (hw->flash_address) */
#define ICH_FLASH_GFPREG		0x0000
#define ICH_FLASH_HSFSTS		0x0004
#define ICH_FLASH_HSFCTL		0x0006
#define ICH_FLASH_FADDR			0x0008
#define ICH_FLASH_FDATA0		0x0010
#define ICH_FLASH_PR0			0x0074

/* Flash cycle timeouts and retry count (units per the flash routines) */
#define ICH_FLASH_READ_COMMAND_TIMEOUT	500
#define ICH_FLASH_WRITE_COMMAND_TIMEOUT	500
#define ICH_FLASH_ERASE_COMMAND_TIMEOUT	3000000
#define ICH_FLASH_LINEAR_ADDR_MASK	0x00FFFFFF
#define ICH_FLASH_CYCLE_REPEAT_COUNT	10

/* Values for the HSFCTL flcycle field */
#define ICH_CYCLE_READ			0
#define ICH_CYCLE_WRITE			2
#define ICH_CYCLE_ERASE			3

#define FLASH_GFPREG_BASE_MASK		0x1FFF
#define FLASH_SECTOR_ADDR_SHIFT		12

#define ICH_FLASH_SEG_SIZE_256		256
#define ICH_FLASH_SEG_SIZE_4K		4096
#define ICH_FLASH_SEG_SIZE_8K		8192
#define ICH_FLASH_SEG_SIZE_64K		65536


#define E1000_ICH_FWSM_RSPCIPHY	0x00000040 /* Reset PHY on PCI Reset */
/* FW established a valid mode */
#define E1000_ICH_FWSM_FW_VALID		0x00008000

#define E1000_ICH_MNG_IAMT_MODE		0x2

#define ID_LED_DEFAULT_ICH8LAN	((ID_LED_DEF1_DEF2 << 12) | \
				 (ID_LED_DEF1_OFF2 <<  8) | \
				 (ID_LED_DEF1_ON2  <<  4) | \
				 (ID_LED_DEF1_DEF2))

#define E1000_ICH_NVM_SIG_WORD		0x13
#define E1000_ICH_NVM_SIG_MASK		0xC000
#define E1000_ICH_NVM_VALID_SIG_MASK	0xC0
#define E1000_ICH_NVM_SIG_VALUE		0x80

#define E1000_ICH8_LAN_INIT_TIMEOUT	1500

#define E1000_FEXTNVM_SW_CONFIG		1
#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */

#define E1000_FEXTNVM4_BEACON_DURATION_MASK	0x7
#define E1000_FEXTNVM4_BEACON_DURATION_8USEC	0x7
#define E1000_FEXTNVM4_BEACON_DURATION_16USEC	0x3

#define PCIE_ICH8_SNOOP_ALL		PCIE_NO_SNOOP_ALL

#define E1000_ICH_RAR_ENTRIES		7

/* Compose a PHY register address from (page, register-in-page) */
#define PHY_PAGE_SHIFT 5
#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
			   ((reg) & MAX_PHY_REG_ADDRESS))
#define IGP3_KMRN_DIAG	PHY_REG(770, 19) /* KMRN Diagnostic */
#define IGP3_VR_CTRL	PHY_REG(776, 18) /* Voltage Regulator Control */

#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS		0x0002
#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK	0x0300
#define IGP3_VR_CTRL_MODE_SHUTDOWN		0x0200

#define HV_LED_CONFIG		PHY_REG(768, 30) /* LED Configuration */

#define SW_FLAG_TIMEOUT		1000 /* SW Semaphore flag timeout in milliseconds */

/* SMBus Address Phy Register */
#define HV_SMB_ADDR		PHY_REG(768, 26)
#define HV_SMB_ADDR_MASK	0x007F
#define HV_SMB_ADDR_PEC_EN	0x0200
#define HV_SMB_ADDR_VALID	0x0080

/* PHY Power Management Control */
#define HV_PM_CTRL		PHY_REG(770, 17)

/* PHY Low Power Idle Control */
#define I82579_LPI_CTRL				PHY_REG(772, 20)
#define I82579_LPI_CTRL_ENABLE_MASK		0x6000
#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT	0x80

/* EMI Registers */
#define I82579_EMI_ADDR		0x10
#define I82579_EMI_DATA		0x11
#define I82579_LPI_UPDATE_TIMER	0x4805 /* in 40ns units + 40 ns base value */

/* Strapping Option Register - RO */
#define E1000_STRAP			0x0000C
#define E1000_STRAP_SMBUS_ADDRESS_MASK	0x00FE0000
#define E1000_STRAP_SMBUS_ADDRESS_SHIFT	17

/* OEM Bits Phy Register */
#define HV_OEM_BITS		PHY_REG(768, 25)
#define HV_OEM_BITS_LPLU	0x0004 /* Low Power Link Up */
#define HV_OEM_BITS_GBE_DIS	0x0040 /* Gigabit Disable */
#define HV_OEM_BITS_RESTART_AN	0x0400 /* Restart Auto-negotiation */

#define E1000_NVM_K1_CONFIG	0x1B /* NVM K1 Config Word */
#define E1000_NVM_K1_ENABLE	0x1  /* NVM Enable K1 bit */

/* KMRN Mode Control */
#define HV_KMRN_MODE_CTRL	PHY_REG(769, 16)
#define HV_KMRN_MDIO_SLOW	0x0400

/* KMRN FIFO Control and Status */
#define HV_KMRN_FIFO_CTRLSTA			PHY_REG(770, 16)
#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK	0x7000
#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT	12

/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone    :1; /* bit 0 Flash Cycle Done */
		u16 flcerr     :1; /* bit 1 Flash Cycle Error */
		u16 dael       :1; /* bit 2 Direct Access error Log */
		u16 berasesz   :2; /* bit 4:3 Sector Erase Size */
		u16 flcinprog  :1; /* bit 5 flash cycle in Progress */
		u16 reserved1  :2; /* bit 7:6 Reserved */
		u16 reserved2  :6; /* bit 13:8 Reserved */
		u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
		u16 flockdn    :1; /* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval;	/* whole-register view for MMIO read/write */
};
188
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo      :1; /* 0 Flash Cycle Go */
		u16 flcycle    :2; /* 2:1 Flash Cycle (ICH_CYCLE_*) */
		u16 reserved   :5; /* 7:3 Reserved */
		u16 fldbcount  :2; /* 9:8 Flash Data Byte Count */
		/*
		 * 15:10 Reserved. NOTE(review): field name suggests bit 15
		 * is Flash Lock-Down as in HSFSTS — confirm against the ICH
		 * chipset datasheet.
		 */
		u16 flockdn    :6;
	} hsf_ctrl;
	u16 regval;	/* whole-register view for MMIO read/write */
};
201
/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra  :8; /* 0:7 GbE region Read Access */
		u32 grwa  :8; /* 8:15 GbE region Write Access */
		u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
		u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	/*
	 * NOTE(review): the bitfield struct above is 32 bits wide but regval
	 * is only u16, so regval aliases just the low half of the register —
	 * confirm this is intentional before relying on regval for the
	 * gmrag/gmwag fields.
	 */
	u16 regval;
};
212
/* ICH Flash Protected Range (PR0 register at ICH_FLASH_PR0) */
union ich8_flash_protected_range {
	struct ich8_pr {
		u32 base:13;      /* 0:12 Protected Range Base */
		u32 reserved1:2;  /* 13:14 Reserved */
		u32 rpe:1;        /* 15 Read Protection Enable */
		u32 limit:13;     /* 16:28 Protected Range Limit */
		u32 reserved2:2;  /* 29:30 Reserved */
		u32 wpe:1;        /* 31 Write Protection Enable */
	} range;
	u32 regval;	/* whole-register view for MMIO read/write */
};
225
/* Forward declarations for family-specific helpers defined later in this file. */
static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						u32 offset, u8 byte);
static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 *data);
static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
					 u16 *data);
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data);
static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
258
259static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
260{
261 return readw(hw->flash_address + reg);
262}
263
264static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
265{
266 return readl(hw->flash_address + reg);
267}
268
269static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
270{
271 writew(val, hw->flash_address + reg);
272}
273
274static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
275{
276 writel(val, hw->flash_address + reg);
277}
278
/*
 * Shorthand wrappers for the flash accessors above; each expects a local
 * 'hw' (struct e1000_hw *) to be in scope at the call site.
 */
#define er16flash(reg)		__er16flash(hw, (reg))
#define er32flash(reg)		__er32flash(hw, (reg))
#define ew16flash(reg,val)	__ew16flash(hw, (reg), (val))
#define ew32flash(reg,val)	__ew32flash(hw, (reg), (val))
283
/*
 * Pulse the LANPHYPC pin: assert software override of the pin with the
 * value driven low, then release the override after a short delay.  The
 * write/flush/delay sequence is order-sensitive.
 */
static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
{
	u32 ctrl;

	/* take override and drive the pin value low */
	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
	ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
	ew32(CTRL, ctrl);
	e1e_flush();	/* post the write before delaying */
	udelay(10);
	/* release the override, returning the pin to hardware control */
	ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	ew32(CTRL, ctrl);
}
297
/**
 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers
 * for PCH/PCH2 (82577/82578/82579) parts.  Returns 0 on success or a
 * negative E1000_ERR_* value.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 fwsm;
	s32 ret_val = 0;

	phy->addr = 1;
	phy->reset_delay_us = 100;

	/* HV (PCH-family) register accessors for all PCH variants */
	phy->ops.set_page = e1000_set_page_igp;
	phy->ops.read_reg = e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg = e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;

	/*
	 * The MAC-PHY interconnect may still be in SMBus mode
	 * after Sx->S0.  If the manageability engine (ME) is
	 * disabled, then toggle the LANPHYPC Value bit to force
	 * the interconnect to PCIe mode.
	 */
	fwsm = er32(FWSM);
	if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) {
		e1000_toggle_lanphypc_value_ich8lan(hw);
		msleep(50);

		/*
		 * Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if (hw->mac.type == e1000_pch2lan)
			e1000_gate_hw_phy_config_ich8lan(hw, true);
	}

	/*
	 * Reset the PHY before any access to it.  Doing so, ensures that
	 * the PHY is in a known good state before we read/write PHY registers.
	 * The generic reset is sufficient here, because we haven't determined
	 * the PHY type yet.
	 */
	ret_val = e1000e_phy_hw_reset_generic(hw);
	if (ret_val)
		goto out;

	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		usleep_range(10000, 20000);
		e1000_gate_hw_phy_config_ich8lan(hw, false);
	}

	phy->id = e1000_phy_unknown;
	switch (hw->mac.type) {
	default:
		ret_val = e1000e_get_phy_id(hw);
		if (ret_val)
			goto out;
		/* a zero or all-ones id means the read did not succeed */
		if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
			break;
		/* fall-through */
	case e1000_pch2lan:
		/*
		 * In case the PHY needs to be in mdio slow mode,
		 * set slow mode and try to get the PHY id again.
		 */
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (ret_val)
			goto out;
		ret_val = e1000e_get_phy_id(hw);
		if (ret_val)
			goto out;
		break;
	}
	phy->type = e1000e_get_phy_type_from_id(phy->id);

	/* Hook up the PHY-type-specific operations */
	switch (phy->type) {
	case e1000_phy_82577:
	case e1000_phy_82579:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
		    e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000e_phy_sw_reset;
		break;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000e_get_cable_length_m88;
		phy->ops.get_info = e1000e_get_phy_info_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

out:
	return ret_val;
}
410
411/**
412 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
413 * @hw: pointer to the HW structure
414 *
415 * Initialize family-specific PHY parameters and function pointers.
416 **/
417static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
418{
419 struct e1000_phy_info *phy = &hw->phy;
420 s32 ret_val;
421 u16 i = 0;
422
423 phy->addr = 1;
424 phy->reset_delay_us = 100;
425
426 phy->ops.power_up = e1000_power_up_phy_copper;
427 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
428
429 /*
430 * We may need to do this twice - once for IGP and if that fails,
431 * we'll set BM func pointers and try again
432 */
433 ret_val = e1000e_determine_phy_address(hw);
434 if (ret_val) {
435 phy->ops.write_reg = e1000e_write_phy_reg_bm;
436 phy->ops.read_reg = e1000e_read_phy_reg_bm;
437 ret_val = e1000e_determine_phy_address(hw);
438 if (ret_val) {
439 e_dbg("Cannot determine PHY addr. Erroring out\n");
440 return ret_val;
441 }
442 }
443
444 phy->id = 0;
445 while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
446 (i++ < 100)) {
447 usleep_range(1000, 2000);
448 ret_val = e1000e_get_phy_id(hw);
449 if (ret_val)
450 return ret_val;
451 }
452
453 /* Verify phy id */
454 switch (phy->id) {
455 case IGP03E1000_E_PHY_ID:
456 phy->type = e1000_phy_igp_3;
457 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
458 phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
459 phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
460 phy->ops.get_info = e1000e_get_phy_info_igp;
461 phy->ops.check_polarity = e1000_check_polarity_igp;
462 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
463 break;
464 case IFE_E_PHY_ID:
465 case IFE_PLUS_E_PHY_ID:
466 case IFE_C_E_PHY_ID:
467 phy->type = e1000_phy_ife;
468 phy->autoneg_mask = E1000_ALL_NOT_GIG;
469 phy->ops.get_info = e1000_get_phy_info_ife;
470 phy->ops.check_polarity = e1000_check_polarity_ife;
471 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
472 break;
473 case BME1000_E_PHY_ID:
474 phy->type = e1000_phy_bm;
475 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
476 phy->ops.read_reg = e1000e_read_phy_reg_bm;
477 phy->ops.write_reg = e1000e_write_phy_reg_bm;
478 phy->ops.commit = e1000e_phy_sw_reset;
479 phy->ops.get_info = e1000e_get_phy_info_m88;
480 phy->ops.check_polarity = e1000_check_polarity_m88;
481 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
482 break;
483 default:
484 return -E1000_ERR_PHY;
485 break;
486 }
487
488 return 0;
489}
490
/**
 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific NVM parameters and function
 * pointers: derive the flash bank geometry from GFPREG and reset the
 * software shadow RAM.  Returns 0 on success, -E1000_ERR_CONFIG if the
 * flash register space is not mapped.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	u16 i;

	/* Can't read flash registers if the register set isn't mapped. */
	if (!hw->flash_address) {
		e_dbg("ERROR: Flash registers not mapped\n");
		return -E1000_ERR_CONFIG;
	}

	nvm->type = e1000_nvm_flash_sw;

	gfpreg = er32flash(ICH_FLASH_GFPREG);

	/*
	 * sector_X_addr is a "sector"-aligned address (4096 bytes)
	 * Add 1 to sector_end_addr since this sector is included in
	 * the overall size.
	 */
	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

	/* flash_base_addr is byte-aligned */
	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;

	/*
	 * find total size of the NVM, then cut in half since the total
	 * size represents two separate NVM banks.
	 */
	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
				<< FLASH_SECTOR_ADDR_SHIFT;
	nvm->flash_bank_size /= 2;
	/* Adjust to word count */
	nvm->flash_bank_size /= sizeof(u16);

	nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;

	/* Clear shadow ram: mark every cached word clean and unread */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

	return 0;
}
546
/**
 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 * @adapter: board private structure (note: takes the adapter, not the hw,
 *           unlike the PHY/NVM init routines above)
 *
 * Initialize family-specific MAC parameters and function
 * pointers.  Always returns 0.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;	/* ICH8 has one fewer RAR */
	/* FWSM register */
	mac->has_fwsm = true;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = false;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = true;

	/* LED operations - ICH8/9/10 use the generic LED ops, PCH its own */
	switch (mac->type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		/* ID LED init */
		mac->ops.id_led_init = e1000e_id_led_init;
		/* blink LED */
		mac->ops.blink_led = e1000e_blink_led_generic;
		/* setup LED */
		mac->ops.setup_led = e1000e_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		break;
	case e1000_pchlan:
	case e1000_pch2lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		break;
	}

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);

	/* Gate automatic PHY configuration by hardware on managed 82579 */
	if ((mac->type == e1000_pch2lan) &&
	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
		e1000_gate_hw_phy_config_ich8lan(hw, true);

	return 0;
}
623
624/**
625 * e1000_set_eee_pchlan - Enable/disable EEE support
626 * @hw: pointer to the HW structure
627 *
628 * Enable/disable EEE based on setting in dev_spec structure. The bits in
629 * the LPI Control register will remain set only if/when link is up.
630 **/
631static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
632{
633 s32 ret_val = 0;
634 u16 phy_reg;
635
636 if (hw->phy.type != e1000_phy_82579)
637 goto out;
638
639 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
640 if (ret_val)
641 goto out;
642
643 if (hw->dev_spec.ich8lan.eee_disable)
644 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
645 else
646 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
647
648 ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
649out:
650 return ret_val;
651}
652
/**
 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 * @hw: pointer to the HW structure
 *
 * Checks to see of the link status of the hardware has changed. If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 *
 * Returns 0 when there is nothing to do or link is down, a negative error
 * from a failed PHY/MAC access, or -E1000_ERR_CONFIG when speed/duplex is
 * forced (link state has already been updated in that case).
 **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;
	u16 phy_reg;

	/*
	 * We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed. The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/*
	 * First we want to see if the MII Status Register reports
	 * link. If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	/* On PCH parts the K1 workaround needs the current link state */
	if (hw->mac.type == e1000_pchlan) {
		ret_val = e1000_k1_gig_workaround_hv(hw, link);
		if (ret_val)
			goto out;
	}

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	switch (hw->mac.type) {
	case e1000_pch2lan:
		ret_val = e1000_k1_workaround_lv(hw);
		if (ret_val)
			goto out;
		/* fall-thru */
	case e1000_pchlan:
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1000_link_stall_workaround_hv(hw);
			if (ret_val)
				goto out;
		}

		/*
		 * Workaround for PCHx parts in half-duplex:
		 * Set the number of preambles removed from the packet
		 * when it is passed from the PHY to the MAC to prevent
		 * the MAC from misinterpreting the packet type.
		 */
		e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;

		if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);

		e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
		break;
	default:
		break;
	}

	/*
	 * Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000e_check_downshift(hw);

	/* Enable/Disable EEE after link up */
	ret_val = e1000_set_eee_pchlan(hw);
	if (ret_val)
		goto out;

	/*
	 * If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/*
	 * Auto-Neg is enabled. Auto Speed Detection takes care
	 * of MAC speed/duplex configuration. So we only need to
	 * configure Collision Distance in the MAC.
	 */
	e1000e_config_collision_dist(hw);

	/*
	 * Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000e_config_fc_after_link_up(hw);
	if (ret_val)
		e_dbg("Error configuring flow control\n");

out:
	return ret_val;
}
770
771static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
772{
773 struct e1000_hw *hw = &adapter->hw;
774 s32 rc;
775
776 rc = e1000_init_mac_params_ich8lan(adapter);
777 if (rc)
778 return rc;
779
780 rc = e1000_init_nvm_params_ich8lan(hw);
781 if (rc)
782 return rc;
783
784 switch (hw->mac.type) {
785 case e1000_ich8lan:
786 case e1000_ich9lan:
787 case e1000_ich10lan:
788 rc = e1000_init_phy_params_ich8lan(hw);
789 break;
790 case e1000_pchlan:
791 case e1000_pch2lan:
792 rc = e1000_init_phy_params_pchlan(hw);
793 break;
794 default:
795 break;
796 }
797 if (rc)
798 return rc;
799
800 /*
801 * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
802 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
803 */
804 if ((adapter->hw.phy.type == e1000_phy_ife) ||
805 ((adapter->hw.mac.type >= e1000_pch2lan) &&
806 (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
807 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
808 adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
809
810 hw->mac.ops.blink_led = NULL;
811 }
812
813 if ((adapter->hw.mac.type == e1000_ich8lan) &&
814 (adapter->hw.phy.type == e1000_phy_igp_3))
815 adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
816
817 /* Enable workaround for 82579 w/ ME enabled */
818 if ((adapter->hw.mac.type == e1000_pch2lan) &&
819 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
820 adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
821
822 /* Disable EEE by default until IEEE802.3az spec is finalized */
823 if (adapter->flags2 & FLAG2_HAS_EEE)
824 adapter->hw.dev_spec.ich8lan.eee_disable = true;
825
826 return 0;
827}
828
829static DEFINE_MUTEX(nvm_mutex);
830
/**
 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
 * @hw: pointer to the HW structure
 *
 * Acquires the mutex for performing NVM operations.  Always returns 0;
 * mutex_lock() blocks until the mutex is available.
 **/
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
{
	mutex_lock(&nvm_mutex);

	return 0;
}
843
/**
 * e1000_release_nvm_ich8lan - Release NVM mutex
 * @hw: pointer to the HW structure
 *
 * Releases the mutex used while performing NVM operations.  Must be
 * paired with a prior e1000_acquire_nvm_ich8lan() call.
 **/
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
	mutex_unlock(&nvm_mutex);
}
854
855static DEFINE_MUTEX(swflag_mutex);
856
857/**
858 * e1000_acquire_swflag_ich8lan - Acquire software control flag
859 * @hw: pointer to the HW structure
860 *
861 * Acquires the software control flag for performing PHY and select
862 * MAC CSR accesses.
863 **/
864static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
865{
866 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
867 s32 ret_val = 0;
868
869 mutex_lock(&swflag_mutex);
870
871 while (timeout) {
872 extcnf_ctrl = er32(EXTCNF_CTRL);
873 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
874 break;
875
876 mdelay(1);
877 timeout--;
878 }
879
880 if (!timeout) {
881 e_dbg("SW/FW/HW has locked the resource for too long.\n");
882 ret_val = -E1000_ERR_CONFIG;
883 goto out;
884 }
885
886 timeout = SW_FLAG_TIMEOUT;
887
888 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
889 ew32(EXTCNF_CTRL, extcnf_ctrl);
890
891 while (timeout) {
892 extcnf_ctrl = er32(EXTCNF_CTRL);
893 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
894 break;
895
896 mdelay(1);
897 timeout--;
898 }
899
900 if (!timeout) {
901 e_dbg("Failed to acquire the semaphore.\n");
902 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
903 ew32(EXTCNF_CTRL, extcnf_ctrl);
904 ret_val = -E1000_ERR_CONFIG;
905 goto out;
906 }
907
908out:
909 if (ret_val)
910 mutex_unlock(&swflag_mutex);
911
912 return ret_val;
913}
914
915/**
916 * e1000_release_swflag_ich8lan - Release software control flag
917 * @hw: pointer to the HW structure
918 *
919 * Releases the software control flag for performing PHY and select
920 * MAC CSR accesses.
921 **/
922static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
923{
924 u32 extcnf_ctrl;
925
926 extcnf_ctrl = er32(EXTCNF_CTRL);
927
928 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
929 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
930 ew32(EXTCNF_CTRL, extcnf_ctrl);
931 } else {
932 e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
933 }
934
935 mutex_unlock(&swflag_mutex);
936}
937
938/**
939 * e1000_check_mng_mode_ich8lan - Checks management mode
940 * @hw: pointer to the HW structure
941 *
942 * This checks if the adapter has any manageability enabled.
943 * This is a function pointer entry point only called by read/write
944 * routines for the PHY and NVM parts.
945 **/
946static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
947{
948 u32 fwsm;
949
950 fwsm = er32(FWSM);
951 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
952 ((fwsm & E1000_FWSM_MODE_MASK) ==
953 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
954}
955
956/**
957 * e1000_check_mng_mode_pchlan - Checks management mode
958 * @hw: pointer to the HW structure
959 *
960 * This checks if the adapter has iAMT enabled.
961 * This is a function pointer entry point only called by read/write
962 * routines for the PHY and NVM parts.
963 **/
964static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
965{
966 u32 fwsm;
967
968 fwsm = er32(FWSM);
969 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
970 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
971}
972
973/**
974 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
975 * @hw: pointer to the HW structure
976 *
977 * Checks if firmware is blocking the reset of the PHY.
978 * This is a function pointer entry point only called by
979 * reset routines.
980 **/
981static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
982{
983 u32 fwsm;
984
985 fwsm = er32(FWSM);
986
987 return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
988}
989
990/**
991 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
992 * @hw: pointer to the HW structure
993 *
994 * Assumes semaphore already acquired.
995 *
996 **/
997static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
998{
999 u16 phy_data;
1000 u32 strap = er32(STRAP);
1001 s32 ret_val = 0;
1002
1003 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1004
1005 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1006 if (ret_val)
1007 goto out;
1008
1009 phy_data &= ~HV_SMB_ADDR_MASK;
1010 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1011 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1012 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1013
1014out:
1015 return ret_val;
1016}
1017
/**
 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
 * @hw: pointer to the HW structure
 *
 * SW should configure the LCD from the NVM extended configuration region
 * as a workaround for certain parts.
 *
 * Acquires the PHY semaphore for the duration of the register writes;
 * the semaphore is released on every exit path after acquisition.
 **/
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
	s32 ret_val = 0;
	u16 word_addr, reg_data, reg_addr, phy_page = 0;

	/*
	 * Initialize the PHY from the NVM on ICH platforms. This
	 * is needed due to an issue where the NVM configuration is
	 * not properly autoloaded after power transitions.
	 * Therefore, after each PHY reset, we will load the
	 * configuration data out of the NVM manually.
	 */
	switch (hw->mac.type) {
	case e1000_ich8lan:
		if (phy->type != e1000_phy_igp_3)
			return ret_val;

		/* Only specific ICH8 device IDs use the ICH8 mask;
		 * other ICH8 parts fall through to the ICH8M mask. */
		if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
		    (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
			break;
		}
		/* Fall-thru */
	case e1000_pchlan:
	case e1000_pch2lan:
		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
		break;
	default:
		return ret_val;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Nothing to do unless the NVM requests SW configuration */
	data = er32(FEXTNVM);
	if (!(data & sw_cfg_mask))
		goto out;

	/*
	 * Make sure HW does not configure LCD from PHY
	 * extended configuration before SW configuration
	 */
	data = er32(EXTCNF_CTRL);
	if (!(hw->mac.type == e1000_pch2lan)) {
		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
			goto out;
	}

	cnf_size = er32(EXTCNF_SIZE);
	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
	if (!cnf_size)
		goto out;

	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;

	if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
	    (hw->mac.type == e1000_pchlan)) ||
	     (hw->mac.type == e1000_pch2lan)) {
		/*
		 * HW configures the SMBus address and LEDs when the
		 * OEM and LCD Write Enable bits are set in the NVM.
		 * When both NVM bits are cleared, SW will configure
		 * them instead.
		 */
		ret_val = e1000_write_smbus_addr(hw);
		if (ret_val)
			goto out;

		/* Mirror the MAC LED configuration into the PHY */
		data = er32(LEDCTL);
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
							(u16)data);
		if (ret_val)
			goto out;
	}

	/* Configure LCD from extended configuration region. */

	/* cnf_base_addr is in DWORD */
	word_addr = (u16)(cnf_base_addr << 1);

	/* Each entry is a (data, address) word pair in the NVM */
	for (i = 0; i < cnf_size; i++) {
		ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
					 &reg_data);
		if (ret_val)
			goto out;

		ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
					 1, &reg_addr);
		if (ret_val)
			goto out;

		/* Save off the PHY page for future writes. */
		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
			phy_page = reg_data;
			continue;
		}

		reg_addr &= PHY_REG_MASK;
		reg_addr |= phy_page;

		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
						    reg_data);
		if (ret_val)
			goto out;
	}

out:
	hw->phy.ops.release(hw);
	return ret_val;
}
1140
/**
 * e1000_k1_gig_workaround_hv - K1 Si workaround
 * @hw: pointer to the HW structure
 * @link: link up bool flag
 *
 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
 * If link is down, the function will restore the default K1 setting located
 * in the NVM.  Only applies to 82577/82578 (pchlan); a no-op otherwise.
 **/
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
{
	s32 ret_val = 0;
	u16 status_reg = 0;
	/* Default to the NVM-derived K1 setting; overridden at 1Gbps link */
	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;

	if (hw->mac.type != e1000_pchlan)
		goto out;

	/* Wrap the whole flow with the sw flag */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
	if (link) {
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
			                                      &status_reg);
			if (ret_val)
				goto release;

			status_reg &= BM_CS_STATUS_LINK_UP |
			              BM_CS_STATUS_RESOLVED |
			              BM_CS_STATUS_SPEED_MASK;

			if (status_reg == (BM_CS_STATUS_LINK_UP |
			                   BM_CS_STATUS_RESOLVED |
			                   BM_CS_STATUS_SPEED_1000))
				k1_enable = false;
		}

		if (hw->phy.type == e1000_phy_82577) {
			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
			                                      &status_reg);
			if (ret_val)
				goto release;

			status_reg &= HV_M_STATUS_LINK_UP |
			              HV_M_STATUS_AUTONEG_COMPLETE |
			              HV_M_STATUS_SPEED_MASK;

			if (status_reg == (HV_M_STATUS_LINK_UP |
			                   HV_M_STATUS_AUTONEG_COMPLETE |
			                   HV_M_STATUS_SPEED_1000))
				k1_enable = false;
		}

		/* Link stall fix for link up */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
		                                       0x0100);
		if (ret_val)
			goto release;

	} else {
		/* Link stall fix for link down */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
		                                       0x4100);
		if (ret_val)
			goto release;
	}

	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);

release:
	hw->phy.ops.release(hw);
out:
	return ret_val;
}
1220
/**
 * e1000_configure_k1_ich8lan - Configure K1 power state
 * @hw: pointer to the HW structure
 * @k1_enable: K1 state to configure
 *
 * Configure the K1 power state based on the provided parameter.
 * Assumes semaphore already acquired.
 *
 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 **/
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
{
	s32 ret_val = 0;
	u32 ctrl_reg = 0;
	u32 ctrl_ext = 0;
	u32 reg = 0;
	u16 kmrn_reg = 0;

	ret_val = e1000e_read_kmrn_reg_locked(hw,
	                                     E1000_KMRNCTRLSTA_K1_CONFIG,
	                                     &kmrn_reg);
	if (ret_val)
		goto out;

	if (k1_enable)
		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;

	ret_val = e1000e_write_kmrn_reg_locked(hw,
	                                      E1000_KMRNCTRLSTA_K1_CONFIG,
	                                      kmrn_reg);
	if (ret_val)
		goto out;

	/*
	 * Briefly force the speed with SPD_BYPS set, then restore the
	 * original CTRL/CTRL_EXT values.  The udelay()s and flushes
	 * pace the register sequence; do not reorder.
	 */
	udelay(20);
	ctrl_ext = er32(CTRL_EXT);
	ctrl_reg = er32(CTRL);

	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
	reg |= E1000_CTRL_FRCSPD;
	ew32(CTRL, reg);

	ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
	e1e_flush();
	udelay(20);
	ew32(CTRL, ctrl_reg);
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
	udelay(20);

out:
	return ret_val;
}
1275
/**
 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
 * @hw: pointer to the HW structure
 * @d0_state: boolean if entering d0 or d3 device state
 *
 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
 * collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
 * in NVM determines whether HW should configure LPLU and Gbe Disable.
 * Only applies to pchlan/pch2lan parts; a no-op (returns 0) otherwise.
 **/
static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
{
	s32 ret_val = 0;
	u32 mac_reg;
	u16 oem_reg;

	if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
		return ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* On pchlan, skip if the NVM has OEM writes enabled (HW configures) */
	if (!(hw->mac.type == e1000_pch2lan)) {
		mac_reg = er32(EXTCNF_CTRL);
		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
			goto out;
	}

	mac_reg = er32(FEXTNVM);
	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
		goto out;

	mac_reg = er32(PHY_CTRL);

	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
	if (ret_val)
		goto out;

	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);

	/* Pick the D0 or non-D0 PHY_CTRL bits depending on target state */
	if (d0_state) {
		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
			oem_reg |= HV_OEM_BITS_LPLU;
	} else {
		if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
			oem_reg |= HV_OEM_BITS_LPLU;
	}
	/* Restart auto-neg to activate the bits */
	if (!e1000_check_reset_block(hw))
		oem_reg |= HV_OEM_BITS_RESTART_AN;
	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);

out:
	hw->phy.ops.release(hw);

	return ret_val;
}
1339
1340
1341/**
1342 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1343 * @hw: pointer to the HW structure
1344 **/
1345static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1346{
1347 s32 ret_val;
1348 u16 data;
1349
1350 ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
1351 if (ret_val)
1352 return ret_val;
1353
1354 data |= HV_KMRN_MDIO_SLOW;
1355
1356 ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
1357
1358 return ret_val;
1359}
1360
/**
 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 * done after every PHY reset.
 * @hw: pointer to the HW structure
 *
 * Only applies to pchlan (82577/82578) parts; a no-op otherwise.
 * The workarounds must run in this order; MDIO slow mode in particular
 * must be set before any other MDIO access.
 **/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 phy_data;

	if (hw->mac.type != e1000_pchlan)
		return ret_val;

	/* Set MDIO slow mode before any other MDIO access */
	if (hw->phy.type == e1000_phy_82577) {
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (ret_val)
			goto out;
	}

	/* Early PHY revisions only: preamble workarounds */
	if (((hw->phy.type == e1000_phy_82577) &&
	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
		/* Disable generation of early preamble */
		ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
		if (ret_val)
			return ret_val;

		/* Preamble tuning for SSC */
		ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
		if (ret_val)
			return ret_val;
	}

	if (hw->phy.type == e1000_phy_82578) {
		/*
		 * Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register.
		 */
		if (hw->phy.revision < 2) {
			e1000e_phy_sw_reset(hw);
			ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140);
		}
	}

	/* Select page 0 */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	hw->phy.addr = 1;
	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
	hw->phy.ops.release(hw);
	if (ret_val)
		goto out;

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	ret_val = e1000_k1_gig_workaround_hv(hw, true);
	if (ret_val)
		goto out;

	/* Workaround for link disconnects on a busy hub in half duplex */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;
	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
	if (ret_val)
		goto release;
	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
	                                       phy_data & 0x00FF);
release:
	hw->phy.ops.release(hw);
out:
	return ret_val;
}
1438
/**
 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
 * @hw: pointer to the HW structure
 *
 * Mirrors the MAC's receive-address registers (RAL/RAH and the four
 * SHRAL/SHRAH entries) into the PHY's BM_RAR wakeup registers.
 * Errors from the individual PHY writes are not propagated; the
 * function returns silently if the semaphore cannot be acquired.
 **/
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
{
	u32 mac_reg;
	u16 i, phy_reg = 0;
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;
	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
	if (ret_val)
		goto release;

	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
		mac_reg = er32(RAL(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
		                           (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
		                           (u16)((mac_reg >> 16) & 0xFFFF));

		mac_reg = er32(RAH(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
		                           (u16)(mac_reg & 0xFFFF));
		/* Only the Address Valid bit is carried into BM_RAR_CTRL */
		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
		                           (u16)((mac_reg & E1000_RAH_AV)
		                                 >> 16));
	}

	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

release:
	hw->phy.ops.release(hw);
}
1477
/**
 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
 * with 82579 PHY
 * @hw: pointer to the HW structure
 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
 *
 * Only applies to pch2lan (82579); a no-op otherwise.  The Rx path is
 * temporarily disabled while the MAC and PHY settings are mirrored for
 * the workaround (enable=true) or restored to defaults (enable=false).
 **/
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
{
	s32 ret_val = 0;
	u16 phy_reg, data;
	u32 mac_reg;
	u16 i;

	if (hw->mac.type != e1000_pch2lan)
		goto out;

	/* disable Rx path while enabling/disabling workaround */
	e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
	if (ret_val)
		goto out;

	if (enable) {
		/*
		 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
		 * SHRAL/H) and initial CRC values to the MAC
		 */
		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
			u8 mac_addr[ETH_ALEN] = {0};
			u32 addr_high, addr_low;

			addr_high = er32(RAH(i));
			/* skip entries that are not marked valid */
			if (!(addr_high & E1000_RAH_AV))
				continue;
			addr_low = er32(RAL(i));
			mac_addr[0] = (addr_low & 0xFF);
			mac_addr[1] = ((addr_low >> 8) & 0xFF);
			mac_addr[2] = ((addr_low >> 16) & 0xFF);
			mac_addr[3] = ((addr_low >> 24) & 0xFF);
			mac_addr[4] = (addr_high & 0xFF);
			mac_addr[5] = ((addr_high >> 8) & 0xFF);

			/* seed the per-entry CRC with the address CRC */
			ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
		}

		/* Write Rx addresses to the PHY */
		e1000_copy_rx_addrs_to_phy_ich8lan(hw);

		/* Enable jumbo frame workaround in the MAC */
		mac_reg = er32(FFLT_DBG);
		mac_reg &= ~(1 << 14);
		mac_reg |= (7 << 15);
		ew32(FFLT_DBG, mac_reg);

		/* strip CRC in the MAC since the PHY path alters it */
		mac_reg = er32(RCTL);
		mac_reg |= E1000_RCTL_SECRC;
		ew32(RCTL, mac_reg);

		ret_val = e1000e_read_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						&data);
		if (ret_val)
			goto out;
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data | (1 << 0));
		if (ret_val)
			goto out;
		ret_val = e1000e_read_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						&data);
		if (ret_val)
			goto out;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			goto out;

		/* Enable jumbo frame workaround in the PHY */
		e1e_rphy(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		data |= (0x37 << 5);
		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
		if (ret_val)
			goto out;
		e1e_rphy(hw, PHY_REG(769, 16), &data);
		data &= ~(1 << 13);
		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
		if (ret_val)
			goto out;
		e1e_rphy(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x1A << 2);
		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
		if (ret_val)
			goto out;
		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xFE00);
		if (ret_val)
			goto out;
		e1e_rphy(hw, HV_PM_CTRL, &data);
		ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
		if (ret_val)
			goto out;
	} else {
		/* Write MAC register values back to h/w defaults */
		mac_reg = er32(FFLT_DBG);
		mac_reg &= ~(0xF << 14);
		ew32(FFLT_DBG, mac_reg);

		mac_reg = er32(RCTL);
		mac_reg &= ~E1000_RCTL_SECRC;
		ew32(RCTL, mac_reg);

		ret_val = e1000e_read_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						&data);
		if (ret_val)
			goto out;
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data & ~(1 << 0));
		if (ret_val)
			goto out;
		ret_val = e1000e_read_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						&data);
		if (ret_val)
			goto out;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			goto out;

		/* Write PHY register values back to h/w defaults */
		e1e_rphy(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
		if (ret_val)
			goto out;
		e1e_rphy(hw, PHY_REG(769, 16), &data);
		data |= (1 << 13);
		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
		if (ret_val)
			goto out;
		e1e_rphy(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x8 << 2);
		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
		if (ret_val)
			goto out;
		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
		if (ret_val)
			goto out;
		e1e_rphy(hw, HV_PM_CTRL, &data);
		ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
		if (ret_val)
			goto out;
	}

	/* re-enable Rx path after enabling/disabling workaround */
	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));

out:
	return ret_val;
}
1649
1650/**
1651 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1652 * done after every PHY reset.
1653 **/
1654static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1655{
1656 s32 ret_val = 0;
1657
1658 if (hw->mac.type != e1000_pch2lan)
1659 goto out;
1660
1661 /* Set MDIO slow mode before any other MDIO access */
1662 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1663
1664out:
1665 return ret_val;
1666}
1667
/**
 * e1000_k1_workaround_lv - K1 Si workaround
 * @hw: pointer to the HW structure
 *
 * Workaround to set the K1 beacon duration for 82579 parts.  When link
 * is up with auto-neg complete, the beacon duration (FEXTNVM4) and the
 * LPI PLL-lock-count bit are set according to the resolved link speed.
 * Only applies to pch2lan; a no-op otherwise.
 **/
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 status_reg = 0;
	u32 mac_reg;
	u16 phy_reg;

	if (hw->mac.type != e1000_pch2lan)
		goto out;

	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
	ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
	if (ret_val)
		goto out;

	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
		mac_reg = er32(FEXTNVM4);
		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;

		ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
		if (ret_val)
			goto out;

		/* 1Gbps: short beacon, no forced PLL lock count; else long */
		if (status_reg & HV_M_STATUS_SPEED_1000) {
			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
		} else {
			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
		}
		ew32(FEXTNVM4, mac_reg);
		ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
	}

out:
	return ret_val;
}
1712
1713/**
1714 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1715 * @hw: pointer to the HW structure
1716 * @gate: boolean set to true to gate, false to ungate
1717 *
1718 * Gate/ungate the automatic PHY configuration via hardware; perform
1719 * the configuration via software instead.
1720 **/
1721static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1722{
1723 u32 extcnf_ctrl;
1724
1725 if (hw->mac.type != e1000_pch2lan)
1726 return;
1727
1728 extcnf_ctrl = er32(EXTCNF_CTRL);
1729
1730 if (gate)
1731 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1732 else
1733 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1734
1735 ew32(EXTCNF_CTRL, extcnf_ctrl);
1736 return;
1737}
1738
/**
 * e1000_lan_init_done_ich8lan - Check for PHY config completion
 * @hw: pointer to the HW structure
 *
 * Check the appropriate indication the MAC has finished configuring the
 * PHY after a software reset.
 **/
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
{
	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;

	/* Wait for basic configuration completes before proceeding */
	/* NOTE: each iteration samples STATUS first, then waits 100us */
	do {
		data = er32(STATUS);
		data &= E1000_STATUS_LAN_INIT_DONE;
		udelay(100);
	} while ((!data) && --loop);

	/*
	 * If basic configuration is incomplete before the above loop
	 * count reaches 0, loading the configuration from NVM will
	 * leave the PHY in a bad state possibly resulting in no link.
	 */
	if (loop == 0)
		e_dbg("LAN_INIT_DONE not set, increase timeout\n");

	/* Clear the Init Done bit for the next init event */
	data = er32(STATUS);
	data &= ~E1000_STATUS_LAN_INIT_DONE;
	ew32(STATUS, data);
}
1770
/**
 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
 * @hw: pointer to the HW structure
 *
 * Applies MAC-type-specific PHY workarounds, clears the host-wakeup bit,
 * then reconfigures the LCD from NVM (extended config region and OEM bits).
 * On 82579 it additionally ungates automatic PHY configuration when no
 * firmware is managing the part and programs the EEE LPI update timer.
 * Returns 0 on success, otherwise a negative E1000 error code.
 **/
static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 reg;

	/* Nothing to do while a PHY reset block is asserted */
	if (e1000_check_reset_block(hw))
		goto out;

	/* Allow time for h/w to get to quiescent state after reset */
	usleep_range(10000, 20000);

	/* Perform any necessary post-reset workarounds */
	switch (hw->mac.type) {
	case e1000_pchlan:
		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			goto out;
		break;
	case e1000_pch2lan:
		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			goto out;
		break;
	default:
		break;
	}

	/* Clear the host wakeup bit after lcd reset */
	if (hw->mac.type >= e1000_pchlan) {
		e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
		reg &= ~BM_WUC_HOST_WU_BIT;
		e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
	}

	/* Configure the LCD with the extended configuration region in NVM */
	ret_val = e1000_sw_lcd_config_ich8lan(hw);
	if (ret_val)
		goto out;

	/* Configure the LCD with the OEM bits in NVM */
	ret_val = e1000_oem_bits_config_ich8lan(hw, true);

	if (hw->mac.type == e1000_pch2lan) {
		/* Ungate automatic PHY configuration on non-managed 82579 */
		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
			usleep_range(10000, 20000);
			e1000_gate_hw_phy_config_ich8lan(hw, false);
		}

		/* Set EEE LPI Update Timer to 200usec */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
						       I82579_LPI_UPDATE_TIMER);
		if (ret_val)
			goto release;
		/* 0x1387 encodes the 200usec interval for the EMI timer
		 * (NOTE(review): value taken as-is; confirm against the
		 * 82579 datasheet)
		 */
		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
						       0x1387);
release:
		hw->phy.ops.release(hw);
	}

out:
	return ret_val;
}
1841
1842/**
1843 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1844 * @hw: pointer to the HW structure
1845 *
1846 * Resets the PHY
1847 * This is a function pointer entry point called by drivers
1848 * or other shared routines.
1849 **/
1850static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1851{
1852 s32 ret_val = 0;
1853
1854 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
1855 if ((hw->mac.type == e1000_pch2lan) &&
1856 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
1857 e1000_gate_hw_phy_config_ich8lan(hw, true);
1858
1859 ret_val = e1000e_phy_hw_reset_generic(hw);
1860 if (ret_val)
1861 goto out;
1862
1863 ret_val = e1000_post_phy_reset_ich8lan(hw);
1864
1865out:
1866 return ret_val;
1867}
1868
1869/**
1870 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
1871 * @hw: pointer to the HW structure
1872 * @active: true to enable LPLU, false to disable
1873 *
1874 * Sets the LPLU state according to the active flag. For PCH, if OEM write
1875 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
1876 * the phy speed. This function will manually set the LPLU bit and restart
1877 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
1878 * since it configures the same bit.
1879 **/
1880static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
1881{
1882 s32 ret_val = 0;
1883 u16 oem_reg;
1884
1885 ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
1886 if (ret_val)
1887 goto out;
1888
1889 if (active)
1890 oem_reg |= HV_OEM_BITS_LPLU;
1891 else
1892 oem_reg &= ~HV_OEM_BITS_LPLU;
1893
1894 oem_reg |= HV_OEM_BITS_RESTART_AN;
1895 ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
1896
1897out:
1898 return ret_val;
1899}
1900
1901/**
1902 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
1903 * @hw: pointer to the HW structure
1904 * @active: true to enable LPLU, false to disable
1905 *
1906 * Sets the LPLU D0 state according to the active flag. When
1907 * activating LPLU this function also disables smart speed
1908 * and vice versa. LPLU will not be activated unless the
1909 * device autonegotiation advertisement meets standards of
1910 * either 10 or 10/100 or 10/100/1000 at all duplexes.
1911 * This is a function pointer entry point only called by
1912 * PHY setup routines.
1913 **/
1914static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
1915{
1916 struct e1000_phy_info *phy = &hw->phy;
1917 u32 phy_ctrl;
1918 s32 ret_val = 0;
1919 u16 data;
1920
1921 if (phy->type == e1000_phy_ife)
1922 return ret_val;
1923
1924 phy_ctrl = er32(PHY_CTRL);
1925
1926 if (active) {
1927 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
1928 ew32(PHY_CTRL, phy_ctrl);
1929
1930 if (phy->type != e1000_phy_igp_3)
1931 return 0;
1932
1933 /*
1934 * Call gig speed drop workaround on LPLU before accessing
1935 * any PHY registers
1936 */
1937 if (hw->mac.type == e1000_ich8lan)
1938 e1000e_gig_downshift_workaround_ich8lan(hw);
1939
1940 /* When LPLU is enabled, we should disable SmartSpeed */
1941 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
1942 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1943 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
1944 if (ret_val)
1945 return ret_val;
1946 } else {
1947 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
1948 ew32(PHY_CTRL, phy_ctrl);
1949
1950 if (phy->type != e1000_phy_igp_3)
1951 return 0;
1952
1953 /*
1954 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
1955 * during Dx states where the power conservation is most
1956 * important. During driver activity we should enable
1957 * SmartSpeed, so performance is maintained.
1958 */
1959 if (phy->smart_speed == e1000_smart_speed_on) {
1960 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1961 &data);
1962 if (ret_val)
1963 return ret_val;
1964
1965 data |= IGP01E1000_PSCFR_SMART_SPEED;
1966 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1967 data);
1968 if (ret_val)
1969 return ret_val;
1970 } else if (phy->smart_speed == e1000_smart_speed_off) {
1971 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1972 &data);
1973 if (ret_val)
1974 return ret_val;
1975
1976 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1977 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1978 data);
1979 if (ret_val)
1980 return ret_val;
1981 }
1982 }
1983
1984 return 0;
1985}
1986
1987/**
1988 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
1989 * @hw: pointer to the HW structure
1990 * @active: true to enable LPLU, false to disable
1991 *
1992 * Sets the LPLU D3 state according to the active flag. When
1993 * activating LPLU this function also disables smart speed
1994 * and vice versa. LPLU will not be activated unless the
1995 * device autonegotiation advertisement meets standards of
1996 * either 10 or 10/100 or 10/100/1000 at all duplexes.
1997 * This is a function pointer entry point only called by
1998 * PHY setup routines.
1999 **/
2000static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2001{
2002 struct e1000_phy_info *phy = &hw->phy;
2003 u32 phy_ctrl;
2004 s32 ret_val;
2005 u16 data;
2006
2007 phy_ctrl = er32(PHY_CTRL);
2008
2009 if (!active) {
2010 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2011 ew32(PHY_CTRL, phy_ctrl);
2012
2013 if (phy->type != e1000_phy_igp_3)
2014 return 0;
2015
2016 /*
2017 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2018 * during Dx states where the power conservation is most
2019 * important. During driver activity we should enable
2020 * SmartSpeed, so performance is maintained.
2021 */
2022 if (phy->smart_speed == e1000_smart_speed_on) {
2023 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2024 &data);
2025 if (ret_val)
2026 return ret_val;
2027
2028 data |= IGP01E1000_PSCFR_SMART_SPEED;
2029 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2030 data);
2031 if (ret_val)
2032 return ret_val;
2033 } else if (phy->smart_speed == e1000_smart_speed_off) {
2034 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2035 &data);
2036 if (ret_val)
2037 return ret_val;
2038
2039 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2040 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2041 data);
2042 if (ret_val)
2043 return ret_val;
2044 }
2045 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2046 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2047 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2048 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2049 ew32(PHY_CTRL, phy_ctrl);
2050
2051 if (phy->type != e1000_phy_igp_3)
2052 return 0;
2053
2054 /*
2055 * Call gig speed drop workaround on LPLU before accessing
2056 * any PHY registers
2057 */
2058 if (hw->mac.type == e1000_ich8lan)
2059 e1000e_gig_downshift_workaround_ich8lan(hw);
2060
2061 /* When LPLU is enabled, we should disable SmartSpeed */
2062 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
2063 if (ret_val)
2064 return ret_val;
2065
2066 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2067 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
2068 }
2069
2070 return 0;
2071}
2072
/**
 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
 * @hw: pointer to the HW structure
 * @bank: pointer to the variable that returns the active bank
 *
 * On ICH8/ICH9 try the EECD SEC1VAL bit first; if it is not valid (or on
 * later parts) fall back to reading the signature byte from the NVM using
 * the flash access registers.
 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
 * Returns 0 on success, -E1000_ERR_NVM when neither bank has a valid
 * signature.
 **/
static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
{
	u32 eecd;
	struct e1000_nvm_info *nvm = &hw->nvm;
	/* bank 1 starts one full bank (in bytes) past bank 0 */
	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
	/* byte offset of the high byte of the signature word (0x13) */
	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
	u8 sig_byte = 0;
	s32 ret_val = 0;

	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
		eecd = er32(EECD);
		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
		    E1000_EECD_SEC1VAL_VALID_MASK) {
			if (eecd & E1000_EECD_SEC1VAL)
				*bank = 1;
			else
				*bank = 0;

			return 0;
		}
		e_dbg("Unable to determine valid NVM bank via EEC - "
		      "reading flash signature\n");
		/* fall-thru */
	default:
		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return 0;
		}

		/* Check bank 1 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
							bank1_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return 0;
		}

		e_dbg("ERROR: No valid NVM bank present\n");
		return -E1000_ERR_NVM;
	}

	/* not reached - every switch path returns above */
	return 0;
}
2139
/**
 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
 * @hw: pointer to the HW structure
 * @offset: The offset (in words) of the word(s) to read; the code bounds
 *	checks it against nvm->word_size and indexes shadow_ram with it
 * @words: Size of data to read in words
 * @data: Pointer to the word(s) to read at offset.
 *
 * Reads a word(s) from the NVM using the flash access registers.  Words
 * marked modified in the shadow RAM are returned from the shadow copy
 * instead of the flash.
 **/
static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 act_offset;
	s32 ret_val = 0;
	u32 bank = 0;
	u16 i, word;

	/* Reject out-of-range or zero-length requests */
	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
	    (words == 0)) {
		e_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	nvm->ops.acquire(hw);

	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val) {
		e_dbg("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	/* Translate the word offset into the active bank */
	act_offset = (bank) ? nvm->flash_bank_size : 0;
	act_offset += offset;

	/* bank-detect failure was handled above; reset for the read loop */
	ret_val = 0;
	for (i = 0; i < words; i++) {
		/* Prefer pending (modified) shadow-RAM values over flash */
		if (dev_spec->shadow_ram[offset+i].modified) {
			data[i] = dev_spec->shadow_ram[offset+i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw,
								act_offset + i,
								&word);
			if (ret_val)
				break;
			data[i] = word;
		}
	}

	nvm->ops.release(hw);

out:
	if (ret_val)
		e_dbg("NVM read error: %d\n", ret_val);

	return ret_val;
}
2199
/**
 * e1000_flash_cycle_init_ich8lan - Initialize flash
 * @hw: pointer to the HW structure
 *
 * This function does initial flash setup so that a new read/write/erase
 * cycle can be started: it validates the flash descriptor, clears stale
 * error bits, and waits out any cycle already in progress.
 * Returns 0 when a new cycle may be started, -E1000_ERR_NVM otherwise.
 **/
static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;

	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);

	/* Check if the flash descriptor is valid */
	if (hsfsts.hsf_status.fldesvalid == 0) {
		e_dbg("Flash descriptor invalid.  "
		      "SW Sequencing must be used.\n");
		return -E1000_ERR_NVM;
	}

	/* Clear FCERR and DAEL in hw status by writing 1 */
	hsfsts.hsf_status.flcerr = 1;
	hsfsts.hsf_status.dael = 1;

	ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);

	/*
	 * Either we should have a hardware SPI cycle in progress
	 * bit to check against, in order to start a new cycle or
	 * FDONE bit should be changed in the hardware so that it
	 * is 1 after hardware reset, which can then be used as an
	 * indication whether a cycle is in progress or has been
	 * completed.
	 */

	if (hsfsts.hsf_status.flcinprog == 0) {
		/*
		 * There is no cycle running at present,
		 * so we can start a cycle.
		 * Begin by setting Flash Cycle Done.
		 */
		hsfsts.hsf_status.flcdone = 1;
		ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		ret_val = 0;
	} else {
		s32 i = 0;

		/*
		 * Otherwise poll for sometime so the current
		 * cycle has a chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
			hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcinprog == 0) {
				ret_val = 0;
				break;
			}
			udelay(1);
		}
		if (ret_val == 0) {
			/*
			 * Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts.hsf_status.flcdone = 1;
			ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		} else {
			e_dbg("Flash controller busy, cannot get access\n");
		}
	}

	return ret_val;
}
2274
2275/**
2276 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2277 * @hw: pointer to the HW structure
2278 * @timeout: maximum time to wait for completion
2279 *
2280 * This function starts a flash cycle and waits for its completion.
2281 **/
2282static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2283{
2284 union ich8_hws_flash_ctrl hsflctl;
2285 union ich8_hws_flash_status hsfsts;
2286 s32 ret_val = -E1000_ERR_NVM;
2287 u32 i = 0;
2288
2289 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2290 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2291 hsflctl.hsf_ctrl.flcgo = 1;
2292 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2293
2294 /* wait till FDONE bit is set to 1 */
2295 do {
2296 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2297 if (hsfsts.hsf_status.flcdone == 1)
2298 break;
2299 udelay(1);
2300 } while (i++ < timeout);
2301
2302 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
2303 return 0;
2304
2305 return ret_val;
2306}
2307
2308/**
2309 * e1000_read_flash_word_ich8lan - Read word from flash
2310 * @hw: pointer to the HW structure
2311 * @offset: offset to data location
2312 * @data: pointer to the location for storing the data
2313 *
2314 * Reads the flash word at offset into data. Offset is converted
2315 * to bytes before read.
2316 **/
2317static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2318 u16 *data)
2319{
2320 /* Must convert offset into bytes. */
2321 offset <<= 1;
2322
2323 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2324}
2325
2326/**
2327 * e1000_read_flash_byte_ich8lan - Read byte from flash
2328 * @hw: pointer to the HW structure
2329 * @offset: The offset of the byte to read.
2330 * @data: Pointer to a byte to store the value read.
2331 *
2332 * Reads a single byte from the NVM using the flash access registers.
2333 **/
2334static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2335 u8 *data)
2336{
2337 s32 ret_val;
2338 u16 word = 0;
2339
2340 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2341 if (ret_val)
2342 return ret_val;
2343
2344 *data = (u8)word;
2345
2346 return 0;
2347}
2348
/**
 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
 * @hw: pointer to the HW structure
 * @offset: The offset (in bytes) of the byte or word to read.
 * @size: Size of data to read, 1=byte 2=word
 * @data: Pointer to the word to store the value read.
 *
 * Reads a byte or word from the NVM using the flash access registers,
 * retrying up to ICH_FLASH_CYCLE_REPEAT_COUNT times on flash cycle
 * errors.  Returns 0 on success, -E1000_ERR_NVM otherwise.
 **/
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

	/* Only 1- or 2-byte reads within the flash linear window */
	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
		return -E1000_ERR_NVM;

	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			    hw->nvm.flash_base_addr;

	do {
		udelay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val != 0)
			break;

		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);

		ret_val = e1000_flash_cycle_ich8lan(hw,
						ICH_FLASH_READ_COMMAND_TIMEOUT);

		/*
		 * Check if FCERR is set to 1, if set to 1, clear it
		 * and try the whole sequence a few more times, else
		 * read in (shift in) the Flash Data0, the order is
		 * least significant byte first msb to lsb
		 */
		if (ret_val == 0) {
			flash_data = er32flash(ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (u8)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (u16)(flash_data & 0x0000FFFF);
			break;
		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr == 1) {
				/* Repeat for some time before giving up. */
				continue;
			} else if (hsfsts.hsf_status.flcdone == 0) {
				e_dbg("Timeout error - flash cycle "
				      "did not complete.\n");
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
2426
2427/**
2428 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
2429 * @hw: pointer to the HW structure
2430 * @offset: The offset (in bytes) of the word(s) to write.
2431 * @words: Size of data to write in words
2432 * @data: Pointer to the word(s) to write at offset.
2433 *
2434 * Writes a byte or word to the NVM using the flash access registers.
2435 **/
2436static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2437 u16 *data)
2438{
2439 struct e1000_nvm_info *nvm = &hw->nvm;
2440 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2441 u16 i;
2442
2443 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2444 (words == 0)) {
2445 e_dbg("nvm parameter(s) out of bounds\n");
2446 return -E1000_ERR_NVM;
2447 }
2448
2449 nvm->ops.acquire(hw);
2450
2451 for (i = 0; i < words; i++) {
2452 dev_spec->shadow_ram[offset+i].modified = true;
2453 dev_spec->shadow_ram[offset+i].value = data[i];
2454 }
2455
2456 nvm->ops.release(hw);
2457
2458 return 0;
2459}
2460
/**
 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
 * @hw: pointer to the HW structure
 *
 * The NVM checksum is updated by calling the generic update_nvm_checksum,
 * which writes the checksum to the shadow ram.  The changes in the shadow
 * ram are then committed to the EEPROM by processing each bank at a time
 * checking for the modified bit and writing only the pending changes.
 * After a successful commit, the shadow ram is cleared and is ready for
 * future writes.
 **/
static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
	s32 ret_val;
	u16 data;

	/* Recompute the checksum word in the shadow RAM first */
	ret_val = e1000e_update_nvm_checksum_generic(hw);
	if (ret_val)
		goto out;

	/* The bank-commit logic below only applies to sw-managed flash */
	if (nvm->type != e1000_nvm_flash_sw)
		goto out;

	nvm->ops.acquire(hw);

	/*
	 * We're writing to the opposite bank so if we're on bank 1,
	 * write to bank 0 etc.  We also need to erase the segment that
	 * is going to be written
	 */
	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val) {
		e_dbg("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	if (bank == 0) {
		new_bank_offset = nvm->flash_bank_size;
		old_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
		if (ret_val)
			goto release;
	} else {
		old_bank_offset = nvm->flash_bank_size;
		new_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
		if (ret_val)
			goto release;
	}

	/* Copy every word into the new bank, low byte then high byte */
	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
		/*
		 * Determine whether to write the value stored
		 * in the other NVM bank or a modified value stored
		 * in the shadow RAM
		 */
		if (dev_spec->shadow_ram[i].modified) {
			data = dev_spec->shadow_ram[i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw, i +
								old_bank_offset,
								&data);
			if (ret_val)
				break;
		}

		/*
		 * If the word is 0x13, then make sure the signature bits
		 * (15:14) are 11b until the commit has completed.
		 * This will allow us to write 10b which indicates the
		 * signature is valid.  We want to do this after the write
		 * has completed so that we don't mark the segment valid
		 * while the write is still in progress
		 */
		if (i == E1000_ICH_NVM_SIG_WORD)
			data |= E1000_ICH_NVM_SIG_MASK;

		/* Convert offset to bytes. */
		act_offset = (i + new_bank_offset) << 1;

		udelay(100);
		/* Write the bytes to the new bank. */
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset,
							       (u8)data);
		if (ret_val)
			break;

		udelay(100);
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							  act_offset + 1,
							  (u8)(data >> 8));
		if (ret_val)
			break;
	}

	/*
	 * Don't bother writing the segment valid bits if sector
	 * programming failed.
	 */
	if (ret_val) {
		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
		e_dbg("Flash commit failed.\n");
		goto release;
	}

	/*
	 * Finally validate the new segment by setting bit 15:14
	 * to 10b in word 0x13 , this can be done without an
	 * erase as well since these bits are 11 to start with
	 * and we need to change bit 14 to 0b
	 */
	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
	if (ret_val)
		goto release;

	data &= 0xBFFF;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
						       act_offset * 2 + 1,
						       (u8)(data >> 8));
	if (ret_val)
		goto release;

	/*
	 * And invalidate the previously valid segment by setting
	 * its signature word (0x13) high_byte to 0b.  This can be
	 * done without an erase because flash erase sets all bits
	 * to 1's.  We can write 1's to 0's without an erase
	 */
	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
	if (ret_val)
		goto release;

	/* Great!  Everything worked, we can now clear the cached entries. */
	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

release:
	nvm->ops.release(hw);

	/*
	 * Reload the EEPROM, or else modifications will not appear
	 * until after the next adapter reset.
	 */
	if (!ret_val) {
		e1000e_reload_nvm(hw);
		usleep_range(10000, 20000);
	}

out:
	if (ret_val)
		e_dbg("NVM update error: %d\n", ret_val);

	return ret_val;
}
2623
2624/**
2625 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2626 * @hw: pointer to the HW structure
2627 *
2628 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2629 * If the bit is 0, that the EEPROM had been modified, but the checksum was not
2630 * calculated, in which case we need to calculate the checksum and set bit 6.
2631 **/
2632static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2633{
2634 s32 ret_val;
2635 u16 data;
2636
2637 /*
2638 * Read 0x19 and check bit 6. If this bit is 0, the checksum
2639 * needs to be fixed. This bit is an indication that the NVM
2640 * was prepared by OEM software and did not calculate the
2641 * checksum...a likely scenario.
2642 */
2643 ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
2644 if (ret_val)
2645 return ret_val;
2646
2647 if ((data & 0x40) == 0) {
2648 data |= 0x40;
2649 ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
2650 if (ret_val)
2651 return ret_val;
2652 ret_val = e1000e_update_nvm_checksum(hw);
2653 if (ret_val)
2654 return ret_val;
2655 }
2656
2657 return e1000e_validate_nvm_checksum_generic(hw);
2658}
2659
/**
 * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
 * @hw: pointer to the HW structure
 *
 * To prevent malicious write/erase of the NVM, set it to be read-only
 * so that the hardware ignores all write/erase cycles of the NVM via
 * the flash control registers.  The shadow-ram copy of the NVM will
 * still be updated, however any updates to this copy will not stick
 * across driver reloads.
 **/
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	union ich8_flash_protected_range pr0;
	union ich8_hws_flash_status hsfsts;
	u32 gfpreg;

	nvm->ops.acquire(hw);

	/* GFPREG holds the GbE region's base (low 16) and limit (high 16) */
	gfpreg = er32flash(ICH_FLASH_GFPREG);

	/* Write-protect GbE Sector of NVM */
	pr0.regval = er32flash(ICH_FLASH_PR0);
	pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
	pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
	pr0.range.wpe = true;
	ew32flash(ICH_FLASH_PR0, pr0.regval);

	/*
	 * Lock down a subset of GbE Flash Control Registers, e.g.
	 * PR0 to prevent the write-protection from being lifted.
	 * Once FLOCKDN is set, the registers protected by it cannot
	 * be written until FLOCKDN is cleared by a hardware reset.
	 */
	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
	hsfsts.hsf_status.flockdn = true;
	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);

	nvm->ops.release(hw);
}
2700
2701/**
2702 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2703 * @hw: pointer to the HW structure
2704 * @offset: The offset (in bytes) of the byte/word to read.
2705 * @size: Size of data to read, 1=byte 2=word
2706 * @data: The byte(s) to write to the NVM.
2707 *
2708 * Writes one/two bytes to the NVM using the flash access registers.
2709 **/
2710static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2711 u8 size, u16 data)
2712{
2713 union ich8_hws_flash_status hsfsts;
2714 union ich8_hws_flash_ctrl hsflctl;
2715 u32 flash_linear_addr;
2716 u32 flash_data = 0;
2717 s32 ret_val;
2718 u8 count = 0;
2719
2720 if (size < 1 || size > 2 || data > size * 0xff ||
2721 offset > ICH_FLASH_LINEAR_ADDR_MASK)
2722 return -E1000_ERR_NVM;
2723
2724 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2725 hw->nvm.flash_base_addr;
2726
2727 do {
2728 udelay(1);
2729 /* Steps */
2730 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2731 if (ret_val)
2732 break;
2733
2734 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2735 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2736 hsflctl.hsf_ctrl.fldbcount = size -1;
2737 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2738 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2739
2740 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
2741
2742 if (size == 1)
2743 flash_data = (u32)data & 0x00FF;
2744 else
2745 flash_data = (u32)data;
2746
2747 ew32flash(ICH_FLASH_FDATA0, flash_data);
2748
2749 /*
2750 * check if FCERR is set to 1 , if set to 1, clear it
2751 * and try the whole sequence a few more times else done
2752 */
2753 ret_val = e1000_flash_cycle_ich8lan(hw,
2754 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2755 if (!ret_val)
2756 break;
2757
2758 /*
2759 * If we're here, then things are most likely
2760 * completely hosed, but if the error condition
2761 * is detected, it won't hurt to give it another
2762 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2763 */
2764 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2765 if (hsfsts.hsf_status.flcerr == 1)
2766 /* Repeat for some time before giving up. */
2767 continue;
2768 if (hsfsts.hsf_status.flcdone == 0) {
2769 e_dbg("Timeout error - flash cycle "
2770 "did not complete.");
2771 break;
2772 }
2773 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2774
2775 return ret_val;
2776}
2777
2778/**
2779 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2780 * @hw: pointer to the HW structure
2781 * @offset: The index of the byte to read.
2782 * @data: The byte to write to the NVM.
2783 *
2784 * Writes a single byte to the NVM using the flash access registers.
2785 **/
2786static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2787 u8 data)
2788{
2789 u16 word = (u16)data;
2790
2791 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2792}
2793
2794/**
2795 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2796 * @hw: pointer to the HW structure
2797 * @offset: The offset of the byte to write.
2798 * @byte: The byte to write to the NVM.
2799 *
2800 * Writes a single byte to the NVM using the flash access registers.
2801 * Goes through a retry algorithm before giving up.
2802 **/
2803static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2804 u32 offset, u8 byte)
2805{
2806 s32 ret_val;
2807 u16 program_retries;
2808
2809 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2810 if (!ret_val)
2811 return ret_val;
2812
2813 for (program_retries = 0; program_retries < 100; program_retries++) {
2814 e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
2815 udelay(100);
2816 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2817 if (!ret_val)
2818 break;
2819 }
2820 if (program_retries == 100)
2821 return -E1000_ERR_NVM;
2822
2823 return 0;
2824}
2825
2826/**
2827 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
2828 * @hw: pointer to the HW structure
2829 * @bank: 0 for first bank, 1 for second bank, etc.
2830 *
2831 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
2832 * bank N is 4096 * N + flash_reg_addr.
2833 **/
2834static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2835{
2836 struct e1000_nvm_info *nvm = &hw->nvm;
2837 union ich8_hws_flash_status hsfsts;
2838 union ich8_hws_flash_ctrl hsflctl;
2839 u32 flash_linear_addr;
2840 /* bank size is in 16bit words - adjust to bytes */
2841 u32 flash_bank_size = nvm->flash_bank_size * 2;
2842 s32 ret_val;
2843 s32 count = 0;
2844 s32 j, iteration, sector_size;
2845
2846 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2847
2848 /*
2849 * Determine HW Sector size: Read BERASE bits of hw flash status
2850 * register
2851 * 00: The Hw sector is 256 bytes, hence we need to erase 16
2852 * consecutive sectors. The start index for the nth Hw sector
2853 * can be calculated as = bank * 4096 + n * 256
2854 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
2855 * The start index for the nth Hw sector can be calculated
2856 * as = bank * 4096
2857 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
2858 * (ich9 only, otherwise error condition)
2859 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
2860 */
2861 switch (hsfsts.hsf_status.berasesz) {
2862 case 0:
2863 /* Hw sector size 256 */
2864 sector_size = ICH_FLASH_SEG_SIZE_256;
2865 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
2866 break;
2867 case 1:
2868 sector_size = ICH_FLASH_SEG_SIZE_4K;
2869 iteration = 1;
2870 break;
2871 case 2:
2872 sector_size = ICH_FLASH_SEG_SIZE_8K;
2873 iteration = 1;
2874 break;
2875 case 3:
2876 sector_size = ICH_FLASH_SEG_SIZE_64K;
2877 iteration = 1;
2878 break;
2879 default:
2880 return -E1000_ERR_NVM;
2881 }
2882
2883 /* Start with the base address, then add the sector offset. */
2884 flash_linear_addr = hw->nvm.flash_base_addr;
2885 flash_linear_addr += (bank) ? flash_bank_size : 0;
2886
2887 for (j = 0; j < iteration ; j++) {
2888 do {
2889 /* Steps */
2890 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2891 if (ret_val)
2892 return ret_val;
2893
2894 /*
2895 * Write a value 11 (block Erase) in Flash
2896 * Cycle field in hw flash control
2897 */
2898 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2899 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
2900 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2901
2902 /*
2903 * Write the last 24 bits of an index within the
2904 * block into Flash Linear address field in Flash
2905 * Address.
2906 */
2907 flash_linear_addr += (j * sector_size);
2908 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
2909
2910 ret_val = e1000_flash_cycle_ich8lan(hw,
2911 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
2912 if (ret_val == 0)
2913 break;
2914
2915 /*
2916 * Check if FCERR is set to 1. If 1,
2917 * clear it and try the whole sequence
2918 * a few more times else Done
2919 */
2920 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2921 if (hsfsts.hsf_status.flcerr == 1)
2922 /* repeat for some time before giving up */
2923 continue;
2924 else if (hsfsts.hsf_status.flcdone == 0)
2925 return ret_val;
2926 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
2927 }
2928
2929 return 0;
2930}
2931
2932/**
2933 * e1000_valid_led_default_ich8lan - Set the default LED settings
2934 * @hw: pointer to the HW structure
2935 * @data: Pointer to the LED settings
2936 *
2937 * Reads the LED default settings from the NVM to data. If the NVM LED
2938 * settings is all 0's or F's, set the LED default to a valid LED default
2939 * setting.
2940 **/
2941static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
2942{
2943 s32 ret_val;
2944
2945 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
2946 if (ret_val) {
2947 e_dbg("NVM Read Error\n");
2948 return ret_val;
2949 }
2950
2951 if (*data == ID_LED_RESERVED_0000 ||
2952 *data == ID_LED_RESERVED_FFFF)
2953 *data = ID_LED_DEFAULT_ICH8LAN;
2954
2955 return 0;
2956}
2957
/**
 * e1000_id_led_init_pchlan - store LED configurations
 * @hw: pointer to the HW structure
 *
 * PCH does not control LEDs via the LEDCTL register, rather it uses
 * the PHY LED configuration register.
 *
 * PCH also does not have an "always on" or "always off" mode which
 * complicates the ID feature.  Instead of using the "on" mode to indicate
 * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init()),
 * use "link_up" mode.  The LEDs will still ID on request if there is no
 * link based on logic in e1000_led_[on|off]_pchlan().
 **/
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	/* "on" approximated by link_up; "off" is link_up with invert set */
	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
	u16 data, i, temp, shift;

	/* Get default ID LED modes from the NVM */
	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		goto out;

	/* Both ID modes start from the current LEDCTL contents */
	mac->ledctl_default = er32(LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		/* NVM packs one 4-bit mode per LED... */
		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
		/* ...while the PHY LED config uses a 5-bit field per LED */
		shift = (i * 5);
		/* mode1: LED state for the first half of the ID blink */
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode1 |= (ledctl_on << shift);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode1 |= (ledctl_off << shift);
			break;
		default:
			/* Do nothing */
			break;
		}
		/* mode2: LED state for the second half of the ID blink */
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode2 |= (ledctl_on << shift);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode2 |= (ledctl_off << shift);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

out:
	return ret_val;
}
3030
3031/**
3032 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3033 * @hw: pointer to the HW structure
3034 *
3035 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
3036 * register, so the the bus width is hard coded.
3037 **/
3038static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3039{
3040 struct e1000_bus_info *bus = &hw->bus;
3041 s32 ret_val;
3042
3043 ret_val = e1000e_get_bus_info_pcie(hw);
3044
3045 /*
3046 * ICH devices are "PCI Express"-ish. They have
3047 * a configuration space, but do not contain
3048 * PCI Express Capability registers, so bus width
3049 * must be hardcoded.
3050 */
3051 if (bus->width == e1000_bus_width_unknown)
3052 bus->width = e1000_bus_width_pcie_x1;
3053
3054 return ret_val;
3055}
3056
/**
 * e1000_reset_hw_ich8lan - Reset the hardware
 * @hw: pointer to the HW structure
 *
 * Does a full reset of the hardware which includes a reset of the PHY and
 * MAC.  The sequence (quiesce DMA, mask interrupts, stop Tx/Rx, global
 * reset, post-reset PHY config) is order-sensitive.
 **/
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 reg;
	u32 ctrl, kab;
	s32 ret_val;

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000e_disable_pcie_master(hw);
	if (ret_val)
		e_dbg("PCI-E Master disable polling has failed.\n");

	e_dbg("Masking off all interrupts\n");
	ew32(IMC, 0xffffffff);

	/*
	 * Disable the Transmit and Receive units.  Then delay to allow
	 * any pending transactions to complete before we hit the MAC
	 * with the global reset.
	 */
	ew32(RCTL, 0);
	ew32(TCTL, E1000_TCTL_PSP);
	e1e_flush();

	usleep_range(10000, 20000);

	/* Workaround for ICH8 bit corruption issue in FIFO memory */
	if (hw->mac.type == e1000_ich8lan) {
		/* Set Tx and Rx buffer allocation to 8k apiece. */
		ew32(PBA, E1000_PBA_8K);
		/* Set Packet Buffer Size to 16k. */
		ew32(PBS, E1000_PBS_16K);
	}

	if (hw->mac.type == e1000_pchlan) {
		/* Save the NVM K1 bit setting so it survives the reset */
		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
		if (ret_val)
			return ret_val;

		if (reg & E1000_NVM_K1_ENABLE)
			dev_spec->nvm_k1_enabled = true;
		else
			dev_spec->nvm_k1_enabled = false;
	}

	ctrl = er32(CTRL);

	if (!e1000_check_reset_block(hw)) {
		/*
		 * Full-chip reset requires MAC and PHY reset at the same
		 * time to make sure the interface between MAC and the
		 * external PHY is reset.
		 */
		ctrl |= E1000_CTRL_PHY_RST;

		/*
		 * Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if ((hw->mac.type == e1000_pch2lan) &&
		    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
			e1000_gate_hw_phy_config_ich8lan(hw, true);
	}
	/* Hold the SW/FW flag across the reset itself */
	ret_val = e1000_acquire_swflag_ich8lan(hw);
	e_dbg("Issuing a global reset to ich8lan\n");
	ew32(CTRL, (ctrl | E1000_CTRL_RST));
	/* cannot issue a flush here because it hangs the hardware */
	msleep(20);

	/* Only unlock if the acquire above actually took the mutex */
	if (!ret_val)
		mutex_unlock(&swflag_mutex);

	if (ctrl & E1000_CTRL_PHY_RST) {
		/* PHY was reset too; wait for its config to complete */
		ret_val = hw->phy.ops.get_cfg_done(hw);
		if (ret_val)
			goto out;

		ret_val = e1000_post_phy_reset_ich8lan(hw);
		if (ret_val)
			goto out;
	}

	/*
	 * For PCH, this write will make sure that any noise
	 * will be detected as a CRC error and be dropped rather than show up
	 * as a bad packet to the DMA engine.
	 */
	if (hw->mac.type == e1000_pchlan)
		ew32(CRC_OFFSET, 0x65656565);

	/* Re-mask and clear any interrupts raised during the reset */
	ew32(IMC, 0xffffffff);
	er32(ICR);

	kab = er32(KABGTXD);
	kab |= E1000_KABGTXD_BGSQLBIAS;
	ew32(KABGTXD, kab);

out:
	return ret_val;
}
3168
/**
 * e1000_init_hw_ich8lan - Initialize the hardware
 * @hw: pointer to the HW structure
 *
 * Prepares the hardware for transmit and receive by doing the following:
 *  - initialize hardware bits
 *  - initialize LED identification
 *  - setup receive address registers
 *  - setup flow control
 *  - setup transmit descriptors
 *  - clear statistics
 **/
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 ctrl_ext, txdctl, snoop;
	s32 ret_val;
	u16 i;

	e1000_initialize_hw_bits_ich8lan(hw);

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	if (ret_val)
		e_dbg("Error initializing identification LED\n");
	/* This is not fatal and we should not stop init due to this */

	/* Setup the receive address. */
	e1000e_init_rx_addrs(hw, mac->rar_entry_count);

	/* Zero out the Multicast HASH table */
	e_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/*
	 * The 82578 Rx buffer will stall if wakeup is enabled in host and
	 * the ME.  Disable wakeup by clearing the host wakeup bit.
	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
	 */
	if (hw->phy.type == e1000_phy_82578) {
		e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
		i &= ~BM_WUC_HOST_WU_BIT;
		e1e_wphy(hw, BM_PORT_GEN_CFG, i);
		ret_val = e1000_phy_hw_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* Setup link and flow control */
	/* NOTE(review): ret_val from setup_link is never checked and this
	 * function returns 0 regardless — confirm whether intentional. */
	ret_val = e1000_setup_link_ich8lan(hw);

	/* Set the transmit descriptor write-back policy for both queues */
	txdctl = er32(TXDCTL(0));
	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
		 E1000_TXDCTL_FULL_TX_DESC_WB;
	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
	ew32(TXDCTL(0), txdctl);
	txdctl = er32(TXDCTL(1));
	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
		 E1000_TXDCTL_FULL_TX_DESC_WB;
	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
	ew32(TXDCTL(1), txdctl);

	/*
	 * ICH8 has opposite polarity of no_snoop bits.
	 * By default, we should use snoop behavior.
	 */
	if (mac->type == e1000_ich8lan)
		snoop = PCIE_ICH8_SNOOP_ALL;
	else
		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
	e1000e_set_pcie_no_snoop(hw, snoop);

	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
	ew32(CTRL_EXT, ctrl_ext);

	/*
	 * Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_ich8lan(hw);

	return 0;
}
/**
 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
 * @hw: pointer to the HW structure
 *
 * Sets/Clears required hardware bits necessary for correctly setting up the
 * hardware for transmit and receive.  Several of the bits below are not
 * named in the register headers; assumed to follow Intel's init spec for
 * these parts — TODO confirm against the datasheet.
 **/
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
{
	u32 reg;

	/* Extended Device Control */
	reg = er32(CTRL_EXT);
	reg |= (1 << 22);	/* unnamed init bit - TODO confirm meaning */
	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
	if (hw->mac.type >= e1000_pchlan)
		reg |= E1000_CTRL_EXT_PHYPDEN;
	ew32(CTRL_EXT, reg);

	/* Transmit Descriptor Control 0 */
	reg = er32(TXDCTL(0));
	reg |= (1 << 22);
	ew32(TXDCTL(0), reg);

	/* Transmit Descriptor Control 1 */
	reg = er32(TXDCTL(1));
	reg |= (1 << 22);
	ew32(TXDCTL(1), reg);

	/* Transmit Arbitration Control 0 */
	reg = er32(TARC(0));
	if (hw->mac.type == e1000_ich8lan)
		reg |= (1 << 28) | (1 << 29);
	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
	ew32(TARC(0), reg);

	/* Transmit Arbitration Control 1 */
	reg = er32(TARC(1));
	/* bit 28 depends on whether multiple Tx requests (MULR) are on */
	if (er32(TCTL) & E1000_TCTL_MULR)
		reg &= ~(1 << 28);
	else
		reg |= (1 << 28);
	reg |= (1 << 24) | (1 << 26) | (1 << 30);
	ew32(TARC(1), reg);

	/* Device Status: ICH8 only - clear bit 31 */
	if (hw->mac.type == e1000_ich8lan) {
		reg = er32(STATUS);
		reg &= ~(1 << 31);
		ew32(STATUS, reg);
	}

	/*
	 * work-around descriptor data corruption issue during nfs v2 udp
	 * traffic, just disable the nfs filtering capability
	 */
	reg = er32(RFCTL);
	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
	ew32(RFCTL, reg);
}
3319
3320/**
3321 * e1000_setup_link_ich8lan - Setup flow control and link settings
3322 * @hw: pointer to the HW structure
3323 *
3324 * Determines which flow control settings to use, then configures flow
3325 * control. Calls the appropriate media-specific link configuration
3326 * function. Assuming the adapter has a valid link partner, a valid link
3327 * should be established. Assumes the hardware has previously been reset
3328 * and the transmitter and receiver are not enabled.
3329 **/
3330static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3331{
3332 s32 ret_val;
3333
3334 if (e1000_check_reset_block(hw))
3335 return 0;
3336
3337 /*
3338 * ICH parts do not have a word in the NVM to determine
3339 * the default flow control setting, so we explicitly
3340 * set it to full.
3341 */
3342 if (hw->fc.requested_mode == e1000_fc_default) {
3343 /* Workaround h/w hang when Tx flow control enabled */
3344 if (hw->mac.type == e1000_pchlan)
3345 hw->fc.requested_mode = e1000_fc_rx_pause;
3346 else
3347 hw->fc.requested_mode = e1000_fc_full;
3348 }
3349
3350 /*
3351 * Save off the requested flow control mode for use later. Depending
3352 * on the link partner's capabilities, we may or may not use this mode.
3353 */
3354 hw->fc.current_mode = hw->fc.requested_mode;
3355
3356 e_dbg("After fix-ups FlowControl is now = %x\n",
3357 hw->fc.current_mode);
3358
3359 /* Continue to configure the copper link. */
3360 ret_val = e1000_setup_copper_link_ich8lan(hw);
3361 if (ret_val)
3362 return ret_val;
3363
3364 ew32(FCTTV, hw->fc.pause_time);
3365 if ((hw->phy.type == e1000_phy_82578) ||
3366 (hw->phy.type == e1000_phy_82579) ||
3367 (hw->phy.type == e1000_phy_82577)) {
3368 ew32(FCRTV_PCH, hw->fc.refresh_time);
3369
3370 ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
3371 hw->fc.pause_time);
3372 if (ret_val)
3373 return ret_val;
3374 }
3375
3376 return e1000e_set_fc_watermarks(hw);
3377}
3378
3379/**
3380 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3381 * @hw: pointer to the HW structure
3382 *
3383 * Configures the kumeran interface to the PHY to wait the appropriate time
3384 * when polling the PHY, then call the generic setup_copper_link to finish
3385 * configuring the copper link.
3386 **/
3387static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3388{
3389 u32 ctrl;
3390 s32 ret_val;
3391 u16 reg_data;
3392
3393 ctrl = er32(CTRL);
3394 ctrl |= E1000_CTRL_SLU;
3395 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3396 ew32(CTRL, ctrl);
3397
3398 /*
3399 * Set the mac to wait the maximum time between each iteration
3400 * and increase the max iterations when polling the phy;
3401 * this fixes erroneous timeouts at 10Mbps.
3402 */
3403 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
3404 if (ret_val)
3405 return ret_val;
3406 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
3407 &reg_data);
3408 if (ret_val)
3409 return ret_val;
3410 reg_data |= 0x3F;
3411 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
3412 reg_data);
3413 if (ret_val)
3414 return ret_val;
3415
3416 switch (hw->phy.type) {
3417 case e1000_phy_igp_3:
3418 ret_val = e1000e_copper_link_setup_igp(hw);
3419 if (ret_val)
3420 return ret_val;
3421 break;
3422 case e1000_phy_bm:
3423 case e1000_phy_82578:
3424 ret_val = e1000e_copper_link_setup_m88(hw);
3425 if (ret_val)
3426 return ret_val;
3427 break;
3428 case e1000_phy_82577:
3429 case e1000_phy_82579:
3430 ret_val = e1000_copper_link_setup_82577(hw);
3431 if (ret_val)
3432 return ret_val;
3433 break;
3434 case e1000_phy_ife:
3435 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
3436 if (ret_val)
3437 return ret_val;
3438
3439 reg_data &= ~IFE_PMC_AUTO_MDIX;
3440
3441 switch (hw->phy.mdix) {
3442 case 1:
3443 reg_data &= ~IFE_PMC_FORCE_MDIX;
3444 break;
3445 case 2:
3446 reg_data |= IFE_PMC_FORCE_MDIX;
3447 break;
3448 case 0:
3449 default:
3450 reg_data |= IFE_PMC_AUTO_MDIX;
3451 break;
3452 }
3453 ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
3454 if (ret_val)
3455 return ret_val;
3456 break;
3457 default:
3458 break;
3459 }
3460 return e1000e_setup_copper_link(hw);
3461}
3462
3463/**
3464 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3465 * @hw: pointer to the HW structure
3466 * @speed: pointer to store current link speed
3467 * @duplex: pointer to store the current link duplex
3468 *
3469 * Calls the generic get_speed_and_duplex to retrieve the current link
3470 * information and then calls the Kumeran lock loss workaround for links at
3471 * gigabit speeds.
3472 **/
3473static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3474 u16 *duplex)
3475{
3476 s32 ret_val;
3477
3478 ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
3479 if (ret_val)
3480 return ret_val;
3481
3482 if ((hw->mac.type == e1000_ich8lan) &&
3483 (hw->phy.type == e1000_phy_igp_3) &&
3484 (*speed == SPEED_1000)) {
3485 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3486 }
3487
3488 return ret_val;
3489}
3490
/**
 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
 * @hw: pointer to the HW structure
 *
 * Work-around for 82566 Kumeran PCS lock loss:
 * On link status change (i.e. PCI reset, speed change) and link is up and
 * speed is gigabit-
 * 0) if workaround is optionally disabled do nothing
 * 1) wait 1ms for Kumeran link to come up
 * 2) check Kumeran Diagnostic register PCS lock loss bit
 * 3) if not set the link is locked (all is good), otherwise...
 * 4) reset the PHY
 * 5) repeat up to 10 times
 * Note: this is only called for IGP3 copper when speed is 1gb.
 *
 * Returns 0 on success (or when the workaround is disabled / link is
 * down), -E1000_ERR_PHY when the PCS lock could not be acquired after
 * 10 PHY resets (gigabit is then disabled entirely).
 **/
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;
	u16 i, data;
	bool link;

	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
		return 0;

	/*
	 * Make sure link is up before proceeding.  If not just return.
	 * Attempting this while link is negotiating fouled up link
	 * stability
	 */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (!link)
		return 0;

	for (i = 0; i < 10; i++) {
		/* read once to clear (the diag register is clear-on-read) */
		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;
		/* and again to get new status */
		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;

		/* check for PCS lock */
		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
			return 0;

		/* Issue PHY reset */
		e1000_phy_hw_reset(hw);
		mdelay(5);
	}
	/* All retries failed: disable GigE link negotiation */
	phy_ctrl = er32(PHY_CTRL);
	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
	ew32(PHY_CTRL, phy_ctrl);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers
	 */
	e1000e_gig_downshift_workaround_ich8lan(hw);

	/* unable to acquire PCS lock */
	return -E1000_ERR_PHY;
}
3559
3560/**
3561 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3562 * @hw: pointer to the HW structure
3563 * @state: boolean value used to set the current Kumeran workaround state
3564 *
3565 * If ICH8, set the current Kumeran workaround state (enabled - true
3566 * /disabled - false).
3567 **/
3568void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3569 bool state)
3570{
3571 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3572
3573 if (hw->mac.type != e1000_ich8lan) {
3574 e_dbg("Workaround applies to ICH8 only.\n");
3575 return;
3576 }
3577
3578 dev_spec->kmrn_lock_loss_workaround_enabled = state;
3579}
3580
/**
 * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
 * @hw: pointer to the HW structure
 *
 * Workaround for 82566 power-down on D3 entry:
 * 1) disable gigabit link
 * 2) write VR power-down enable
 * 3) read it back
 * Continue if successful, else issue LCD reset and repeat
 **/
void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
	u32 reg;
	u16 data;
	u8 retry = 0;

	/* Workaround only applies to the IGP3 PHY */
	if (hw->phy.type != e1000_phy_igp_3)
		return;

	/* Try the workaround twice (if needed) */
	do {
		/* Disable link */
		reg = er32(PHY_CTRL);
		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
		ew32(PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* Write VR power-down enable */
		e1e_rphy(hw, IGP3_VR_CTRL, &data);
		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);

		/* Read it back and test; give up after the second attempt */
		e1e_rphy(hw, IGP3_VR_CTRL, &data);
		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
			break;

		/* Issue PHY reset and repeat at most one more time */
		reg = er32(CTRL);
		ew32(CTRL, reg | E1000_CTRL_PHY_RST);
		retry++;
	} while (retry);
}
3632
3633/**
3634 * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3635 * @hw: pointer to the HW structure
3636 *
3637 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
3638 * LPLU, Gig disable, MDIC PHY reset):
3639 * 1) Set Kumeran Near-end loopback
3640 * 2) Clear Kumeran Near-end loopback
3641 * Should only be called for ICH8[m] devices with IGP_3 Phy.
3642 **/
3643void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3644{
3645 s32 ret_val;
3646 u16 reg_data;
3647
3648 if ((hw->mac.type != e1000_ich8lan) ||
3649 (hw->phy.type != e1000_phy_igp_3))
3650 return;
3651
3652 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3653 &reg_data);
3654 if (ret_val)
3655 return;
3656 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3657 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3658 reg_data);
3659 if (ret_val)
3660 return;
3661 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3662 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3663 reg_data);
3664}
3665
3666/**
3667 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
3668 * @hw: pointer to the HW structure
3669 *
3670 * During S0 to Sx transition, it is possible the link remains at gig
3671 * instead of negotiating to a lower speed. Before going to Sx, set
3672 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
3673 * to a lower speed. For PCH and newer parts, the OEM bits PHY register
3674 * (LED, GbE disable and LPLU configurations) also needs to be written.
3675 **/
3676void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3677{
3678 u32 phy_ctrl;
3679 s32 ret_val;
3680
3681 phy_ctrl = er32(PHY_CTRL);
3682 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
3683 ew32(PHY_CTRL, phy_ctrl);
3684
3685 if (hw->mac.type >= e1000_pchlan) {
3686 e1000_oem_bits_config_ich8lan(hw, false);
3687 ret_val = hw->phy.ops.acquire(hw);
3688 if (ret_val)
3689 return;
3690 e1000_write_smbus_addr(hw);
3691 hw->phy.ops.release(hw);
3692 }
3693}
3694
3695/**
3696 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
3697 * @hw: pointer to the HW structure
3698 *
3699 * During Sx to S0 transitions on non-managed devices or managed devices
3700 * on which PHY resets are not blocked, if the PHY registers cannot be
3701 * accessed properly by the s/w toggle the LANPHYPC value to power cycle
3702 * the PHY.
3703 **/
3704void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
3705{
3706 u32 fwsm;
3707
3708 if (hw->mac.type != e1000_pch2lan)
3709 return;
3710
3711 fwsm = er32(FWSM);
3712 if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) {
3713 u16 phy_id1, phy_id2;
3714 s32 ret_val;
3715
3716 ret_val = hw->phy.ops.acquire(hw);
3717 if (ret_val) {
3718 e_dbg("Failed to acquire PHY semaphore in resume\n");
3719 return;
3720 }
3721
3722 /* Test access to the PHY registers by reading the ID regs */
3723 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
3724 if (ret_val)
3725 goto release;
3726 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
3727 if (ret_val)
3728 goto release;
3729
3730 if (hw->phy.id == ((u32)(phy_id1 << 16) |
3731 (u32)(phy_id2 & PHY_REVISION_MASK)))
3732 goto release;
3733
3734 e1000_toggle_lanphypc_value_ich8lan(hw);
3735
3736 hw->phy.ops.release(hw);
3737 msleep(50);
3738 e1000_phy_hw_reset(hw);
3739 msleep(50);
3740 return;
3741 }
3742
3743release:
3744 hw->phy.ops.release(hw);
3745
3746 return;
3747}
3748
3749/**
3750 * e1000_cleanup_led_ich8lan - Restore the default LED operation
3751 * @hw: pointer to the HW structure
3752 *
3753 * Return the LED back to the default configuration.
3754 **/
3755static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
3756{
3757 if (hw->phy.type == e1000_phy_ife)
3758 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
3759
3760 ew32(LEDCTL, hw->mac.ledctl_default);
3761 return 0;
3762}
3763
3764/**
3765 * e1000_led_on_ich8lan - Turn LEDs on
3766 * @hw: pointer to the HW structure
3767 *
3768 * Turn on the LEDs.
3769 **/
3770static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
3771{
3772 if (hw->phy.type == e1000_phy_ife)
3773 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3774 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
3775
3776 ew32(LEDCTL, hw->mac.ledctl_mode2);
3777 return 0;
3778}
3779
3780/**
3781 * e1000_led_off_ich8lan - Turn LEDs off
3782 * @hw: pointer to the HW structure
3783 *
3784 * Turn off the LEDs.
3785 **/
3786static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3787{
3788 if (hw->phy.type == e1000_phy_ife)
3789 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3790 (IFE_PSCL_PROBE_MODE |
3791 IFE_PSCL_PROBE_LEDS_OFF));
3792
3793 ew32(LEDCTL, hw->mac.ledctl_mode1);
3794 return 0;
3795}
3796
3797/**
3798 * e1000_setup_led_pchlan - Configures SW controllable LED
3799 * @hw: pointer to the HW structure
3800 *
3801 * This prepares the SW controllable LED for use.
3802 **/
3803static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
3804{
3805 return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
3806}
3807
3808/**
3809 * e1000_cleanup_led_pchlan - Restore the default LED operation
3810 * @hw: pointer to the HW structure
3811 *
3812 * Return the LED back to the default configuration.
3813 **/
3814static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
3815{
3816 return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
3817}
3818
3819/**
3820 * e1000_led_on_pchlan - Turn LEDs on
3821 * @hw: pointer to the HW structure
3822 *
3823 * Turn on the LEDs.
3824 **/
3825static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
3826{
3827 u16 data = (u16)hw->mac.ledctl_mode2;
3828 u32 i, led;
3829
3830 /*
3831 * If no link, then turn LED on by setting the invert bit
3832 * for each LED that's mode is "link_up" in ledctl_mode2.
3833 */
3834 if (!(er32(STATUS) & E1000_STATUS_LU)) {
3835 for (i = 0; i < 3; i++) {
3836 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
3837 if ((led & E1000_PHY_LED0_MODE_MASK) !=
3838 E1000_LEDCTL_MODE_LINK_UP)
3839 continue;
3840 if (led & E1000_PHY_LED0_IVRT)
3841 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
3842 else
3843 data |= (E1000_PHY_LED0_IVRT << (i * 5));
3844 }
3845 }
3846
3847 return e1e_wphy(hw, HV_LED_CONFIG, data);
3848}
3849
3850/**
3851 * e1000_led_off_pchlan - Turn LEDs off
3852 * @hw: pointer to the HW structure
3853 *
3854 * Turn off the LEDs.
3855 **/
3856static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
3857{
3858 u16 data = (u16)hw->mac.ledctl_mode1;
3859 u32 i, led;
3860
3861 /*
3862 * If no link, then turn LED off by clearing the invert bit
3863 * for each LED that's mode is "link_up" in ledctl_mode1.
3864 */
3865 if (!(er32(STATUS) & E1000_STATUS_LU)) {
3866 for (i = 0; i < 3; i++) {
3867 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
3868 if ((led & E1000_PHY_LED0_MODE_MASK) !=
3869 E1000_LEDCTL_MODE_LINK_UP)
3870 continue;
3871 if (led & E1000_PHY_LED0_IVRT)
3872 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
3873 else
3874 data |= (E1000_PHY_LED0_IVRT << (i * 5));
3875 }
3876 }
3877
3878 return e1e_wphy(hw, HV_LED_CONFIG, data);
3879}
3880
3881/**
3882 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
3883 * @hw: pointer to the HW structure
3884 *
3885 * Read appropriate register for the config done bit for completion status
3886 * and configure the PHY through s/w for EEPROM-less parts.
3887 *
3888 * NOTE: some silicon which is EEPROM-less will fail trying to read the
3889 * config done bit, so only an error is logged and continues. If we were
3890 * to return with error, EEPROM-less silicon would not be able to be reset
3891 * or change link.
3892 **/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 bank = 0;
	u32 status;

	/* Generic wait for the PHY configuration-done indication first. */
	e1000e_get_cfg_done(hw);

	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000e_get_auto_rd_done(hw);
		if (ret_val) {
			/*
			 * When auto config read does not complete, do not
			 * return with an error. This can happen in situations
			 * where there is no eeprom and prevents getting link.
			 */
			e_dbg("Auto Read Done did not complete\n");
			ret_val = 0;
		}
	}

	/* Clear PHY Reset Asserted bit */
	status = er32(STATUS);
	if (status & E1000_STATUS_PHYRA)
		ew32(STATUS, status & ~E1000_STATUS_PHYRA);
	else
		e_dbg("PHY Reset Asserted not set - needs delay\n");

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		/* ICH8/ICH9: only the IGP3 PHY needs the manual init script */
		if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			e1000e_phy_init_script_igp3(hw);
		}
	} else {
		/* ICH10+/PCH: flag a missing NVM image as a config error */
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			e_dbg("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
		}
	}

	return ret_val;
}
3940
3941/**
3942 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
3943 * @hw: pointer to the HW structure
3944 *
3945 * In the case of a PHY power down to save power, or to turn off link during a
3946 * driver unload, or wake on lan is not enabled, remove the link.
3947 **/
3948static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
3949{
3950 /* If the management interface is not enabled, then power down */
3951 if (!(hw->mac.ops.check_mng_mode(hw) ||
3952 hw->phy.ops.check_reset_block(hw)))
3953 e1000_power_down_phy_copper(hw);
3954}
3955
3956/**
3957 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
3958 * @hw: pointer to the HW structure
3959 *
3960 * Clears hardware counters specific to the silicon family and calls
3961 * clear_hw_cntrs_generic to clear all general purpose counters.
3962 **/
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
	u16 phy_data;
	s32 ret_val;

	/* Clear the counters common to all MAC families first. */
	e1000e_clear_hw_cntrs_base(hw);

	/* These MAC statistics registers clear on read; read and discard. */
	er32(ALGNERRC);
	er32(RXERRC);
	er32(TNCRS);
	er32(CEXTERR);
	er32(TSCTC);
	er32(TSCTFC);

	er32(MGTPRC);
	er32(MGTPDC);
	er32(MGTPTC);

	er32(IAC);
	er32(ICRXOC);

	/* Clear PHY statistics registers */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_82577)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		/* Select the statistics page, then read each counter's
		 * upper/lower half to clear it (values are discarded). */
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		if (ret_val)
			goto release;
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
release:
		hw->phy.ops.release(hw);
	}
}
4013
/*
 * MAC operations shared by all ICH/PCH families. Entries noted as
 * "dependent on mac type" are filled in at probe time per device family.
 */
static struct e1000_mac_operations ich8_mac_ops = {
	.id_led_init		= e1000e_id_led_init,
	/* check_mng_mode dependent on mac type */
	.check_for_link		= e1000_check_for_copper_link_ich8lan,
	/* cleanup_led dependent on mac type */
	.clear_hw_cntrs		= e1000_clear_hw_cntrs_ich8lan,
	.get_bus_info		= e1000_get_bus_info_ich8lan,
	.set_lan_id		= e1000_set_lan_id_single_port,
	.get_link_up_info	= e1000_get_link_up_info_ich8lan,
	/* led_on dependent on mac type */
	/* led_off dependent on mac type */
	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
	.reset_hw		= e1000_reset_hw_ich8lan,
	.init_hw		= e1000_init_hw_ich8lan,
	.setup_link		= e1000_setup_link_ich8lan,
	.setup_physical_interface= e1000_setup_copper_link_ich8lan,
	/* id_led_init dependent on mac type */
};
4032
/* PHY operations shared by all ICH/PCH families. */
static struct e1000_phy_operations ich8_phy_ops = {
	.acquire		= e1000_acquire_swflag_ich8lan,
	.check_reset_block	= e1000_check_reset_block_ich8lan,
	.commit			= NULL,	/* no PHY commit step on these parts */
	.get_cfg_done		= e1000_get_cfg_done_ich8lan,
	.get_cable_length	= e1000e_get_cable_length_igp_2,
	.read_reg		= e1000e_read_phy_reg_igp,
	.release		= e1000_release_swflag_ich8lan,
	.reset			= e1000_phy_hw_reset_ich8lan,
	.set_d0_lplu_state	= e1000_set_d0_lplu_state_ich8lan,
	.set_d3_lplu_state	= e1000_set_d3_lplu_state_ich8lan,
	.write_reg		= e1000e_write_phy_reg_igp,
};
4046
/* NVM (flash) operations shared by all ICH/PCH families. */
static struct e1000_nvm_operations ich8_nvm_ops = {
	.acquire		= e1000_acquire_nvm_ich8lan,
	.read		= e1000_read_nvm_ich8lan,
	.release		= e1000_release_nvm_ich8lan,
	.update		= e1000_update_nvm_checksum_ich8lan,
	.valid_led_default	= e1000_valid_led_default_ich8lan,
	.validate		= e1000_validate_nvm_checksum_ich8lan,
	.write		= e1000_write_nvm_ich8lan,
};
4056
/* Board info for ICH8 parts: standard frames only (no jumbo support). */
struct e1000_info e1000_ich8_info = {
	.mac			= e1000_ich8lan,
	.flags			= FLAG_HAS_WOL
				  | FLAG_IS_ICH
				  | FLAG_RX_CSUM_ENABLED
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 8,	/* packet buffer allocation */
	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
4073
/* Board info for ICH9 parts: adds jumbo frames and ERT vs. ICH8. */
struct e1000_info e1000_ich9_info = {
	.mac			= e1000_ich9lan,
	.flags			= FLAG_HAS_JUMBO_FRAMES
				  | FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_RX_CSUM_ENABLED
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_ERT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 10,	/* packet buffer allocation */
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
4092
/* Board info for ICH10 parts: same feature set as ICH9. */
struct e1000_info e1000_ich10_info = {
	.mac			= e1000_ich10lan,
	.flags			= FLAG_HAS_JUMBO_FRAMES
				  | FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_RX_CSUM_ENABLED
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_ERT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 10,	/* packet buffer allocation */
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
4111
/*
 * Board info for PCH (82577/82578) parts. Frame size is capped at 4096
 * and the flow-control pause time is disabled per a hardware errata.
 */
struct e1000_info e1000_pch_info = {
	.mac			= e1000_pchlan,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_RX_CSUM_ENABLED
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS,
	.pba			= 26,	/* packet buffer allocation */
	.max_hw_frame_size	= 4096,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
4131
/* Board info for PCH2 (82579) parts: adds EEE and full jumbo support. */
struct e1000_info e1000_pch2_info = {
	.mac			= e1000_pch2lan,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_RX_CSUM_ENABLED
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE,
	.pba			= 26,	/* packet buffer allocation */
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
new file mode 100644
index 00000000000..0893ab107ad
--- /dev/null
+++ b/drivers/net/e1000e/lib.c
@@ -0,0 +1,2693 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "e1000.h"
30
/* Manageability firmware operating modes. */
enum e1000_mng_mode {
	e1000_mng_mode_none = 0,	/* no manageability */
	e1000_mng_mode_asf,		/* presumably ASF mode — per hw spec */
	e1000_mng_mode_pt,		/* pass-through */
	e1000_mng_mode_ipmi,		/* IPMI */
	e1000_mng_mode_host_if_only	/* host interface only */
};
38
39#define E1000_FACTPS_MNGCG 0x20000000
40
41/* Intel(R) Active Management Technology signature */
42#define E1000_IAMT_SIGNATURE 0x544D4149
43
44/**
45 * e1000e_get_bus_info_pcie - Get PCIe bus information
46 * @hw: pointer to the HW structure
47 *
48 * Determines and stores the system bus information for a particular
49 * network interface. The following bus information is determined and stored:
50 * bus speed, bus width, type (PCIe), and PCIe function.
51 **/
52s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
53{
54 struct e1000_mac_info *mac = &hw->mac;
55 struct e1000_bus_info *bus = &hw->bus;
56 struct e1000_adapter *adapter = hw->adapter;
57 u16 pcie_link_status, cap_offset;
58
59 cap_offset = adapter->pdev->pcie_cap;
60 if (!cap_offset) {
61 bus->width = e1000_bus_width_unknown;
62 } else {
63 pci_read_config_word(adapter->pdev,
64 cap_offset + PCIE_LINK_STATUS,
65 &pcie_link_status);
66 bus->width = (enum e1000_bus_width)((pcie_link_status &
67 PCIE_LINK_WIDTH_MASK) >>
68 PCIE_LINK_WIDTH_SHIFT);
69 }
70
71 mac->ops.set_lan_id(hw);
72
73 return 0;
74}
75
76/**
 * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 * @hw: pointer to the HW structure
80 *
81 * Determines the LAN function id by reading memory-mapped registers
82 * and swaps the port value if requested.
83 **/
84void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
85{
86 struct e1000_bus_info *bus = &hw->bus;
87 u32 reg;
88
89 /*
90 * The status register reports the correct function number
91 * for the device regardless of function swap state.
92 */
93 reg = er32(STATUS);
94 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
95}
96
97/**
98 * e1000_set_lan_id_single_port - Set LAN id for a single port device
99 * @hw: pointer to the HW structure
100 *
101 * Sets the LAN function id to zero for a single port device.
102 **/
103void e1000_set_lan_id_single_port(struct e1000_hw *hw)
104{
105 struct e1000_bus_info *bus = &hw->bus;
106
107 bus->func = 0;
108}
109
110/**
111 * e1000_clear_vfta_generic - Clear VLAN filter table
112 * @hw: pointer to the HW structure
113 *
114 * Clears the register array which contains the VLAN filter table by
115 * setting all the values to 0.
116 **/
117void e1000_clear_vfta_generic(struct e1000_hw *hw)
118{
119 u32 offset;
120
121 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
122 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
123 e1e_flush();
124 }
125}
126
127/**
128 * e1000_write_vfta_generic - Write value to VLAN filter table
129 * @hw: pointer to the HW structure
130 * @offset: register offset in VLAN filter table
131 * @value: register value written to VLAN filter table
132 *
133 * Writes value at the given offset in the register array which stores
134 * the VLAN filter table.
135 **/
void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
{
	/* Write one VLAN filter table entry and flush it to hardware. */
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
	e1e_flush();
}
141
142/**
143 * e1000e_init_rx_addrs - Initialize receive address's
144 * @hw: pointer to the HW structure
145 * @rar_count: receive address registers
146 *
147 * Setup the receive address registers by setting the base receive address
148 * register to the devices MAC address and clearing all the other receive
149 * address registers to 0.
150 **/
151void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
152{
153 u32 i;
154 u8 mac_addr[ETH_ALEN] = {0};
155
156 /* Setup the receive address */
157 e_dbg("Programming MAC Address into RAR[0]\n");
158
159 e1000e_rar_set(hw, hw->mac.addr, 0);
160
161 /* Zero out the other (rar_entry_count - 1) receive addresses */
162 e_dbg("Clearing RAR[1-%u]\n", rar_count-1);
163 for (i = 1; i < rar_count; i++)
164 e1000e_rar_set(hw, mac_addr, i);
165}
166
167/**
168 * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
169 * @hw: pointer to the HW structure
170 *
171 * Checks the nvm for an alternate MAC address. An alternate MAC address
172 * can be setup by pre-boot software and must be treated like a permanent
173 * address and must override the actual permanent MAC address. If an
174 * alternate MAC address is found it is programmed into RAR0, replacing
175 * the permanent address that was installed into RAR0 by the Si on reset.
176 * This function will return SUCCESS unless it encounters an error while
177 * reading the EEPROM.
178 **/
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
	if (ret_val)
		goto out;

	/* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
	if (!((nvm_data & NVM_COMPAT_LOM) ||
	      (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
	      (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) ||
	      (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES)))
		goto out;

	/* The NVM stores a pointer to where the alternate address lives. */
	ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				 &nvm_alt_mac_addr_offset);
	if (ret_val) {
		e_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no Alternate MAC Address */
		goto out;

	/* The second LAN function's address is stored after the first's. */
	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	/* Read the address two bytes (one NVM word) at a time. */
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
		if (ret_val) {
			e_dbg("NVM Read Error\n");
			goto out;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/*
	 * We have a valid alternate MAC address, and we want to treat it the
	 * same as the normal permanent MAC address stored by the HW into the
	 * RAR. Do this by mapping this address into RAR0.
	 */
	e1000e_rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}
239
240/**
241 * e1000e_rar_set - Set receive address register
242 * @hw: pointer to the HW structure
243 * @addr: pointer to the receive address
244 * @index: receive address array register
245 *
246 * Sets the receive address array register at index to the address passed
247 * in by addr.
248 **/
249void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
250{
251 u32 rar_low, rar_high;
252
253 /*
254 * HW expects these in little endian so we reverse the byte order
255 * from network order (big endian) to little endian
256 */
257 rar_low = ((u32) addr[0] |
258 ((u32) addr[1] << 8) |
259 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
260
261 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
262
263 /* If MAC address zero, no need to set the AV bit */
264 if (rar_low || rar_high)
265 rar_high |= E1000_RAH_AV;
266
267 /*
268 * Some bridges will combine consecutive 32-bit writes into
269 * a single burst write, which will malfunction on some parts.
270 * The flushes avoid this.
271 */
272 ew32(RAL(index), rar_low);
273 e1e_flush();
274 ew32(RAH(index), rar_high);
275 e1e_flush();
276}
277
278/**
279 * e1000_hash_mc_addr - Generate a multicast hash value
280 * @hw: pointer to the HW structure
281 * @mc_addr: pointer to a multicast address
282 *
283 * Generates a multicast address hash value which is used to determine
284 * the multicast filter table array address and new table value. See
285 * e1000_mta_set_generic()
286 **/
287static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
288{
289 u32 hash_value, hash_mask;
290 u8 bit_shift = 0;
291
292 /* Register count multiplied by bits per register */
293 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
294
295 /*
296 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
297 * where 0xFF would still fall within the hash mask.
298 */
299 while (hash_mask >> bit_shift != 0xFF)
300 bit_shift++;
301
302 /*
303 * The portion of the address that is used for the hash table
304 * is determined by the mc_filter_type setting.
305 * The algorithm is such that there is a total of 8 bits of shifting.
306 * The bit_shift for a mc_filter_type of 0 represents the number of
307 * left-shifts where the MSB of mc_addr[5] would still fall within
308 * the hash_mask. Case 0 does this exactly. Since there are a total
309 * of 8 bits of shifting, then mc_addr[4] will shift right the
310 * remaining number of bits. Thus 8 - bit_shift. The rest of the
311 * cases are a variation of this algorithm...essentially raising the
312 * number of bits to shift mc_addr[5] left, while still keeping the
313 * 8-bit shifting total.
314 *
315 * For example, given the following Destination MAC Address and an
316 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
317 * we can see that the bit_shift for case 0 is 4. These are the hash
318 * values resulting from each mc_filter_type...
319 * [0] [1] [2] [3] [4] [5]
320 * 01 AA 00 12 34 56
321 * LSB MSB
322 *
323 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
324 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
325 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
326 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
327 */
328 switch (hw->mac.mc_filter_type) {
329 default:
330 case 0:
331 break;
332 case 1:
333 bit_shift += 1;
334 break;
335 case 2:
336 bit_shift += 2;
337 break;
338 case 3:
339 bit_shift += 4;
340 break;
341 }
342
343 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
344 (((u16) mc_addr[5]) << bit_shift)));
345
346 return hash_value;
347}
348
349/**
350 * e1000e_update_mc_addr_list_generic - Update Multicast addresses
351 * @hw: pointer to the HW structure
352 * @mc_addr_list: array of multicast addresses to program
353 * @mc_addr_count: number of multicast addresses to program
354 *
355 * Updates entire Multicast Table Array.
356 * The caller must have a packed mc_addr_list of multicast addresses.
357 **/
358void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
359 u8 *mc_addr_list, u32 mc_addr_count)
360{
361 u32 hash_value, hash_bit, hash_reg;
362 int i;
363
364 /* clear mta_shadow */
365 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
366
367 /* update mta_shadow from mc_addr_list */
368 for (i = 0; (u32) i < mc_addr_count; i++) {
369 hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
370
371 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
372 hash_bit = hash_value & 0x1F;
373
374 hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
375 mc_addr_list += (ETH_ALEN);
376 }
377
378 /* replace the entire MTA table */
379 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
380 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
381 e1e_flush();
382}
383
384/**
385 * e1000e_clear_hw_cntrs_base - Clear base hardware counters
386 * @hw: pointer to the HW structure
387 *
388 * Clears the base hardware counters by reading the counter registers.
389 **/
void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	/* These statistics registers clear on read; read and discard. */
	er32(CRCERRS);
	er32(SYMERRS);
	er32(MPC);
	er32(SCC);
	er32(ECOL);
	er32(MCC);
	er32(LATECOL);
	er32(COLC);
	er32(DC);
	er32(SEC);
	er32(RLEC);
	er32(XONRXC);
	er32(XONTXC);
	er32(XOFFRXC);
	er32(XOFFTXC);
	er32(FCRUC);
	er32(GPRC);
	er32(BPRC);
	er32(MPRC);
	er32(GPTC);
	er32(GORCL);
	er32(GORCH);
	er32(GOTCL);
	er32(GOTCH);
	er32(RNBC);
	er32(RUC);
	er32(RFC);
	er32(ROC);
	er32(RJC);
	er32(TORL);
	er32(TORH);
	er32(TOTL);
	er32(TOTH);
	er32(TPR);
	er32(TPT);
	er32(MPTC);
	er32(BPTC);
}
430
431/**
432 * e1000e_check_for_copper_link - Check for link (Copper)
433 * @hw: pointer to the HW structure
434 *
435 * Checks to see of the link status of the hardware has changed. If a
436 * change in link status has been detected, then we read the PHY registers
437 * to get the current speed/duplex if link exists.
438 **/
s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/*
	 * We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed. The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return 0;

	/*
	 * First we want to see if the MII Status Register reports
	 * link. If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link)
		return ret_val; /* No link detected */

	mac->get_link_status = false;

	/*
	 * Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000e_check_downshift(hw);

	/*
	 * If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		/* NOTE(review): error return signals the forced-mode path
		 * to the caller rather than a hard failure — confirm
		 * callers treat -E1000_ERR_CONFIG accordingly. */
		ret_val = -E1000_ERR_CONFIG;
		return ret_val;
	}

	/*
	 * Auto-Neg is enabled. Auto Speed Detection takes care
	 * of MAC speed/duplex configuration. So we only need to
	 * configure Collision Distance in the MAC.
	 */
	e1000e_config_collision_dist(hw);

	/*
	 * Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000e_config_fc_after_link_up(hw);
	if (ret_val)
		e_dbg("Error configuring flow control\n");

	return ret_val;
}
502
503/**
504 * e1000e_check_for_fiber_link - Check for link (Fiber)
505 * @hw: pointer to the HW structure
506 *
507 * Checks for link up on the hardware. If link is not up and we have
508 * a signal, then we need to force link up.
509 **/
s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 rxcw;
	u32 ctrl;
	u32 status;
	s32 ret_val;

	ctrl = er32(CTRL);
	status = er32(STATUS);
	rxcw = er32(RXCW);

	/*
	 * If we don't have link (auto-negotiation failed or link partner
	 * cannot auto-negotiate), the cable is plugged in (we have signal),
	 * and our link partner is not trying to auto-negotiate with us (we
	 * are receiving idles or data), we need to force link up. We also
	 * need to give auto-negotiation time to complete, in case the cable
	 * was just plugged in. The autoneg_failed flag does this.
	 */
	/* CTRL's SWDPIN1 bit set means we have signal */
	if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
	    (!(rxcw & E1000_RXCW_C))) {
		/* First pass: give auto-negotiation one more chance. */
		if (mac->autoneg_failed == 0) {
			mac->autoneg_failed = 1;
			return 0;
		}
		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");

		/* Disable auto-negotiation in the TXCW register */
		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));

		/* Force link-up and also force full-duplex. */
		ctrl = er32(CTRL);
		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
		ew32(CTRL, ctrl);

		/* Configure Flow Control after forcing link up. */
		ret_val = e1000e_config_fc_after_link_up(hw);
		if (ret_val) {
			e_dbg("Error configuring flow control\n");
			return ret_val;
		}
	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
		/*
		 * If we are forcing link and we are receiving /C/ ordered
		 * sets, re-enable auto-negotiation in the TXCW register
		 * and disable forced link in the Device Control register
		 * in an attempt to auto-negotiate with our link partner.
		 */
		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
		ew32(TXCW, mac->txcw);
		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));

		mac->serdes_has_link = true;
	}

	return 0;
}
569
570/**
571 * e1000e_check_for_serdes_link - Check for link (Serdes)
572 * @hw: pointer to the HW structure
573 *
574 * Checks for link up on the hardware. If link is not up and we have
575 * a signal, then we need to force link up.
576 **/
577s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
578{
579 struct e1000_mac_info *mac = &hw->mac;
580 u32 rxcw;
581 u32 ctrl;
582 u32 status;
583 s32 ret_val;
584
585 ctrl = er32(CTRL);
586 status = er32(STATUS);
587 rxcw = er32(RXCW);
588
589 /*
590 * If we don't have link (auto-negotiation failed or link partner
591 * cannot auto-negotiate), and our link partner is not trying to
592 * auto-negotiate with us (we are receiving idles or data),
593 * we need to force link up. We also need to give auto-negotiation
594 * time to complete.
595 */
596 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
597 if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
598 if (mac->autoneg_failed == 0) {
599 mac->autoneg_failed = 1;
600 return 0;
601 }
602 e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
603
604 /* Disable auto-negotiation in the TXCW register */
605 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
606
607 /* Force link-up and also force full-duplex. */
608 ctrl = er32(CTRL);
609 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
610 ew32(CTRL, ctrl);
611
612 /* Configure Flow Control after forcing link up. */
613 ret_val = e1000e_config_fc_after_link_up(hw);
614 if (ret_val) {
615 e_dbg("Error configuring flow control\n");
616 return ret_val;
617 }
618 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
619 /*
620 * If we are forcing link and we are receiving /C/ ordered
621 * sets, re-enable auto-negotiation in the TXCW register
622 * and disable forced link in the Device Control register
623 * in an attempt to auto-negotiate with our link partner.
624 */
625 e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
626 ew32(TXCW, mac->txcw);
627 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
628
629 mac->serdes_has_link = true;
630 } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
631 /*
632 * If we force link for non-auto-negotiation switch, check
633 * link status based on MAC synchronization for internal
634 * serdes media type.
635 */
636 /* SYNCH bit and IV bit are sticky. */
637 udelay(10);
638 rxcw = er32(RXCW);
639 if (rxcw & E1000_RXCW_SYNCH) {
640 if (!(rxcw & E1000_RXCW_IV)) {
641 mac->serdes_has_link = true;
642 e_dbg("SERDES: Link up - forced.\n");
643 }
644 } else {
645 mac->serdes_has_link = false;
646 e_dbg("SERDES: Link down - force failed.\n");
647 }
648 }
649
650 if (E1000_TXCW_ANE & er32(TXCW)) {
651 status = er32(STATUS);
652 if (status & E1000_STATUS_LU) {
653 /* SYNCH bit and IV bit are sticky, so reread rxcw. */
654 udelay(10);
655 rxcw = er32(RXCW);
656 if (rxcw & E1000_RXCW_SYNCH) {
657 if (!(rxcw & E1000_RXCW_IV)) {
658 mac->serdes_has_link = true;
659 e_dbg("SERDES: Link up - autoneg "
660 "completed successfully.\n");
661 } else {
662 mac->serdes_has_link = false;
663 e_dbg("SERDES: Link down - invalid"
664 "codewords detected in autoneg.\n");
665 }
666 } else {
667 mac->serdes_has_link = false;
668 e_dbg("SERDES: Link down - no sync.\n");
669 }
670 } else {
671 mac->serdes_has_link = false;
672 e_dbg("SERDES: Link down - autoneg failed\n");
673 }
674 }
675
676 return 0;
677}
678
679/**
680 * e1000_set_default_fc_generic - Set flow control default values
681 * @hw: pointer to the HW structure
682 *
683 * Read the EEPROM for the default values for flow control and store the
684 * values.
685 **/
686static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
687{
688 s32 ret_val;
689 u16 nvm_data;
690
691 /*
692 * Read and store word 0x0F of the EEPROM. This word contains bits
693 * that determine the hardware's default PAUSE (flow control) mode,
694 * a bit that determines whether the HW defaults to enabling or
695 * disabling auto-negotiation, and the direction of the
696 * SW defined pins. If there is no SW over-ride of the flow
697 * control setting, then the variable hw->fc will
698 * be initialized based on a value in the EEPROM.
699 */
700 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
701
702 if (ret_val) {
703 e_dbg("NVM Read Error\n");
704 return ret_val;
705 }
706
707 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
708 hw->fc.requested_mode = e1000_fc_none;
709 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
710 NVM_WORD0F_ASM_DIR)
711 hw->fc.requested_mode = e1000_fc_tx_pause;
712 else
713 hw->fc.requested_mode = e1000_fc_full;
714
715 return 0;
716}
717
718/**
719 * e1000e_setup_link - Setup flow control and link settings
720 * @hw: pointer to the HW structure
721 *
722 * Determines which flow control settings to use, then configures flow
723 * control. Calls the appropriate media-specific link configuration
724 * function. Assuming the adapter has a valid link partner, a valid link
725 * should be established. Assumes the hardware has previously been reset
726 * and the transmitter and receiver are not enabled.
727 **/
s32 e1000e_setup_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;

	/*
	 * In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (e1000_check_reset_block(hw))
		return 0;

	/*
	 * If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = e1000_set_default_fc_generic(hw);
		if (ret_val)
			return ret_val;
	}

	/*
	 * Save off the requested flow control mode for use later. Depending
	 * on the link partner's capabilities, we may or may not use this mode.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	e_dbg("After fix-ups FlowControl is now = %x\n",
		hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = mac->ops.setup_physical_interface(hw);
	if (ret_val)
		return ret_val;

	/*
	 * Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values. This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	e_dbg("Initializing the Flow Control address, type and timer regs\n");
	ew32(FCT, FLOW_CONTROL_TYPE);
	ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);

	ew32(FCTTV, hw->fc.pause_time);

	/* Finally program the Rx high/low watermarks for PAUSE frames. */
	return e1000e_set_fc_watermarks(hw);
}
779
780/**
781 * e1000_commit_fc_settings_generic - Configure flow control
782 * @hw: pointer to the HW structure
783 *
784 * Write the flow control settings to the Transmit Config Word Register (TXCW)
785 * base on the flow control settings in e1000_mac_info.
786 **/
787static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
788{
789 struct e1000_mac_info *mac = &hw->mac;
790 u32 txcw;
791
792 /*
793 * Check for a software override of the flow control settings, and
794 * setup the device accordingly. If auto-negotiation is enabled, then
795 * software will have to set the "PAUSE" bits to the correct value in
796 * the Transmit Config Word Register (TXCW) and re-start auto-
797 * negotiation. However, if auto-negotiation is disabled, then
798 * software will have to manually configure the two flow control enable
799 * bits in the CTRL register.
800 *
801 * The possible values of the "fc" parameter are:
802 * 0: Flow control is completely disabled
803 * 1: Rx flow control is enabled (we can receive pause frames,
804 * but not send pause frames).
805 * 2: Tx flow control is enabled (we can send pause frames but we
806 * do not support receiving pause frames).
807 * 3: Both Rx and Tx flow control (symmetric) are enabled.
808 */
809 switch (hw->fc.current_mode) {
810 case e1000_fc_none:
811 /* Flow control completely disabled by a software over-ride. */
812 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
813 break;
814 case e1000_fc_rx_pause:
815 /*
816 * Rx Flow control is enabled and Tx Flow control is disabled
817 * by a software over-ride. Since there really isn't a way to
818 * advertise that we are capable of Rx Pause ONLY, we will
819 * advertise that we support both symmetric and asymmetric Rx
820 * PAUSE. Later, we will disable the adapter's ability to send
821 * PAUSE frames.
822 */
823 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
824 break;
825 case e1000_fc_tx_pause:
826 /*
827 * Tx Flow control is enabled, and Rx Flow control is disabled,
828 * by a software over-ride.
829 */
830 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
831 break;
832 case e1000_fc_full:
833 /*
834 * Flow control (both Rx and Tx) is enabled by a software
835 * over-ride.
836 */
837 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
838 break;
839 default:
840 e_dbg("Flow control param set incorrectly\n");
841 return -E1000_ERR_CONFIG;
842 break;
843 }
844
845 ew32(TXCW, txcw);
846 mac->txcw = txcw;
847
848 return 0;
849}
850
851/**
852 * e1000_poll_fiber_serdes_link_generic - Poll for link up
853 * @hw: pointer to the HW structure
854 *
855 * Polls for link up by reading the status register, if link fails to come
856 * up with auto-negotiation, then the link is forced if a signal is detected.
857 **/
858static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
859{
860 struct e1000_mac_info *mac = &hw->mac;
861 u32 i, status;
862 s32 ret_val;
863
864 /*
865 * If we have a signal (the cable is plugged in, or assumed true for
866 * serdes media) then poll for a "Link-Up" indication in the Device
867 * Status Register. Time-out if a link isn't seen in 500 milliseconds
868 * seconds (Auto-negotiation should complete in less than 500
869 * milliseconds even if the other end is doing it in SW).
870 */
871 for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
872 usleep_range(10000, 20000);
873 status = er32(STATUS);
874 if (status & E1000_STATUS_LU)
875 break;
876 }
877 if (i == FIBER_LINK_UP_LIMIT) {
878 e_dbg("Never got a valid link from auto-neg!!!\n");
879 mac->autoneg_failed = 1;
880 /*
881 * AutoNeg failed to achieve a link, so we'll call
882 * mac->check_for_link. This routine will force the
883 * link up if we detect a signal. This will allow us to
884 * communicate with non-autonegotiating link partners.
885 */
886 ret_val = mac->ops.check_for_link(hw);
887 if (ret_val) {
888 e_dbg("Error while checking for link\n");
889 return ret_val;
890 }
891 mac->autoneg_failed = 0;
892 } else {
893 mac->autoneg_failed = 0;
894 e_dbg("Valid Link Found\n");
895 }
896
897 return 0;
898}
899
900/**
901 * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
902 * @hw: pointer to the HW structure
903 *
904 * Configures collision distance and flow control for fiber and serdes
905 * links. Upon successful setup, poll for link.
906 **/
907s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
908{
909 u32 ctrl;
910 s32 ret_val;
911
912 ctrl = er32(CTRL);
913
914 /* Take the link out of reset */
915 ctrl &= ~E1000_CTRL_LRST;
916
917 e1000e_config_collision_dist(hw);
918
919 ret_val = e1000_commit_fc_settings_generic(hw);
920 if (ret_val)
921 return ret_val;
922
923 /*
924 * Since auto-negotiation is enabled, take the link out of reset (the
925 * link will be in reset, because we previously reset the chip). This
926 * will restart auto-negotiation. If auto-negotiation is successful
927 * then the link-up status bit will be set and the flow control enable
928 * bits (RFCE and TFCE) will be set according to their negotiated value.
929 */
930 e_dbg("Auto-negotiation enabled\n");
931
932 ew32(CTRL, ctrl);
933 e1e_flush();
934 usleep_range(1000, 2000);
935
936 /*
937 * For these adapters, the SW definable pin 1 is set when the optics
938 * detect a signal. If we have a signal, then poll for a "Link-Up"
939 * indication.
940 */
941 if (hw->phy.media_type == e1000_media_type_internal_serdes ||
942 (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
943 ret_val = e1000_poll_fiber_serdes_link_generic(hw);
944 } else {
945 e_dbg("No signal detected\n");
946 }
947
948 return 0;
949}
950
951/**
952 * e1000e_config_collision_dist - Configure collision distance
953 * @hw: pointer to the HW structure
954 *
955 * Configures the collision distance to the default value and is used
956 * during link setup. Currently no func pointer exists and all
957 * implementations are handled in the generic version of this function.
958 **/
959void e1000e_config_collision_dist(struct e1000_hw *hw)
960{
961 u32 tctl;
962
963 tctl = er32(TCTL);
964
965 tctl &= ~E1000_TCTL_COLD;
966 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
967
968 ew32(TCTL, tctl);
969 e1e_flush();
970}
971
972/**
973 * e1000e_set_fc_watermarks - Set flow control high/low watermarks
974 * @hw: pointer to the HW structure
975 *
976 * Sets the flow control high/low threshold (watermark) registers. If
977 * flow control XON frame transmission is enabled, then set XON frame
978 * transmission as well.
979 **/
980s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
981{
982 u32 fcrtl = 0, fcrth = 0;
983
984 /*
985 * Set the flow control receive threshold registers. Normally,
986 * these registers will be set to a default threshold that may be
987 * adjusted later by the driver's runtime code. However, if the
988 * ability to transmit pause frames is not enabled, then these
989 * registers will be set to 0.
990 */
991 if (hw->fc.current_mode & e1000_fc_tx_pause) {
992 /*
993 * We need to set up the Receive Threshold high and low water
994 * marks as well as (optionally) enabling the transmission of
995 * XON frames.
996 */
997 fcrtl = hw->fc.low_water;
998 fcrtl |= E1000_FCRTL_XONE;
999 fcrth = hw->fc.high_water;
1000 }
1001 ew32(FCRTL, fcrtl);
1002 ew32(FCRTH, fcrth);
1003
1004 return 0;
1005}
1006
1007/**
1008 * e1000e_force_mac_fc - Force the MAC's flow control settings
1009 * @hw: pointer to the HW structure
1010 *
1011 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
1012 * device control register to reflect the adapter settings. TFCE and RFCE
1013 * need to be explicitly set by software when a copper PHY is used because
1014 * autonegotiation is managed by the PHY rather than the MAC. Software must
1015 * also configure these bits when link is forced on a fiber connection.
1016 **/
1017s32 e1000e_force_mac_fc(struct e1000_hw *hw)
1018{
1019 u32 ctrl;
1020
1021 ctrl = er32(CTRL);
1022
1023 /*
1024 * Because we didn't get link via the internal auto-negotiation
1025 * mechanism (we either forced link or we got link via PHY
1026 * auto-neg), we have to manually enable/disable transmit an
1027 * receive flow control.
1028 *
1029 * The "Case" statement below enables/disable flow control
1030 * according to the "hw->fc.current_mode" parameter.
1031 *
1032 * The possible values of the "fc" parameter are:
1033 * 0: Flow control is completely disabled
1034 * 1: Rx flow control is enabled (we can receive pause
1035 * frames but not send pause frames).
1036 * 2: Tx flow control is enabled (we can send pause frames
1037 * frames but we do not receive pause frames).
1038 * 3: Both Rx and Tx flow control (symmetric) is enabled.
1039 * other: No other values should be possible at this point.
1040 */
1041 e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
1042
1043 switch (hw->fc.current_mode) {
1044 case e1000_fc_none:
1045 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
1046 break;
1047 case e1000_fc_rx_pause:
1048 ctrl &= (~E1000_CTRL_TFCE);
1049 ctrl |= E1000_CTRL_RFCE;
1050 break;
1051 case e1000_fc_tx_pause:
1052 ctrl &= (~E1000_CTRL_RFCE);
1053 ctrl |= E1000_CTRL_TFCE;
1054 break;
1055 case e1000_fc_full:
1056 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
1057 break;
1058 default:
1059 e_dbg("Flow control param set incorrectly\n");
1060 return -E1000_ERR_CONFIG;
1061 }
1062
1063 ew32(CTRL, ctrl);
1064
1065 return 0;
1066}
1067
1068/**
1069 * e1000e_config_fc_after_link_up - Configures flow control after link
1070 * @hw: pointer to the HW structure
1071 *
1072 * Checks the status of auto-negotiation after link up to ensure that the
1073 * speed and duplex were not forced. If the link needed to be forced, then
1074 * flow control needs to be forced also. If auto-negotiation is enabled
1075 * and did not fail, then we configure flow control based on our link
1076 * partner.
1077 **/
1078s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1079{
1080 struct e1000_mac_info *mac = &hw->mac;
1081 s32 ret_val = 0;
1082 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
1083 u16 speed, duplex;
1084
1085 /*
1086 * Check for the case where we have fiber media and auto-neg failed
1087 * so we had to force link. In this case, we need to force the
1088 * configuration of the MAC to match the "fc" parameter.
1089 */
1090 if (mac->autoneg_failed) {
1091 if (hw->phy.media_type == e1000_media_type_fiber ||
1092 hw->phy.media_type == e1000_media_type_internal_serdes)
1093 ret_val = e1000e_force_mac_fc(hw);
1094 } else {
1095 if (hw->phy.media_type == e1000_media_type_copper)
1096 ret_val = e1000e_force_mac_fc(hw);
1097 }
1098
1099 if (ret_val) {
1100 e_dbg("Error forcing flow control settings\n");
1101 return ret_val;
1102 }
1103
1104 /*
1105 * Check for the case where we have copper media and auto-neg is
1106 * enabled. In this case, we need to check and see if Auto-Neg
1107 * has completed, and if so, how the PHY and link partner has
1108 * flow control configured.
1109 */
1110 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
1111 /*
1112 * Read the MII Status Register and check to see if AutoNeg
1113 * has completed. We read this twice because this reg has
1114 * some "sticky" (latched) bits.
1115 */
1116 ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
1117 if (ret_val)
1118 return ret_val;
1119 ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
1120 if (ret_val)
1121 return ret_val;
1122
1123 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
1124 e_dbg("Copper PHY and Auto Neg "
1125 "has not completed.\n");
1126 return ret_val;
1127 }
1128
1129 /*
1130 * The AutoNeg process has completed, so we now need to
1131 * read both the Auto Negotiation Advertisement
1132 * Register (Address 4) and the Auto_Negotiation Base
1133 * Page Ability Register (Address 5) to determine how
1134 * flow control was negotiated.
1135 */
1136 ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
1137 if (ret_val)
1138 return ret_val;
1139 ret_val =
1140 e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
1141 if (ret_val)
1142 return ret_val;
1143
1144 /*
1145 * Two bits in the Auto Negotiation Advertisement Register
1146 * (Address 4) and two bits in the Auto Negotiation Base
1147 * Page Ability Register (Address 5) determine flow control
1148 * for both the PHY and the link partner. The following
1149 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1150 * 1999, describes these PAUSE resolution bits and how flow
1151 * control is determined based upon these settings.
1152 * NOTE: DC = Don't Care
1153 *
1154 * LOCAL DEVICE | LINK PARTNER
1155 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1156 *-------|---------|-------|---------|--------------------
1157 * 0 | 0 | DC | DC | e1000_fc_none
1158 * 0 | 1 | 0 | DC | e1000_fc_none
1159 * 0 | 1 | 1 | 0 | e1000_fc_none
1160 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1161 * 1 | 0 | 0 | DC | e1000_fc_none
1162 * 1 | DC | 1 | DC | e1000_fc_full
1163 * 1 | 1 | 0 | 0 | e1000_fc_none
1164 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1165 *
1166 * Are both PAUSE bits set to 1? If so, this implies
1167 * Symmetric Flow Control is enabled at both ends. The
1168 * ASM_DIR bits are irrelevant per the spec.
1169 *
1170 * For Symmetric Flow Control:
1171 *
1172 * LOCAL DEVICE | LINK PARTNER
1173 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1174 *-------|---------|-------|---------|--------------------
1175 * 1 | DC | 1 | DC | E1000_fc_full
1176 *
1177 */
1178 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1179 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
1180 /*
1181 * Now we need to check if the user selected Rx ONLY
1182 * of pause frames. In this case, we had to advertise
1183 * FULL flow control because we could not advertise Rx
1184 * ONLY. Hence, we must now check to see if we need to
1185 * turn OFF the TRANSMISSION of PAUSE frames.
1186 */
1187 if (hw->fc.requested_mode == e1000_fc_full) {
1188 hw->fc.current_mode = e1000_fc_full;
1189 e_dbg("Flow Control = FULL.\r\n");
1190 } else {
1191 hw->fc.current_mode = e1000_fc_rx_pause;
1192 e_dbg("Flow Control = "
1193 "Rx PAUSE frames only.\r\n");
1194 }
1195 }
1196 /*
1197 * For receiving PAUSE frames ONLY.
1198 *
1199 * LOCAL DEVICE | LINK PARTNER
1200 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1201 *-------|---------|-------|---------|--------------------
1202 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1203 */
1204 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1205 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1206 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1207 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1208 hw->fc.current_mode = e1000_fc_tx_pause;
1209 e_dbg("Flow Control = Tx PAUSE frames only.\r\n");
1210 }
1211 /*
1212 * For transmitting PAUSE frames ONLY.
1213 *
1214 * LOCAL DEVICE | LINK PARTNER
1215 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1216 *-------|---------|-------|---------|--------------------
1217 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1218 */
1219 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1220 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1221 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1222 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1223 hw->fc.current_mode = e1000_fc_rx_pause;
1224 e_dbg("Flow Control = Rx PAUSE frames only.\r\n");
1225 } else {
1226 /*
1227 * Per the IEEE spec, at this point flow control
1228 * should be disabled.
1229 */
1230 hw->fc.current_mode = e1000_fc_none;
1231 e_dbg("Flow Control = NONE.\r\n");
1232 }
1233
1234 /*
1235 * Now we need to do one last check... If we auto-
1236 * negotiated to HALF DUPLEX, flow control should not be
1237 * enabled per IEEE 802.3 spec.
1238 */
1239 ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
1240 if (ret_val) {
1241 e_dbg("Error getting link speed and duplex\n");
1242 return ret_val;
1243 }
1244
1245 if (duplex == HALF_DUPLEX)
1246 hw->fc.current_mode = e1000_fc_none;
1247
1248 /*
1249 * Now we call a subroutine to actually force the MAC
1250 * controller to use the correct flow control settings.
1251 */
1252 ret_val = e1000e_force_mac_fc(hw);
1253 if (ret_val) {
1254 e_dbg("Error forcing flow control settings\n");
1255 return ret_val;
1256 }
1257 }
1258
1259 return 0;
1260}
1261
1262/**
1263 * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
1264 * @hw: pointer to the HW structure
1265 * @speed: stores the current speed
1266 * @duplex: stores the current duplex
1267 *
1268 * Read the status register for the current speed/duplex and store the current
1269 * speed and duplex for copper connections.
1270 **/
1271s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex)
1272{
1273 u32 status;
1274
1275 status = er32(STATUS);
1276 if (status & E1000_STATUS_SPEED_1000)
1277 *speed = SPEED_1000;
1278 else if (status & E1000_STATUS_SPEED_100)
1279 *speed = SPEED_100;
1280 else
1281 *speed = SPEED_10;
1282
1283 if (status & E1000_STATUS_FD)
1284 *duplex = FULL_DUPLEX;
1285 else
1286 *duplex = HALF_DUPLEX;
1287
1288 e_dbg("%u Mbps, %s Duplex\n",
1289 *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
1290 *duplex == FULL_DUPLEX ? "Full" : "Half");
1291
1292 return 0;
1293}
1294
1295/**
1296 * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
1297 * @hw: pointer to the HW structure
1298 * @speed: stores the current speed
1299 * @duplex: stores the current duplex
1300 *
1301 * Sets the speed and duplex to gigabit full duplex (the only possible option)
1302 * for fiber/serdes links.
1303 **/
1304s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex)
1305{
1306 *speed = SPEED_1000;
1307 *duplex = FULL_DUPLEX;
1308
1309 return 0;
1310}
1311
1312/**
1313 * e1000e_get_hw_semaphore - Acquire hardware semaphore
1314 * @hw: pointer to the HW structure
1315 *
1316 * Acquire the HW semaphore to access the PHY or NVM
1317 **/
1318s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
1319{
1320 u32 swsm;
1321 s32 timeout = hw->nvm.word_size + 1;
1322 s32 i = 0;
1323
1324 /* Get the SW semaphore */
1325 while (i < timeout) {
1326 swsm = er32(SWSM);
1327 if (!(swsm & E1000_SWSM_SMBI))
1328 break;
1329
1330 udelay(50);
1331 i++;
1332 }
1333
1334 if (i == timeout) {
1335 e_dbg("Driver can't access device - SMBI bit is set.\n");
1336 return -E1000_ERR_NVM;
1337 }
1338
1339 /* Get the FW semaphore. */
1340 for (i = 0; i < timeout; i++) {
1341 swsm = er32(SWSM);
1342 ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
1343
1344 /* Semaphore acquired if bit latched */
1345 if (er32(SWSM) & E1000_SWSM_SWESMBI)
1346 break;
1347
1348 udelay(50);
1349 }
1350
1351 if (i == timeout) {
1352 /* Release semaphores */
1353 e1000e_put_hw_semaphore(hw);
1354 e_dbg("Driver can't access the NVM\n");
1355 return -E1000_ERR_NVM;
1356 }
1357
1358 return 0;
1359}
1360
1361/**
1362 * e1000e_put_hw_semaphore - Release hardware semaphore
1363 * @hw: pointer to the HW structure
1364 *
1365 * Release hardware semaphore used to access the PHY or NVM
1366 **/
1367void e1000e_put_hw_semaphore(struct e1000_hw *hw)
1368{
1369 u32 swsm;
1370
1371 swsm = er32(SWSM);
1372 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1373 ew32(SWSM, swsm);
1374}
1375
1376/**
1377 * e1000e_get_auto_rd_done - Check for auto read completion
1378 * @hw: pointer to the HW structure
1379 *
1380 * Check EEPROM for Auto Read done bit.
1381 **/
1382s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
1383{
1384 s32 i = 0;
1385
1386 while (i < AUTO_READ_DONE_TIMEOUT) {
1387 if (er32(EECD) & E1000_EECD_AUTO_RD)
1388 break;
1389 usleep_range(1000, 2000);
1390 i++;
1391 }
1392
1393 if (i == AUTO_READ_DONE_TIMEOUT) {
1394 e_dbg("Auto read by HW from NVM has not completed.\n");
1395 return -E1000_ERR_RESET;
1396 }
1397
1398 return 0;
1399}
1400
1401/**
1402 * e1000e_valid_led_default - Verify a valid default LED config
1403 * @hw: pointer to the HW structure
1404 * @data: pointer to the NVM (EEPROM)
1405 *
1406 * Read the EEPROM for the current default LED configuration. If the
1407 * LED configuration is not valid, set to a valid LED configuration.
1408 **/
1409s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
1410{
1411 s32 ret_val;
1412
1413 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1414 if (ret_val) {
1415 e_dbg("NVM Read Error\n");
1416 return ret_val;
1417 }
1418
1419 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1420 *data = ID_LED_DEFAULT;
1421
1422 return 0;
1423}
1424
1425/**
1426 * e1000e_id_led_init -
1427 * @hw: pointer to the HW structure
1428 *
1429 **/
1430s32 e1000e_id_led_init(struct e1000_hw *hw)
1431{
1432 struct e1000_mac_info *mac = &hw->mac;
1433 s32 ret_val;
1434 const u32 ledctl_mask = 0x000000FF;
1435 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1436 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1437 u16 data, i, temp;
1438 const u16 led_mask = 0x0F;
1439
1440 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
1441 if (ret_val)
1442 return ret_val;
1443
1444 mac->ledctl_default = er32(LEDCTL);
1445 mac->ledctl_mode1 = mac->ledctl_default;
1446 mac->ledctl_mode2 = mac->ledctl_default;
1447
1448 for (i = 0; i < 4; i++) {
1449 temp = (data >> (i << 2)) & led_mask;
1450 switch (temp) {
1451 case ID_LED_ON1_DEF2:
1452 case ID_LED_ON1_ON2:
1453 case ID_LED_ON1_OFF2:
1454 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1455 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1456 break;
1457 case ID_LED_OFF1_DEF2:
1458 case ID_LED_OFF1_ON2:
1459 case ID_LED_OFF1_OFF2:
1460 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1461 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1462 break;
1463 default:
1464 /* Do nothing */
1465 break;
1466 }
1467 switch (temp) {
1468 case ID_LED_DEF1_ON2:
1469 case ID_LED_ON1_ON2:
1470 case ID_LED_OFF1_ON2:
1471 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1472 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1473 break;
1474 case ID_LED_DEF1_OFF2:
1475 case ID_LED_ON1_OFF2:
1476 case ID_LED_OFF1_OFF2:
1477 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1478 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1479 break;
1480 default:
1481 /* Do nothing */
1482 break;
1483 }
1484 }
1485
1486 return 0;
1487}
1488
1489/**
1490 * e1000e_setup_led_generic - Configures SW controllable LED
1491 * @hw: pointer to the HW structure
1492 *
1493 * This prepares the SW controllable LED for use and saves the current state
1494 * of the LED so it can be later restored.
1495 **/
1496s32 e1000e_setup_led_generic(struct e1000_hw *hw)
1497{
1498 u32 ledctl;
1499
1500 if (hw->mac.ops.setup_led != e1000e_setup_led_generic)
1501 return -E1000_ERR_CONFIG;
1502
1503 if (hw->phy.media_type == e1000_media_type_fiber) {
1504 ledctl = er32(LEDCTL);
1505 hw->mac.ledctl_default = ledctl;
1506 /* Turn off LED0 */
1507 ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
1508 E1000_LEDCTL_LED0_BLINK |
1509 E1000_LEDCTL_LED0_MODE_MASK);
1510 ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
1511 E1000_LEDCTL_LED0_MODE_SHIFT);
1512 ew32(LEDCTL, ledctl);
1513 } else if (hw->phy.media_type == e1000_media_type_copper) {
1514 ew32(LEDCTL, hw->mac.ledctl_mode1);
1515 }
1516
1517 return 0;
1518}
1519
1520/**
1521 * e1000e_cleanup_led_generic - Set LED config to default operation
1522 * @hw: pointer to the HW structure
1523 *
1524 * Remove the current LED configuration and set the LED configuration
1525 * to the default value, saved from the EEPROM.
1526 **/
1527s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
1528{
1529 ew32(LEDCTL, hw->mac.ledctl_default);
1530 return 0;
1531}
1532
1533/**
1534 * e1000e_blink_led_generic - Blink LED
1535 * @hw: pointer to the HW structure
1536 *
1537 * Blink the LEDs which are set to be on.
1538 **/
1539s32 e1000e_blink_led_generic(struct e1000_hw *hw)
1540{
1541 u32 ledctl_blink = 0;
1542 u32 i;
1543
1544 if (hw->phy.media_type == e1000_media_type_fiber) {
1545 /* always blink LED0 for PCI-E fiber */
1546 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1547 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1548 } else {
1549 /*
1550 * set the blink bit for each LED that's "on" (0x0E)
1551 * in ledctl_mode2
1552 */
1553 ledctl_blink = hw->mac.ledctl_mode2;
1554 for (i = 0; i < 4; i++)
1555 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1556 E1000_LEDCTL_MODE_LED_ON)
1557 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
1558 (i * 8));
1559 }
1560
1561 ew32(LEDCTL, ledctl_blink);
1562
1563 return 0;
1564}
1565
1566/**
1567 * e1000e_led_on_generic - Turn LED on
1568 * @hw: pointer to the HW structure
1569 *
1570 * Turn LED on.
1571 **/
1572s32 e1000e_led_on_generic(struct e1000_hw *hw)
1573{
1574 u32 ctrl;
1575
1576 switch (hw->phy.media_type) {
1577 case e1000_media_type_fiber:
1578 ctrl = er32(CTRL);
1579 ctrl &= ~E1000_CTRL_SWDPIN0;
1580 ctrl |= E1000_CTRL_SWDPIO0;
1581 ew32(CTRL, ctrl);
1582 break;
1583 case e1000_media_type_copper:
1584 ew32(LEDCTL, hw->mac.ledctl_mode2);
1585 break;
1586 default:
1587 break;
1588 }
1589
1590 return 0;
1591}
1592
1593/**
1594 * e1000e_led_off_generic - Turn LED off
1595 * @hw: pointer to the HW structure
1596 *
1597 * Turn LED off.
1598 **/
1599s32 e1000e_led_off_generic(struct e1000_hw *hw)
1600{
1601 u32 ctrl;
1602
1603 switch (hw->phy.media_type) {
1604 case e1000_media_type_fiber:
1605 ctrl = er32(CTRL);
1606 ctrl |= E1000_CTRL_SWDPIN0;
1607 ctrl |= E1000_CTRL_SWDPIO0;
1608 ew32(CTRL, ctrl);
1609 break;
1610 case e1000_media_type_copper:
1611 ew32(LEDCTL, hw->mac.ledctl_mode1);
1612 break;
1613 default:
1614 break;
1615 }
1616
1617 return 0;
1618}
1619
1620/**
1621 * e1000e_set_pcie_no_snoop - Set PCI-express capabilities
1622 * @hw: pointer to the HW structure
1623 * @no_snoop: bitmap of snoop events
1624 *
1625 * Set the PCI-express register to snoop for events enabled in 'no_snoop'.
1626 **/
1627void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
1628{
1629 u32 gcr;
1630
1631 if (no_snoop) {
1632 gcr = er32(GCR);
1633 gcr &= ~(PCIE_NO_SNOOP_ALL);
1634 gcr |= no_snoop;
1635 ew32(GCR, gcr);
1636 }
1637}
1638
1639/**
1640 * e1000e_disable_pcie_master - Disables PCI-express master access
1641 * @hw: pointer to the HW structure
1642 *
1643 * Returns 0 if successful, else returns -10
1644 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
1645 * the master requests to be disabled.
1646 *
1647 * Disables PCI-Express master access and verifies there are no pending
1648 * requests.
1649 **/
1650s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
1651{
1652 u32 ctrl;
1653 s32 timeout = MASTER_DISABLE_TIMEOUT;
1654
1655 ctrl = er32(CTRL);
1656 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1657 ew32(CTRL, ctrl);
1658
1659 while (timeout) {
1660 if (!(er32(STATUS) &
1661 E1000_STATUS_GIO_MASTER_ENABLE))
1662 break;
1663 udelay(100);
1664 timeout--;
1665 }
1666
1667 if (!timeout) {
1668 e_dbg("Master requests are pending.\n");
1669 return -E1000_ERR_MASTER_REQUESTS_PENDING;
1670 }
1671
1672 return 0;
1673}
1674
1675/**
1676 * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
1677 * @hw: pointer to the HW structure
1678 *
1679 * Reset the Adaptive Interframe Spacing throttle to default values.
1680 **/
1681void e1000e_reset_adaptive(struct e1000_hw *hw)
1682{
1683 struct e1000_mac_info *mac = &hw->mac;
1684
1685 if (!mac->adaptive_ifs) {
1686 e_dbg("Not in Adaptive IFS mode!\n");
1687 goto out;
1688 }
1689
1690 mac->current_ifs_val = 0;
1691 mac->ifs_min_val = IFS_MIN;
1692 mac->ifs_max_val = IFS_MAX;
1693 mac->ifs_step_size = IFS_STEP;
1694 mac->ifs_ratio = IFS_RATIO;
1695
1696 mac->in_ifs_mode = false;
1697 ew32(AIT, 0);
1698out:
1699 return;
1700}
1701
1702/**
1703 * e1000e_update_adaptive - Update Adaptive Interframe Spacing
1704 * @hw: pointer to the HW structure
1705 *
1706 * Update the Adaptive Interframe Spacing Throttle value based on the
1707 * time between transmitted packets and time between collisions.
1708 **/
1709void e1000e_update_adaptive(struct e1000_hw *hw)
1710{
1711 struct e1000_mac_info *mac = &hw->mac;
1712
1713 if (!mac->adaptive_ifs) {
1714 e_dbg("Not in Adaptive IFS mode!\n");
1715 goto out;
1716 }
1717
1718 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
1719 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
1720 mac->in_ifs_mode = true;
1721 if (mac->current_ifs_val < mac->ifs_max_val) {
1722 if (!mac->current_ifs_val)
1723 mac->current_ifs_val = mac->ifs_min_val;
1724 else
1725 mac->current_ifs_val +=
1726 mac->ifs_step_size;
1727 ew32(AIT, mac->current_ifs_val);
1728 }
1729 }
1730 } else {
1731 if (mac->in_ifs_mode &&
1732 (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
1733 mac->current_ifs_val = 0;
1734 mac->in_ifs_mode = false;
1735 ew32(AIT, 0);
1736 }
1737 }
1738out:
1739 return;
1740}
1741
1742/**
1743 * e1000_raise_eec_clk - Raise EEPROM clock
1744 * @hw: pointer to the HW structure
1745 * @eecd: pointer to the EEPROM
1746 *
1747 * Enable/Raise the EEPROM clock bit.
1748 **/
1749static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
1750{
1751 *eecd = *eecd | E1000_EECD_SK;
1752 ew32(EECD, *eecd);
1753 e1e_flush();
1754 udelay(hw->nvm.delay_usec);
1755}
1756
1757/**
1758 * e1000_lower_eec_clk - Lower EEPROM clock
1759 * @hw: pointer to the HW structure
1760 * @eecd: pointer to the EEPROM
1761 *
1762 * Clear/Lower the EEPROM clock bit.
1763 **/
1764static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
1765{
1766 *eecd = *eecd & ~E1000_EECD_SK;
1767 ew32(EECD, *eecd);
1768 e1e_flush();
1769 udelay(hw->nvm.delay_usec);
1770}
1771
1772/**
1773 * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM
1774 * @hw: pointer to the HW structure
1775 * @data: data to send to the EEPROM
1776 * @count: number of bits to shift out
1777 *
1778 * We need to shift 'count' bits out to the EEPROM. So, the value in the
1779 * "data" parameter will be shifted out to the EEPROM one bit at a time.
1780 * In order to do this, "data" must be broken down into bits.
1781 **/
1782static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
1783{
1784 struct e1000_nvm_info *nvm = &hw->nvm;
1785 u32 eecd = er32(EECD);
1786 u32 mask;
1787
1788 mask = 0x01 << (count - 1);
1789 if (nvm->type == e1000_nvm_eeprom_spi)
1790 eecd |= E1000_EECD_DO;
1791
1792 do {
1793 eecd &= ~E1000_EECD_DI;
1794
1795 if (data & mask)
1796 eecd |= E1000_EECD_DI;
1797
1798 ew32(EECD, eecd);
1799 e1e_flush();
1800
1801 udelay(nvm->delay_usec);
1802
1803 e1000_raise_eec_clk(hw, &eecd);
1804 e1000_lower_eec_clk(hw, &eecd);
1805
1806 mask >>= 1;
1807 } while (mask);
1808
1809 eecd &= ~E1000_EECD_DI;
1810 ew32(EECD, eecd);
1811}
1812
1813/**
1814 * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
1815 * @hw: pointer to the HW structure
1816 * @count: number of bits to shift in
1817 *
1818 * In order to read a register from the EEPROM, we need to shift 'count' bits
1819 * in from the EEPROM. Bits are "shifted in" by raising the clock input to
1820 * the EEPROM (setting the SK bit), and then reading the value of the data out
1821 * "DO" bit. During this "shifting in" process the data in "DI" bit should
1822 * always be clear.
1823 **/
1824static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
1825{
1826 u32 eecd;
1827 u32 i;
1828 u16 data;
1829
1830 eecd = er32(EECD);
1831
1832 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
1833 data = 0;
1834
1835 for (i = 0; i < count; i++) {
1836 data <<= 1;
1837 e1000_raise_eec_clk(hw, &eecd);
1838
1839 eecd = er32(EECD);
1840
1841 eecd &= ~E1000_EECD_DI;
1842 if (eecd & E1000_EECD_DO)
1843 data |= 1;
1844
1845 e1000_lower_eec_clk(hw, &eecd);
1846 }
1847
1848 return data;
1849}
1850
1851/**
1852 * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
1853 * @hw: pointer to the HW structure
1854 * @ee_reg: EEPROM flag for polling
1855 *
1856 * Polls the EEPROM status bit for either read or write completion based
1857 * upon the value of 'ee_reg'.
1858 **/
1859s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
1860{
1861 u32 attempts = 100000;
1862 u32 i, reg = 0;
1863
1864 for (i = 0; i < attempts; i++) {
1865 if (ee_reg == E1000_NVM_POLL_READ)
1866 reg = er32(EERD);
1867 else
1868 reg = er32(EEWR);
1869
1870 if (reg & E1000_NVM_RW_REG_DONE)
1871 return 0;
1872
1873 udelay(5);
1874 }
1875
1876 return -E1000_ERR_NVM;
1877}
1878
1879/**
1880 * e1000e_acquire_nvm - Generic request for access to EEPROM
1881 * @hw: pointer to the HW structure
1882 *
1883 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
1884 * Return successful if access grant bit set, else clear the request for
1885 * EEPROM access and return -E1000_ERR_NVM (-1).
1886 **/
1887s32 e1000e_acquire_nvm(struct e1000_hw *hw)
1888{
1889 u32 eecd = er32(EECD);
1890 s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
1891
1892 ew32(EECD, eecd | E1000_EECD_REQ);
1893 eecd = er32(EECD);
1894
1895 while (timeout) {
1896 if (eecd & E1000_EECD_GNT)
1897 break;
1898 udelay(5);
1899 eecd = er32(EECD);
1900 timeout--;
1901 }
1902
1903 if (!timeout) {
1904 eecd &= ~E1000_EECD_REQ;
1905 ew32(EECD, eecd);
1906 e_dbg("Could not acquire NVM grant\n");
1907 return -E1000_ERR_NVM;
1908 }
1909
1910 return 0;
1911}
1912
/**
 * e1000_standby_nvm - Return EEPROM to standby state
 * @hw: pointer to the HW structure
 *
 * Return the EEPROM to a standby state.
 **/
static void e1000_standby_nvm(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);

	if (nvm->type == e1000_nvm_eeprom_spi) {
		/* Toggle CS to flush commands */
		eecd |= E1000_EECD_CS;
		ew32(EECD, eecd);
		e1e_flush();
		udelay(nvm->delay_usec);
		/* ...then re-select the device, ready for the next command */
		eecd &= ~E1000_EECD_CS;
		ew32(EECD, eecd);
		e1e_flush();
		udelay(nvm->delay_usec);
	}
}
1936
1937/**
1938 * e1000_stop_nvm - Terminate EEPROM command
1939 * @hw: pointer to the HW structure
1940 *
1941 * Terminates the current command by inverting the EEPROM's chip select pin.
1942 **/
1943static void e1000_stop_nvm(struct e1000_hw *hw)
1944{
1945 u32 eecd;
1946
1947 eecd = er32(EECD);
1948 if (hw->nvm.type == e1000_nvm_eeprom_spi) {
1949 /* Pull CS high */
1950 eecd |= E1000_EECD_CS;
1951 e1000_lower_eec_clk(hw, &eecd);
1952 }
1953}
1954
1955/**
1956 * e1000e_release_nvm - Release exclusive access to EEPROM
1957 * @hw: pointer to the HW structure
1958 *
1959 * Stop any current commands to the EEPROM and clear the EEPROM request bit.
1960 **/
1961void e1000e_release_nvm(struct e1000_hw *hw)
1962{
1963 u32 eecd;
1964
1965 e1000_stop_nvm(hw);
1966
1967 eecd = er32(EECD);
1968 eecd &= ~E1000_EECD_REQ;
1969 ew32(EECD, eecd);
1970}
1971
/**
 * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
 * @hw: pointer to the HW structure
 *
 * Setups the EEPROM for reading and writing.
 **/
static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u8 spi_stat_reg;

	if (nvm->type == e1000_nvm_eeprom_spi) {
		u16 timeout = NVM_MAX_RETRY_SPI;

		/* Clear SK and CS */
		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
		ew32(EECD, eecd);
		e1e_flush();
		udelay(1);

		/*
		 * Read "Status Register" repeatedly until the LSB is cleared.
		 * The EEPROM will signal that the command has been completed
		 * by clearing bit 0 of the internal status register. If it's
		 * not cleared within 'timeout', then error out.
		 */
		while (timeout) {
			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
						 hw->nvm.opcode_bits);
			spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
				break;

			udelay(5);
			/* Toggle CS so the next RDSR starts a fresh command */
			e1000_standby_nvm(hw);
			timeout--;
		}

		if (!timeout) {
			e_dbg("SPI NVM Status error\n");
			return -E1000_ERR_NVM;
		}
	}

	return 0;
}
2019
2020/**
2021 * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
2022 * @hw: pointer to the HW structure
2023 * @offset: offset of word in the EEPROM to read
2024 * @words: number of words to read
2025 * @data: word read from the EEPROM
2026 *
2027 * Reads a 16 bit word from the EEPROM using the EERD register.
2028 **/
2029s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
2030{
2031 struct e1000_nvm_info *nvm = &hw->nvm;
2032 u32 i, eerd = 0;
2033 s32 ret_val = 0;
2034
2035 /*
2036 * A check for invalid values: offset too large, too many words,
2037 * too many words for the offset, and not enough words.
2038 */
2039 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
2040 (words == 0)) {
2041 e_dbg("nvm parameter(s) out of bounds\n");
2042 return -E1000_ERR_NVM;
2043 }
2044
2045 for (i = 0; i < words; i++) {
2046 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
2047 E1000_NVM_RW_REG_START;
2048
2049 ew32(EERD, eerd);
2050 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
2051 if (ret_val)
2052 break;
2053
2054 data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
2055 }
2056
2057 return ret_val;
2058}
2059
/**
 * e1000e_write_nvm_spi - Write to EEPROM using SPI
 * @hw: pointer to the HW structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * Writes data to EEPROM at offset using SPI interface.
 *
 * If e1000e_update_nvm_checksum is not called after this function , the
 * EEPROM will most likely contain an invalid checksum.
 **/
s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	s32 ret_val;
	u16 widx = 0;

	/*
	 * A check for invalid values: offset too large, too many words,
	 * and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		e_dbg("nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	ret_val = nvm->ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Outer loop: one iteration per WRITE command / EEPROM page */
	while (widx < words) {
		u8 write_opcode = NVM_WRITE_OPCODE_SPI;

		ret_val = e1000_ready_nvm_eeprom(hw);
		if (ret_val) {
			nvm->ops.release(hw);
			return ret_val;
		}

		e1000_standby_nvm(hw);

		/* Send the WRITE ENABLE command (8 bit opcode) */
		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
					 nvm->opcode_bits);

		e1000_standby_nvm(hw);

		/*
		 * Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((nvm->address_bits == 8) && (offset >= 128))
			write_opcode |= NVM_A8_OPCODE_SPI;

		/* Send the Write command (8-bit opcode + addr) */
		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
		/* Byte address = word offset * 2 */
		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
					 nvm->address_bits);

		/* Loop to allow for up to whole page write of eeprom */
		while (widx < words) {
			/* Swap to big-endian byte order expected by the part */
			u16 word_out = data[widx];
			word_out = (word_out >> 8) | (word_out << 8);
			e1000_shift_out_eec_bits(hw, word_out, 16);
			widx++;

			/* Stop at a page boundary; a new WRITE is required */
			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
				e1000_standby_nvm(hw);
				break;
			}
		}
	}

	/* Allow the part's internal write cycle to complete */
	usleep_range(10000, 20000);
	nvm->ops.release(hw);
	return 0;
}
2139
2140/**
2141 * e1000_read_pba_string_generic - Read device part number
2142 * @hw: pointer to the HW structure
2143 * @pba_num: pointer to device part number
2144 * @pba_num_size: size of part number buffer
2145 *
2146 * Reads the product board assembly (PBA) number from the EEPROM and stores
2147 * the value in pba_num.
2148 **/
2149s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
2150 u32 pba_num_size)
2151{
2152 s32 ret_val;
2153 u16 nvm_data;
2154 u16 pba_ptr;
2155 u16 offset;
2156 u16 length;
2157
2158 if (pba_num == NULL) {
2159 e_dbg("PBA string buffer was null\n");
2160 ret_val = E1000_ERR_INVALID_ARGUMENT;
2161 goto out;
2162 }
2163
2164 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
2165 if (ret_val) {
2166 e_dbg("NVM Read Error\n");
2167 goto out;
2168 }
2169
2170 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
2171 if (ret_val) {
2172 e_dbg("NVM Read Error\n");
2173 goto out;
2174 }
2175
2176 /*
2177 * if nvm_data is not ptr guard the PBA must be in legacy format which
2178 * means pba_ptr is actually our second data word for the PBA number
2179 * and we can decode it into an ascii string
2180 */
2181 if (nvm_data != NVM_PBA_PTR_GUARD) {
2182 e_dbg("NVM PBA number is not stored as string\n");
2183
2184 /* we will need 11 characters to store the PBA */
2185 if (pba_num_size < 11) {
2186 e_dbg("PBA string buffer too small\n");
2187 return E1000_ERR_NO_SPACE;
2188 }
2189
2190 /* extract hex string from data and pba_ptr */
2191 pba_num[0] = (nvm_data >> 12) & 0xF;
2192 pba_num[1] = (nvm_data >> 8) & 0xF;
2193 pba_num[2] = (nvm_data >> 4) & 0xF;
2194 pba_num[3] = nvm_data & 0xF;
2195 pba_num[4] = (pba_ptr >> 12) & 0xF;
2196 pba_num[5] = (pba_ptr >> 8) & 0xF;
2197 pba_num[6] = '-';
2198 pba_num[7] = 0;
2199 pba_num[8] = (pba_ptr >> 4) & 0xF;
2200 pba_num[9] = pba_ptr & 0xF;
2201
2202 /* put a null character on the end of our string */
2203 pba_num[10] = '\0';
2204
2205 /* switch all the data but the '-' to hex char */
2206 for (offset = 0; offset < 10; offset++) {
2207 if (pba_num[offset] < 0xA)
2208 pba_num[offset] += '0';
2209 else if (pba_num[offset] < 0x10)
2210 pba_num[offset] += 'A' - 0xA;
2211 }
2212
2213 goto out;
2214 }
2215
2216 ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
2217 if (ret_val) {
2218 e_dbg("NVM Read Error\n");
2219 goto out;
2220 }
2221
2222 if (length == 0xFFFF || length == 0) {
2223 e_dbg("NVM PBA number section invalid length\n");
2224 ret_val = E1000_ERR_NVM_PBA_SECTION;
2225 goto out;
2226 }
2227 /* check if pba_num buffer is big enough */
2228 if (pba_num_size < (((u32)length * 2) - 1)) {
2229 e_dbg("PBA string buffer too small\n");
2230 ret_val = E1000_ERR_NO_SPACE;
2231 goto out;
2232 }
2233
2234 /* trim pba length from start of string */
2235 pba_ptr++;
2236 length--;
2237
2238 for (offset = 0; offset < length; offset++) {
2239 ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
2240 if (ret_val) {
2241 e_dbg("NVM Read Error\n");
2242 goto out;
2243 }
2244 pba_num[offset * 2] = (u8)(nvm_data >> 8);
2245 pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
2246 }
2247 pba_num[offset * 2] = '\0';
2248
2249out:
2250 return ret_val;
2251}
2252
2253/**
2254 * e1000_read_mac_addr_generic - Read device MAC address
2255 * @hw: pointer to the HW structure
2256 *
2257 * Reads the device MAC address from the EEPROM and stores the value.
2258 * Since devices with two ports use the same EEPROM, we increment the
2259 * last bit in the MAC address for the second port.
2260 **/
2261s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
2262{
2263 u32 rar_high;
2264 u32 rar_low;
2265 u16 i;
2266
2267 rar_high = er32(RAH(0));
2268 rar_low = er32(RAL(0));
2269
2270 for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
2271 hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
2272
2273 for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
2274 hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
2275
2276 for (i = 0; i < ETH_ALEN; i++)
2277 hw->mac.addr[i] = hw->mac.perm_addr[i];
2278
2279 return 0;
2280}
2281
2282/**
2283 * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
2284 * @hw: pointer to the HW structure
2285 *
2286 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2287 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
2288 **/
2289s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
2290{
2291 s32 ret_val;
2292 u16 checksum = 0;
2293 u16 i, nvm_data;
2294
2295 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
2296 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2297 if (ret_val) {
2298 e_dbg("NVM Read Error\n");
2299 return ret_val;
2300 }
2301 checksum += nvm_data;
2302 }
2303
2304 if (checksum != (u16) NVM_SUM) {
2305 e_dbg("NVM Checksum Invalid\n");
2306 return -E1000_ERR_NVM;
2307 }
2308
2309 return 0;
2310}
2311
2312/**
2313 * e1000e_update_nvm_checksum_generic - Update EEPROM checksum
2314 * @hw: pointer to the HW structure
2315 *
2316 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
2317 * up to the checksum. Then calculates the EEPROM checksum and writes the
2318 * value to the EEPROM.
2319 **/
2320s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
2321{
2322 s32 ret_val;
2323 u16 checksum = 0;
2324 u16 i, nvm_data;
2325
2326 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
2327 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2328 if (ret_val) {
2329 e_dbg("NVM Read Error while updating checksum.\n");
2330 return ret_val;
2331 }
2332 checksum += nvm_data;
2333 }
2334 checksum = (u16) NVM_SUM - checksum;
2335 ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
2336 if (ret_val)
2337 e_dbg("NVM Write Error while updating checksum.\n");
2338
2339 return ret_val;
2340}
2341
2342/**
2343 * e1000e_reload_nvm - Reloads EEPROM
2344 * @hw: pointer to the HW structure
2345 *
2346 * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
2347 * extended control register.
2348 **/
2349void e1000e_reload_nvm(struct e1000_hw *hw)
2350{
2351 u32 ctrl_ext;
2352
2353 udelay(10);
2354 ctrl_ext = er32(CTRL_EXT);
2355 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
2356 ew32(CTRL_EXT, ctrl_ext);
2357 e1e_flush();
2358}
2359
/**
 * e1000_calculate_checksum - Calculate checksum for buffer
 * @buffer: pointer to EEPROM
 * @length: size of EEPROM to calculate a checksum for
 *
 * Calculates the checksum for some buffer on a specified length. The
 * checksum calculated is returned.
 **/
static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u32 idx;

	if (!buffer)
		return 0;

	for (idx = 0; idx < length; idx++)
		sum += buffer[idx];

	/* Checksum is the two's complement of the byte sum */
	return (u8)(0 - sum);
}
2381
/**
 * e1000_mng_enable_host_if - Checks host interface is enabled
 * @hw: pointer to the HW structure
 *
 * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
 *
 * This function checks whether the HOST IF is enabled for command operation
 * and also checks whether the previous command is completed. It busy waits
 * in case of previous command is not completed.
 **/
static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
{
	u32 hicr;
	u8 i;

	if (!(hw->mac.arc_subsystem_valid)) {
		e_dbg("ARC subsystem not valid.\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Check that the host interface is enabled. */
	hicr = er32(HICR);
	if ((hicr & E1000_HICR_EN) == 0) {
		e_dbg("E1000_HOST_EN bit disabled.\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}
	/* check the previous command is completed */
	for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
		hicr = er32(HICR);
		/* HICR_C clear means the firmware consumed the command */
		if (!(hicr & E1000_HICR_C))
			break;
		mdelay(1);
	}

	/* i only reaches the limit when the loop never broke out */
	if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
		e_dbg("Previous command timeout failed .\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}

	return 0;
}
2423
2424/**
2425 * e1000e_check_mng_mode_generic - check management mode
2426 * @hw: pointer to the HW structure
2427 *
2428 * Reads the firmware semaphore register and returns true (>0) if
2429 * manageability is enabled, else false (0).
2430 **/
2431bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
2432{
2433 u32 fwsm = er32(FWSM);
2434
2435 return (fwsm & E1000_FWSM_MODE_MASK) ==
2436 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
2437}
2438
/**
 * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
 * @hw: pointer to the HW structure
 *
 * Enables packet filtering on transmit packets if manageability is enabled
 * and host interface is enabled.
 **/
bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
{
	struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
	/* 'buffer' aliases the same cookie struct for dword-wise filling */
	u32 *buffer = (u32 *)&hw->mng_cookie;
	u32 offset;
	s32 ret_val, hdr_csum, csum;
	u8 i, len;

	/* Default to filtering enabled until proven otherwise */
	hw->mac.tx_pkt_filtering = true;

	/* No manageability, no filtering */
	if (!e1000e_check_mng_mode(hw)) {
		hw->mac.tx_pkt_filtering = false;
		goto out;
	}

	/*
	 * If we can't read from the host interface for whatever
	 * reason, disable filtering.
	 */
	ret_val = e1000_mng_enable_host_if(hw);
	if (ret_val) {
		hw->mac.tx_pkt_filtering = false;
		goto out;
	}

	/* Read in the header.  Length and offset are in dwords. */
	len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
	offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
	for (i = 0; i < len; i++)
		*(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
	/* Checksum field must be zero while recomputing the checksum */
	hdr_csum = hdr->checksum;
	hdr->checksum = 0;
	csum = e1000_calculate_checksum((u8 *)hdr,
					E1000_MNG_DHCP_COOKIE_LENGTH);
	/*
	 * If either the checksums or signature don't match, then
	 * the cookie area isn't considered valid, in which case we
	 * take the safe route of assuming Tx filtering is enabled.
	 */
	if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
		hw->mac.tx_pkt_filtering = true;
		goto out;
	}

	/* Cookie area is valid, make the final check for filtering. */
	if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
		hw->mac.tx_pkt_filtering = false;
		goto out;
	}

out:
	return hw->mac.tx_pkt_filtering;
}
2500
2501/**
2502 * e1000_mng_write_cmd_header - Writes manageability command header
2503 * @hw: pointer to the HW structure
2504 * @hdr: pointer to the host interface command header
2505 *
2506 * Writes the command header after does the checksum calculation.
2507 **/
2508static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
2509 struct e1000_host_mng_command_header *hdr)
2510{
2511 u16 i, length = sizeof(struct e1000_host_mng_command_header);
2512
2513 /* Write the whole command header structure with new checksum. */
2514
2515 hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
2516
2517 length >>= 2;
2518 /* Write the relevant command block into the ram area. */
2519 for (i = 0; i < length; i++) {
2520 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
2521 *((u32 *) hdr + i));
2522 e1e_flush();
2523 }
2524
2525 return 0;
2526}
2527
/**
 * e1000_mng_host_if_write - Write to the manageability host interface
 * @hw: pointer to the HW structure
 * @buffer: pointer to the host interface buffer
 * @length: size of the buffer
 * @offset: location in the buffer to write to
 * @sum: sum of the data (not checksum)
 *
 * This function writes the buffer content at the offset given on the host if.
 * It also does alignment considerations to do the writes in most efficient
 * way. Also fills up the sum of the buffer in *buffer parameter.
 **/
static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
				   u16 length, u16 offset, u8 *sum)
{
	u8 *tmp;
	u8 *bufptr = buffer;
	u32 data = 0;
	u16 remaining, i, j, prev_bytes;

	/* sum = only sum of the data and it is not checksum */

	if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
		return -E1000_ERR_PARAM;

	/* View the dword scratch register byte-by-byte through 'tmp' */
	tmp = (u8 *)&data;
	/* Split offset into a dword index plus a 0-3 byte misalignment */
	prev_bytes = offset & 0x3;
	offset >>= 2;

	if (prev_bytes) {
		/*
		 * Unaligned start: read-modify-write the leading dword,
		 * preserving the bytes that precede 'offset'.
		 */
		data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
		for (j = prev_bytes; j < sizeof(u32); j++) {
			*(tmp + j) = *bufptr++;
			*sum += *(tmp + j);
		}
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
		/* j - prev_bytes is how many buffer bytes were consumed */
		length -= j - prev_bytes;
		offset++;
	}

	/* Split the rest into whole dwords plus a 0-3 byte tail */
	remaining = length & 0x3;
	length -= remaining;

	/* Calculate length in DWORDs */
	length >>= 2;

	/*
	 * The device driver writes the relevant command block into the
	 * ram area.
	 */
	for (i = 0; i < length; i++) {
		for (j = 0; j < sizeof(u32); j++) {
			*(tmp + j) = *bufptr++;
			*sum += *(tmp + j);
		}

		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
	}
	if (remaining) {
		/* Zero-pad the trailing partial dword before writing it
		 * (i still indexes one past the last full dword here) */
		for (j = 0; j < sizeof(u32); j++) {
			if (j < remaining)
				*(tmp + j) = *bufptr++;
			else
				*(tmp + j) = 0;

			*sum += *(tmp + j);
		}
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
	}

	return 0;
}
2600
2601/**
2602 * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
2603 * @hw: pointer to the HW structure
2604 * @buffer: pointer to the host interface
2605 * @length: size of the buffer
2606 *
2607 * Writes the DHCP information to the host interface.
2608 **/
2609s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
2610{
2611 struct e1000_host_mng_command_header hdr;
2612 s32 ret_val;
2613 u32 hicr;
2614
2615 hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
2616 hdr.command_length = length;
2617 hdr.reserved1 = 0;
2618 hdr.reserved2 = 0;
2619 hdr.checksum = 0;
2620
2621 /* Enable the host interface */
2622 ret_val = e1000_mng_enable_host_if(hw);
2623 if (ret_val)
2624 return ret_val;
2625
2626 /* Populate the host interface with the contents of "buffer". */
2627 ret_val = e1000_mng_host_if_write(hw, buffer, length,
2628 sizeof(hdr), &(hdr.checksum));
2629 if (ret_val)
2630 return ret_val;
2631
2632 /* Write the manageability command header */
2633 ret_val = e1000_mng_write_cmd_header(hw, &hdr);
2634 if (ret_val)
2635 return ret_val;
2636
2637 /* Tell the ARC a new command is pending. */
2638 hicr = er32(HICR);
2639 ew32(HICR, hicr | E1000_HICR_C);
2640
2641 return 0;
2642}
2643
2644/**
2645 * e1000e_enable_mng_pass_thru - Check if management passthrough is needed
2646 * @hw: pointer to the HW structure
2647 *
2648 * Verifies the hardware needs to leave interface enabled so that frames can
2649 * be directed to and from the management interface.
2650 **/
2651bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2652{
2653 u32 manc;
2654 u32 fwsm, factps;
2655 bool ret_val = false;
2656
2657 manc = er32(MANC);
2658
2659 if (!(manc & E1000_MANC_RCV_TCO_EN))
2660 goto out;
2661
2662 if (hw->mac.has_fwsm) {
2663 fwsm = er32(FWSM);
2664 factps = er32(FACTPS);
2665
2666 if (!(factps & E1000_FACTPS_MNGCG) &&
2667 ((fwsm & E1000_FWSM_MODE_MASK) ==
2668 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
2669 ret_val = true;
2670 goto out;
2671 }
2672 } else if ((hw->mac.type == e1000_82574) ||
2673 (hw->mac.type == e1000_82583)) {
2674 u16 data;
2675
2676 factps = er32(FACTPS);
2677 e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
2678
2679 if (!(factps & E1000_FACTPS_MNGCG) &&
2680 ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
2681 (e1000_mng_mode_pt << 13))) {
2682 ret_val = true;
2683 goto out;
2684 }
2685 } else if ((manc & E1000_MANC_SMBUS_EN) &&
2686 !(manc & E1000_MANC_ASF_EN)) {
2687 ret_val = true;
2688 goto out;
2689 }
2690
2691out:
2692 return ret_val;
2693}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
new file mode 100644
index 00000000000..fc218b7bd58
--- /dev/null
+++ b/drivers/net/e1000e/netdev.c
@@ -0,0 +1,6393 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/module.h>
32#include <linux/types.h>
33#include <linux/init.h>
34#include <linux/pci.h>
35#include <linux/vmalloc.h>
36#include <linux/pagemap.h>
37#include <linux/delay.h>
38#include <linux/netdevice.h>
39#include <linux/interrupt.h>
40#include <linux/tcp.h>
41#include <linux/ipv6.h>
42#include <linux/slab.h>
43#include <net/checksum.h>
44#include <net/ip6_checksum.h>
45#include <linux/mii.h>
46#include <linux/ethtool.h>
47#include <linux/if_vlan.h>
48#include <linux/cpu.h>
49#include <linux/smp.h>
50#include <linux/pm_qos_params.h>
51#include <linux/pm_runtime.h>
52#include <linux/aer.h>
53#include <linux/prefetch.h>
54
55#include "e1000.h"
56
/* "-k" marks this as the in-kernel (as opposed to out-of-tree) driver build */
#define DRV_EXTRAVERSION "-k"

#define DRV_VERSION "1.4.4" DRV_EXTRAVERSION
/* Driver identification strings, exported for ethtool/param reporting */
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);

/* Maps each board_* enum value to its MAC-family info structure */
static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571] = &e1000_82571_info,
	[board_82572] = &e1000_82572_info,
	[board_82573] = &e1000_82573_info,
	[board_82574] = &e1000_82574_info,
	[board_82583] = &e1000_82583_info,
	[board_80003es2lan] = &e1000_es2_info,
	[board_ich8lan] = &e1000_ich8_info,
	[board_ich9lan] = &e1000_ich9_info,
	[board_ich10lan] = &e1000_ich10_info,
	[board_pchlan] = &e1000_pch_info,
	[board_pch2lan] = &e1000_pch2_info,
};
78
/* Register offset + printable name pair used by the debug dump code */
struct e1000_reg_info {
	u32 ofs;
	char *name;
};

/* FIFO diagnostic registers not present in the regular register map */
#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */

#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */

/* Registers printed by e1000e_dump(); iteration stops at the {} sentinel */
static const struct e1000_reg_info e1000_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN, "RDLEN"},
	{E1000_RDH, "RDH"},
	{E1000_RDT, "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL, "RDBAL"},
	{E1000_RDBAH, "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL, "TDBAL"},
	{E1000_TDBAH, "TDBAH"},
	{E1000_TDLEN, "TDLEN"},
	{E1000_TDH, "TDH"},
	{E1000_TDT, "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};
142
143/*
144 * e1000_regdump - register printout routine
145 */
146static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
147{
148 int n = 0;
149 char rname[16];
150 u32 regs[8];
151
152 switch (reginfo->ofs) {
153 case E1000_RXDCTL(0):
154 for (n = 0; n < 2; n++)
155 regs[n] = __er32(hw, E1000_RXDCTL(n));
156 break;
157 case E1000_TXDCTL(0):
158 for (n = 0; n < 2; n++)
159 regs[n] = __er32(hw, E1000_TXDCTL(n));
160 break;
161 case E1000_TARC(0):
162 for (n = 0; n < 2; n++)
163 regs[n] = __er32(hw, E1000_TARC(n));
164 break;
165 default:
166 printk(KERN_INFO "%-15s %08x\n",
167 reginfo->name, __er32(hw, reginfo->ofs));
168 return;
169 }
170
171 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
172 printk(KERN_INFO "%-15s ", rname);
173 for (n = 0; n < 2; n++)
174 printk(KERN_CONT "%08x ", regs[n]);
175 printk(KERN_CONT "\n");
176}
177
178/*
179 * e1000e_dump - Print registers, Tx-ring and Rx-ring
180 */
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	/* raw overlay for printing a 16-byte descriptor as two u64 words */
	struct my_u0 {
		u64 a;
		u64 b;
	} *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	struct e1000_rx_desc *rx_desc;
	/* raw overlay for printing a 32-byte packet-split descriptor */
	struct my_u1 {
		u64 a;
		u64 b;
		u64 c;
		u64 d;
	} *u1;
	u32 staterr;
	int i = 0;

	/* the entire dump is gated on the 'hw' debug message level */
	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name state "
		       "trans_start last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
		       netdev->name, netdev->state, netdev->trans_start,
		       netdev->last_rx);
	}

	/* Print Registers (table-driven via e1000_reg_info_tbl) */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}

	/* Print Tx Ring Summary -- rings are only valid while running */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
	       " leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
	       (unsigned long long)buffer_info->dma,
	       buffer_info->length,
	       buffer_info->next_to_watch,
	       (unsigned long long)buffer_info->time_stamp);

	/* Print Tx Ring -- only at the more verbose 'tx_done' level */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Legacy format\n");
	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Ext Context format\n");
	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		/* descriptor type letter derived from DEXT (bit 29) and
		 * DTYP (bit 20): 'l'=legacy, 'c'=context, 'd'=data */
		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
		       "%04X  %3X %016llX %p",
		       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
			((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
		       (unsigned long long)le64_to_cpu(u0->a),
		       (unsigned long long)le64_to_cpu(u0->b),
		       (unsigned long long)buffer_info->dma,
		       buffer_info->length, buffer_info->next_to_watch,
		       (unsigned long long)buffer_info->time_stamp,
		       buffer_info->skb);
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC/U\n");
		else if (i == tx_ring->next_to_use)
			printk(KERN_CONT " NTU\n");
		else if (i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC\n");
		else
			printk(KERN_CONT "\n");

		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, phys_to_virt(buffer_info->dma),
				       buffer_info->length, true);
	}

	/* Print Rx Ring Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	printk(KERN_INFO " %5d %5X %5X\n", 0,
	       rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print Rx Ring -- only at the 'rx_status' level */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
	/* descriptor layout in use depends on packet-split configuration */
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 *  8 |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
		       "[buffer 1 63:0 ] "
		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
		       "[bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31     13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
		 *   | Checksum | Ident  |         | Queue |      |  Type   |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
		       "[vl   l0 ee  es] "
		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
		       "[bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done: print write-back layout */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
				       "%016llX %016llX %016llX "
				       "---------------- %p", i,
				       (unsigned long long)le64_to_cpu(u1->a),
				       (unsigned long long)le64_to_cpu(u1->b),
				       (unsigned long long)le64_to_cpu(u1->c),
				       (unsigned long long)le64_to_cpu(u1->d),
				       buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
				       "%016llX %016llX %016llX %016llX %p", i,
				       (unsigned long long)le64_to_cpu(u1->a),
				       (unsigned long long)le64_to_cpu(u1->b),
				       (unsigned long long)le64_to_cpu(u1->c),
				       (unsigned long long)le64_to_cpu(u1->d),
				       (unsigned long long)buffer_info->dma,
				       buffer_info->skb);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16, 1,
						       phys_to_virt(buffer_info->dma),
						       adapter->rx_ps_bsize0, true);
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");
		}
		break;
	default:
	case 0:
		/* Legacy Receive Descriptor Format
		 *
		 * +-----------------------------------------------------+
		 * |                Buffer Address [63:0]                |
		 * +-----------------------------------------------------+
		 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
		 * +-----------------------------------------------------+
		 * 63       48 47    40 39      32 31         16 15      0
		 */
		printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
		       "[vl er S cks ln] [bi->dma       ] [bi->skb] "
		       "<-- Legacy format\n");
		for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
			rx_desc = E1000_RX_DESC(*rx_ring, i);
			buffer_info = &rx_ring->buffer_info[i];
			u0 = (struct my_u0 *)rx_desc;
			printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
			       "%016llX %p", i,
			       (unsigned long long)le64_to_cpu(u0->a),
			       (unsigned long long)le64_to_cpu(u0->b),
			       (unsigned long long)buffer_info->dma,
			       buffer_info->skb);
			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter))
				print_hex_dump(KERN_INFO, "",
					       DUMP_PREFIX_ADDRESS,
					       16, 1,
					       phys_to_virt(buffer_info->dma),
					       adapter->rx_buffer_len, true);
		}
	}

exit:
	return;
}
443
444/**
445 * e1000_desc_unused - calculate if we have unused descriptors
446 **/
447static int e1000_desc_unused(struct e1000_ring *ring)
448{
449 if (ring->next_to_clean > ring->next_to_use)
450 return ring->next_to_clean - ring->next_to_use - 1;
451
452 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
453}
454
455/**
456 * e1000_receive_skb - helper function to handle Rx indications
457 * @adapter: board private structure
458 * @status: descriptor status field as written by hardware
459 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
460 * @skb: pointer to sk_buff to be indicated to stack
461 **/
462static void e1000_receive_skb(struct e1000_adapter *adapter,
463 struct net_device *netdev, struct sk_buff *skb,
464 u8 status, __le16 vlan)
465{
466 u16 tag = le16_to_cpu(vlan);
467 skb->protocol = eth_type_trans(skb, netdev);
468
469 if (status & E1000_RXD_STAT_VP)
470 __vlan_hwaccel_put_tag(skb, tag);
471
472 napi_gro_receive(&adapter->napi, skb);
473}
474
475/**
476 * e1000_rx_checksum - Receive Checksum Offload
477 * @adapter: board private structure
478 * @status_err: receive descriptor status and error fields
479 * @csum: receive descriptor csum field
480 * @sk_buff: socket buffer with received data
481 **/
482static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
483 u32 csum, struct sk_buff *skb)
484{
485 u16 status = (u16)status_err;
486 u8 errors = (u8)(status_err >> 24);
487
488 skb_checksum_none_assert(skb);
489
490 /* Ignore Checksum bit is set */
491 if (status & E1000_RXD_STAT_IXSM)
492 return;
493 /* TCP/UDP checksum error bit is set */
494 if (errors & E1000_RXD_ERR_TCPE) {
495 /* let the stack verify checksum errors */
496 adapter->hw_csum_err++;
497 return;
498 }
499
500 /* TCP/UDP Checksum has not been calculated */
501 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
502 return;
503
504 /* It must be a TCP or UDP packet with a valid checksum */
505 if (status & E1000_RXD_STAT_TCPCS) {
506 /* TCP checksum is good */
507 skb->ip_summed = CHECKSUM_UNNECESSARY;
508 } else {
509 /*
510 * IP fragment with UDP payload
511 * Hardware complements the payload checksum, so we undo it
512 * and then put the value in host order for further stack use.
513 */
514 __sum16 sum = (__force __sum16)htons(csum);
515 skb->csum = csum_unfold(~sum);
516 skb->ip_summed = CHECKSUM_COMPLETE;
517 }
518 adapter->hw_csum_good++;
519}
520
521/**
522 * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
523 * @hw: pointer to the HW structure
524 * @tail: address of tail descriptor register
525 * @i: value to write to tail descriptor register
526 *
527 * When updating the tail register, the ME could be accessing Host CSR
528 * registers at the same time. Normally, this is handled in h/w by an
529 * arbiter but on some parts there is a bug that acknowledges Host accesses
530 * later than it should which could result in the descriptor register to
531 * have an incorrect value. Workaround this by checking the FWSM register
532 * which has bit 24 set while ME is accessing Host CSR registers, wait
533 * if it is set and try again a number of times.
534 **/
535static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail,
536 unsigned int i)
537{
538 unsigned int j = 0;
539
540 while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
541 (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
542 udelay(50);
543
544 writel(i, tail);
545
546 if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
547 return E1000_ERR_SWFW_SYNC;
548
549 return 0;
550}
551
552static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
553{
554 u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
555 struct e1000_hw *hw = &adapter->hw;
556
557 if (e1000e_update_tail_wa(hw, tail, i)) {
558 u32 rctl = er32(RCTL);
559 ew32(RCTL, rctl & ~E1000_RCTL_EN);
560 e_err("ME firmware caused invalid RDT - resetting\n");
561 schedule_work(&adapter->reset_task);
562 }
563}
564
565static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
566{
567 u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
568 struct e1000_hw *hw = &adapter->hw;
569
570 if (e1000e_update_tail_wa(hw, tail, i)) {
571 u32 tctl = er32(TCTL);
572 ew32(TCTL, tctl & ~E1000_TCTL_EN);
573 e_err("ME firmware caused invalid TDT - resetting\n");
574 schedule_work(&adapter->reset_task);
575 }
576}
577
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @cleaned_count: number of ring entries to refill with fresh buffers
 * @gfp: allocation flags for the skb allocations
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count, gfp_t gfp)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			/* the cleanup path left a recyclable skb behind;
			 * reset its data length and just re-map it */
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		/* only bump the tail register every E1000_RX_BUFFER_WRITE
		 * descriptors; one MMIO write per buffer would be too slow */
		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch. (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(adapter, i);
			else
				writel(i, adapter->hw.hw_addr + rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}
646
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @cleaned_count: number of ring entries to refill with fresh buffers
 * @gfp: allocation flags for the page and skb allocations
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count, gfp_t gfp)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		/* buffer_addr[0] holds the header buffer (mapped below),
		 * so the page buffers occupy slots [1..PS_PAGE_BUFFERS] */
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(gfp);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"Rx DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = __netdev_alloc_skb_ip_align(netdev,
						  adapter->rx_ps_bsize0,
						  gfp);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		/* map the header buffer for descriptor slot 0 */
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch. (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			/* tail is written as i << 1 -- presumably because a
			 * packet-split descriptor is twice the legacy size,
			 * so hw counts them in half-descriptor units; TODO
			 * confirm against the datasheet */
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(adapter, i << 1);
			else
				writel(i << 1,
				       adapter->hw.hw_addr + rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}
752
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: allocation flags for the page and skb allocations
 **/

static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 int cleaned_count, gfp_t gfp)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	/* the skb is only a small shell; payload lands in the page below */
	unsigned int bufsz = 256 - 16 /* for skb_reserve */;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			/* recycle the skb the cleanup path left behind */
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(gfp);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		/* the page mapping is kept across recycles; only map when
		 * the previous mapping was consumed */
		if (!buffer_info->dma)
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* hardware tail must point at the last valid descriptor,
		 * i.e. one before next_to_use (with wrap-around) */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
			e1000e_update_rdt_wa(adapter, i);
		else
			writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
829
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @work_done: in/out count of packets processed so far this NAPI poll
 * @work_to_do: NAPI budget limiting how many packets may be processed
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	/* process descriptors the hardware has marked Descriptor Done */
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		/* advance to (and prefetch) the next descriptor before we
		 * finish with the current one */
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/*
		 * !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it. In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			length -= 4;

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb_ip_align(netdev, length);
			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special);

next_desc:
		/* clear DD so this slot is not seen as done again */
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	/* refill whatever the loop did not already hand back */
	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
978
979static void e1000_put_txbuf(struct e1000_adapter *adapter,
980 struct e1000_buffer *buffer_info)
981{
982 if (buffer_info->dma) {
983 if (buffer_info->mapped_as_page)
984 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
985 buffer_info->length, DMA_TO_DEVICE);
986 else
987 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
988 buffer_info->length, DMA_TO_DEVICE);
989 buffer_info->dma = 0;
990 }
991 if (buffer_info->skb) {
992 dev_kfree_skb_any(buffer_info->skb);
993 buffer_info->skb = NULL;
994 }
995 buffer_info->time_stamp = 0;
996}
997
/**
 * e1000_print_hw_hang - report state of a suspected hung Tx unit
 * @work: delegate for adapter->print_hang_task
 *
 * Scheduled from e1000_clean_tx_irq() when a transmit appears stuck;
 * dumps Tx ring, MAC, PHY and PCI status to the log to aid diagnosis.
 **/
static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	/* the adapter may already be going down; nothing useful to print */
	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1e_rphy(hw, PHY_STATUS, &phy_status);
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(adapter->hw.hw_addr + tx_ring->head),
	      readl(adapter->hw.hw_addr + tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status,
	      er32(STATUS),
	      phy_status,
	      phy_1000t_status,
	      phy_ext_status,
	      pci_status);
}
1050
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	/* walk packet by packet: each buffer_info points (via
	 * next_to_watch) at its packet's end-of-packet descriptor, which
	 * carries the DD writeback.  The count bound guards against an
	 * inconsistent ring causing an endless loop. */
	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb();	/* read buffer_info after eop_desc */
		/* free every descriptor of this packet, up to and
		 * including the eop descriptor */
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				/* stats are accounted on the eop slot only */
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
			}

			e1000_put_txbuf(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	/* restart a queue stopped for lack of descriptors once enough
	 * have been freed */
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/*
		 * Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = 0;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
			schedule_work(&adapter->print_hang_task);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
1136
/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 * @work_done: in/out count of packets processed so far this NAPI poll
 * @work_to_do: NAPI budget limiting how many packets may be processed
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	/* process descriptors the hardware has marked Descriptor Done */
	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		/* advance to (and prefetch) the next descriptor */
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		/* unmap the header buffer (slot 0 of the descriptor) */
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other Rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full "
			      "packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		/* a zero header length with DD set marks a stale trailing
		 * part of a multi-descriptor packet; drop it */
		if (!length) {
			e_dbg("Last part of the packet spanning multiple "
			      "descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/*
			 * this looks ugly, but it seems compiler issues make it
			 * more efficient than reusing j
			 */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/*
			 * page alloc/put takes too long and effects small packet
			 * throughput, so unsplit small packets and save the alloc/put
			 * only valid in softirq (napi) context to call kmap_*
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/*
				 * there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
							PAGE_SIZE, DMA_FROM_DEVICE);
				vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
				dma_sync_single_for_device(&pdev->dev, ps_page->dma,
							   PAGE_SIZE, DMA_FROM_DEVICE);

				/* remove the CRC */
				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
					l1 -= 4;

				skb_put(skb, l1);
				goto copydone;
			} /* if */
		}

		/* attach each non-empty page buffer as an skb fragment,
		 * transferring page ownership to the skb */
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			pskb_trim(skb, skb->len - 4);

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		/* clear the status byte so this slot isn't re-processed */
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	/* refill whatever the loop did not already hand back */
	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
1323
1324/**
1325 * e1000_consume_page - helper function
1326 **/
1327static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
1328 u16 length)
1329{
1330 bi->page = NULL;
1331 skb->len += length;
1332 skb->data_len += length;
1333 skb->truesize += length;
1334}
1335
1336/**
1337 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
1338 * @adapter: board private structure
1339 *
1340 * the return value indicates whether actual cleaning was done, there
1341 * is no guarantee that everything was cleaned
1342 **/
1343
1344static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
1345 int *work_done, int work_to_do)
1346{
1347 struct net_device *netdev = adapter->netdev;
1348 struct pci_dev *pdev = adapter->pdev;
1349 struct e1000_ring *rx_ring = adapter->rx_ring;
1350 struct e1000_rx_desc *rx_desc, *next_rxd;
1351 struct e1000_buffer *buffer_info, *next_buffer;
1352 u32 length;
1353 unsigned int i;
1354 int cleaned_count = 0;
1355 bool cleaned = false;
1356 unsigned int total_rx_bytes=0, total_rx_packets=0;
1357
1358 i = rx_ring->next_to_clean;
1359 rx_desc = E1000_RX_DESC(*rx_ring, i);
1360 buffer_info = &rx_ring->buffer_info[i];
1361
1362 while (rx_desc->status & E1000_RXD_STAT_DD) {
1363 struct sk_buff *skb;
1364 u8 status;
1365
1366 if (*work_done >= work_to_do)
1367 break;
1368 (*work_done)++;
1369 rmb(); /* read descriptor and rx_buffer_info after status DD */
1370
1371 status = rx_desc->status;
1372 skb = buffer_info->skb;
1373 buffer_info->skb = NULL;
1374
1375 ++i;
1376 if (i == rx_ring->count)
1377 i = 0;
1378 next_rxd = E1000_RX_DESC(*rx_ring, i);
1379 prefetch(next_rxd);
1380
1381 next_buffer = &rx_ring->buffer_info[i];
1382
1383 cleaned = true;
1384 cleaned_count++;
1385 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
1386 DMA_FROM_DEVICE);
1387 buffer_info->dma = 0;
1388
1389 length = le16_to_cpu(rx_desc->length);
1390
1391 /* errors is only valid for DD + EOP descriptors */
1392 if (unlikely((status & E1000_RXD_STAT_EOP) &&
1393 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
1394 /* recycle both page and skb */
1395 buffer_info->skb = skb;
1396 /* an error means any chain goes out the window
1397 * too */
1398 if (rx_ring->rx_skb_top)
1399 dev_kfree_skb_irq(rx_ring->rx_skb_top);
1400 rx_ring->rx_skb_top = NULL;
1401 goto next_desc;
1402 }
1403
1404#define rxtop (rx_ring->rx_skb_top)
1405 if (!(status & E1000_RXD_STAT_EOP)) {
1406 /* this descriptor is only the beginning (or middle) */
1407 if (!rxtop) {
1408 /* this is the beginning of a chain */
1409 rxtop = skb;
1410 skb_fill_page_desc(rxtop, 0, buffer_info->page,
1411 0, length);
1412 } else {
1413 /* this is the middle of a chain */
1414 skb_fill_page_desc(rxtop,
1415 skb_shinfo(rxtop)->nr_frags,
1416 buffer_info->page, 0, length);
1417 /* re-use the skb, only consumed the page */
1418 buffer_info->skb = skb;
1419 }
1420 e1000_consume_page(buffer_info, rxtop, length);
1421 goto next_desc;
1422 } else {
1423 if (rxtop) {
1424 /* end of the chain */
1425 skb_fill_page_desc(rxtop,
1426 skb_shinfo(rxtop)->nr_frags,
1427 buffer_info->page, 0, length);
1428 /* re-use the current skb, we only consumed the
1429 * page */
1430 buffer_info->skb = skb;
1431 skb = rxtop;
1432 rxtop = NULL;
1433 e1000_consume_page(buffer_info, skb, length);
1434 } else {
1435 /* no chain, got EOP, this buf is the packet
1436 * copybreak to save the put_page/alloc_page */
1437 if (length <= copybreak &&
1438 skb_tailroom(skb) >= length) {
1439 u8 *vaddr;
1440 vaddr = kmap_atomic(buffer_info->page,
1441 KM_SKB_DATA_SOFTIRQ);
1442 memcpy(skb_tail_pointer(skb), vaddr,
1443 length);
1444 kunmap_atomic(vaddr,
1445 KM_SKB_DATA_SOFTIRQ);
1446 /* re-use the page, so don't erase
1447 * buffer_info->page */
1448 skb_put(skb, length);
1449 } else {
1450 skb_fill_page_desc(skb, 0,
1451 buffer_info->page, 0,
1452 length);
1453 e1000_consume_page(buffer_info, skb,
1454 length);
1455 }
1456 }
1457 }
1458
1459 /* Receive Checksum Offload XXX recompute due to CRC strip? */
1460 e1000_rx_checksum(adapter,
1461 (u32)(status) |
1462 ((u32)(rx_desc->errors) << 24),
1463 le16_to_cpu(rx_desc->csum), skb);
1464
1465 /* probably a little skewed due to removing CRC */
1466 total_rx_bytes += skb->len;
1467 total_rx_packets++;
1468
1469 /* eth type trans needs skb->data to point to something */
1470 if (!pskb_may_pull(skb, ETH_HLEN)) {
1471 e_err("pskb_may_pull failed.\n");
1472 dev_kfree_skb_irq(skb);
1473 goto next_desc;
1474 }
1475
1476 e1000_receive_skb(adapter, netdev, skb, status,
1477 rx_desc->special);
1478
1479next_desc:
1480 rx_desc->status = 0;
1481
1482 /* return some buffers to hardware, one at a time is too slow */
1483 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
1484 adapter->alloc_rx_buf(adapter, cleaned_count,
1485 GFP_ATOMIC);
1486 cleaned_count = 0;
1487 }
1488
1489 /* use prefetched values */
1490 rx_desc = next_rxd;
1491 buffer_info = next_buffer;
1492 }
1493 rx_ring->next_to_clean = i;
1494
1495 cleaned_count = e1000_desc_unused(rx_ring);
1496 if (cleaned_count)
1497 adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
1498
1499 adapter->total_rx_bytes += total_rx_bytes;
1500 adapter->total_rx_packets += total_rx_packets;
1501 return cleaned;
1502}
1503
1504/**
1505 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1506 * @adapter: board private structure
1507 **/
1508static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1509{
1510 struct e1000_ring *rx_ring = adapter->rx_ring;
1511 struct e1000_buffer *buffer_info;
1512 struct e1000_ps_page *ps_page;
1513 struct pci_dev *pdev = adapter->pdev;
1514 unsigned int i, j;
1515
1516 /* Free all the Rx ring sk_buffs */
1517 for (i = 0; i < rx_ring->count; i++) {
1518 buffer_info = &rx_ring->buffer_info[i];
1519 if (buffer_info->dma) {
1520 if (adapter->clean_rx == e1000_clean_rx_irq)
1521 dma_unmap_single(&pdev->dev, buffer_info->dma,
1522 adapter->rx_buffer_len,
1523 DMA_FROM_DEVICE);
1524 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1525 dma_unmap_page(&pdev->dev, buffer_info->dma,
1526 PAGE_SIZE,
1527 DMA_FROM_DEVICE);
1528 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1529 dma_unmap_single(&pdev->dev, buffer_info->dma,
1530 adapter->rx_ps_bsize0,
1531 DMA_FROM_DEVICE);
1532 buffer_info->dma = 0;
1533 }
1534
1535 if (buffer_info->page) {
1536 put_page(buffer_info->page);
1537 buffer_info->page = NULL;
1538 }
1539
1540 if (buffer_info->skb) {
1541 dev_kfree_skb(buffer_info->skb);
1542 buffer_info->skb = NULL;
1543 }
1544
1545 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1546 ps_page = &buffer_info->ps_pages[j];
1547 if (!ps_page->page)
1548 break;
1549 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1550 DMA_FROM_DEVICE);
1551 ps_page->dma = 0;
1552 put_page(ps_page->page);
1553 ps_page->page = NULL;
1554 }
1555 }
1556
1557 /* there also may be some cached data from a chained receive */
1558 if (rx_ring->rx_skb_top) {
1559 dev_kfree_skb(rx_ring->rx_skb_top);
1560 rx_ring->rx_skb_top = NULL;
1561 }
1562
1563 /* Zero out the descriptor ring */
1564 memset(rx_ring->desc, 0, rx_ring->size);
1565
1566 rx_ring->next_to_clean = 0;
1567 rx_ring->next_to_use = 0;
1568 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1569
1570 writel(0, adapter->hw.hw_addr + rx_ring->head);
1571 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1572}
1573
1574static void e1000e_downshift_workaround(struct work_struct *work)
1575{
1576 struct e1000_adapter *adapter = container_of(work,
1577 struct e1000_adapter, downshift_task);
1578
1579 if (test_bit(__E1000_DOWN, &adapter->state))
1580 return;
1581
1582 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1583}
1584
1585/**
1586 * e1000_intr_msi - Interrupt Handler
1587 * @irq: interrupt number
1588 * @data: pointer to a network interface device structure
1589 **/
1590static irqreturn_t e1000_intr_msi(int irq, void *data)
1591{
1592 struct net_device *netdev = data;
1593 struct e1000_adapter *adapter = netdev_priv(netdev);
1594 struct e1000_hw *hw = &adapter->hw;
1595 u32 icr = er32(ICR);
1596
1597 /*
1598 * read ICR disables interrupts using IAM
1599 */
1600
1601 if (icr & E1000_ICR_LSC) {
1602 hw->mac.get_link_status = 1;
1603 /*
1604 * ICH8 workaround-- Call gig speed drop workaround on cable
1605 * disconnect (LSC) before accessing any PHY registers
1606 */
1607 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1608 (!(er32(STATUS) & E1000_STATUS_LU)))
1609 schedule_work(&adapter->downshift_task);
1610
1611 /*
1612 * 80003ES2LAN workaround-- For packet buffer work-around on
1613 * link down event; disable receives here in the ISR and reset
1614 * adapter in watchdog
1615 */
1616 if (netif_carrier_ok(netdev) &&
1617 adapter->flags & FLAG_RX_NEEDS_RESTART) {
1618 /* disable receives */
1619 u32 rctl = er32(RCTL);
1620 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1621 adapter->flags |= FLAG_RX_RESTART_NOW;
1622 }
1623 /* guard against interrupt when we're going down */
1624 if (!test_bit(__E1000_DOWN, &adapter->state))
1625 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1626 }
1627
1628 if (napi_schedule_prep(&adapter->napi)) {
1629 adapter->total_tx_bytes = 0;
1630 adapter->total_tx_packets = 0;
1631 adapter->total_rx_bytes = 0;
1632 adapter->total_rx_packets = 0;
1633 __napi_schedule(&adapter->napi);
1634 }
1635
1636 return IRQ_HANDLED;
1637}
1638
1639/**
1640 * e1000_intr - Interrupt Handler
1641 * @irq: interrupt number
1642 * @data: pointer to a network interface device structure
1643 **/
1644static irqreturn_t e1000_intr(int irq, void *data)
1645{
1646 struct net_device *netdev = data;
1647 struct e1000_adapter *adapter = netdev_priv(netdev);
1648 struct e1000_hw *hw = &adapter->hw;
1649 u32 rctl, icr = er32(ICR);
1650
1651 if (!icr || test_bit(__E1000_DOWN, &adapter->state))
1652 return IRQ_NONE; /* Not our interrupt */
1653
1654 /*
1655 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1656 * not set, then the adapter didn't send an interrupt
1657 */
1658 if (!(icr & E1000_ICR_INT_ASSERTED))
1659 return IRQ_NONE;
1660
1661 /*
1662 * Interrupt Auto-Mask...upon reading ICR,
1663 * interrupts are masked. No need for the
1664 * IMC write
1665 */
1666
1667 if (icr & E1000_ICR_LSC) {
1668 hw->mac.get_link_status = 1;
1669 /*
1670 * ICH8 workaround-- Call gig speed drop workaround on cable
1671 * disconnect (LSC) before accessing any PHY registers
1672 */
1673 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1674 (!(er32(STATUS) & E1000_STATUS_LU)))
1675 schedule_work(&adapter->downshift_task);
1676
1677 /*
1678 * 80003ES2LAN workaround--
1679 * For packet buffer work-around on link down event;
1680 * disable receives here in the ISR and
1681 * reset adapter in watchdog
1682 */
1683 if (netif_carrier_ok(netdev) &&
1684 (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
1685 /* disable receives */
1686 rctl = er32(RCTL);
1687 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1688 adapter->flags |= FLAG_RX_RESTART_NOW;
1689 }
1690 /* guard against interrupt when we're going down */
1691 if (!test_bit(__E1000_DOWN, &adapter->state))
1692 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1693 }
1694
1695 if (napi_schedule_prep(&adapter->napi)) {
1696 adapter->total_tx_bytes = 0;
1697 adapter->total_tx_packets = 0;
1698 adapter->total_rx_bytes = 0;
1699 adapter->total_rx_packets = 0;
1700 __napi_schedule(&adapter->napi);
1701 }
1702
1703 return IRQ_HANDLED;
1704}
1705
/*
 * e1000_msix_other - MSI-X handler for causes other than the Rx/Tx queues
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * Handles link-status changes and re-raises any auto-cleared queue causes
 * found in ICR so their dedicated vectors fire.
 */
static irqreturn_t e1000_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		/* not ours: re-enable OTHER (unless going down) and bail */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	/* re-raise queue causes that were auto-cleared by the ICR read */
	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	/* reading ICR masked these causes; unmask them again */
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}
1737
1738
1739static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
1740{
1741 struct net_device *netdev = data;
1742 struct e1000_adapter *adapter = netdev_priv(netdev);
1743 struct e1000_hw *hw = &adapter->hw;
1744 struct e1000_ring *tx_ring = adapter->tx_ring;
1745
1746
1747 adapter->total_tx_bytes = 0;
1748 adapter->total_tx_packets = 0;
1749
1750 if (!e1000_clean_tx_irq(adapter))
1751 /* Ring was not completely cleaned, so fire another interrupt */
1752 ew32(ICS, tx_ring->ims_val);
1753
1754 return IRQ_HANDLED;
1755}
1756
1757static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
1758{
1759 struct net_device *netdev = data;
1760 struct e1000_adapter *adapter = netdev_priv(netdev);
1761
1762 /* Write the ITR value calculated at the end of the
1763 * previous interrupt.
1764 */
1765 if (adapter->rx_ring->set_itr) {
1766 writel(1000000000 / (adapter->rx_ring->itr_val * 256),
1767 adapter->hw.hw_addr + adapter->rx_ring->itr_register);
1768 adapter->rx_ring->set_itr = 0;
1769 }
1770
1771 if (napi_schedule_prep(&adapter->napi)) {
1772 adapter->total_rx_bytes = 0;
1773 adapter->total_rx_packets = 0;
1774 __napi_schedule(&adapter->napi);
1775 }
1776 return IRQ_HANDLED;
1777}
1778
1779/**
1780 * e1000_configure_msix - Configure MSI-X hardware
1781 *
1782 * e1000_configure_msix sets up the hardware to properly
1783 * generate MSI-X interrupts.
1784 **/
1785static void e1000_configure_msix(struct e1000_adapter *adapter)
1786{
1787 struct e1000_hw *hw = &adapter->hw;
1788 struct e1000_ring *rx_ring = adapter->rx_ring;
1789 struct e1000_ring *tx_ring = adapter->tx_ring;
1790 int vector = 0;
1791 u32 ctrl_ext, ivar = 0;
1792
1793 adapter->eiac_mask = 0;
1794
1795 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1796 if (hw->mac.type == e1000_82574) {
1797 u32 rfctl = er32(RFCTL);
1798 rfctl |= E1000_RFCTL_ACK_DIS;
1799 ew32(RFCTL, rfctl);
1800 }
1801
1802#define E1000_IVAR_INT_ALLOC_VALID 0x8
1803 /* Configure Rx vector */
1804 rx_ring->ims_val = E1000_IMS_RXQ0;
1805 adapter->eiac_mask |= rx_ring->ims_val;
1806 if (rx_ring->itr_val)
1807 writel(1000000000 / (rx_ring->itr_val * 256),
1808 hw->hw_addr + rx_ring->itr_register);
1809 else
1810 writel(1, hw->hw_addr + rx_ring->itr_register);
1811 ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
1812
1813 /* Configure Tx vector */
1814 tx_ring->ims_val = E1000_IMS_TXQ0;
1815 vector++;
1816 if (tx_ring->itr_val)
1817 writel(1000000000 / (tx_ring->itr_val * 256),
1818 hw->hw_addr + tx_ring->itr_register);
1819 else
1820 writel(1, hw->hw_addr + tx_ring->itr_register);
1821 adapter->eiac_mask |= tx_ring->ims_val;
1822 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
1823
1824 /* set vector for Other Causes, e.g. link changes */
1825 vector++;
1826 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
1827 if (rx_ring->itr_val)
1828 writel(1000000000 / (rx_ring->itr_val * 256),
1829 hw->hw_addr + E1000_EITR_82574(vector));
1830 else
1831 writel(1, hw->hw_addr + E1000_EITR_82574(vector));
1832
1833 /* Cause Tx interrupts on every write back */
1834 ivar |= (1 << 31);
1835
1836 ew32(IVAR, ivar);
1837
1838 /* enable MSI-X PBA support */
1839 ctrl_ext = er32(CTRL_EXT);
1840 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
1841
1842 /* Auto-Mask Other interrupts upon ICR read */
1843#define E1000_EIAC_MASK_82574 0x01F00000
1844 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
1845 ctrl_ext |= E1000_CTRL_EXT_EIAME;
1846 ew32(CTRL_EXT, ctrl_ext);
1847 e1e_flush();
1848}
1849
1850void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
1851{
1852 if (adapter->msix_entries) {
1853 pci_disable_msix(adapter->pdev);
1854 kfree(adapter->msix_entries);
1855 adapter->msix_entries = NULL;
1856 } else if (adapter->flags & FLAG_MSI_ENABLED) {
1857 pci_disable_msi(adapter->pdev);
1858 adapter->flags &= ~FLAG_MSI_ENABLED;
1859 }
1860}
1861
1862/**
1863 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
1864 *
1865 * Attempt to configure interrupts using the best available
1866 * capabilities of the hardware and kernel.
1867 **/
1868void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
1869{
1870 int err;
1871 int i;
1872
1873 switch (adapter->int_mode) {
1874 case E1000E_INT_MODE_MSIX:
1875 if (adapter->flags & FLAG_HAS_MSIX) {
1876 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
1877 adapter->msix_entries = kcalloc(adapter->num_vectors,
1878 sizeof(struct msix_entry),
1879 GFP_KERNEL);
1880 if (adapter->msix_entries) {
1881 for (i = 0; i < adapter->num_vectors; i++)
1882 adapter->msix_entries[i].entry = i;
1883
1884 err = pci_enable_msix(adapter->pdev,
1885 adapter->msix_entries,
1886 adapter->num_vectors);
1887 if (err == 0)
1888 return;
1889 }
1890 /* MSI-X failed, so fall through and try MSI */
1891 e_err("Failed to initialize MSI-X interrupts. "
1892 "Falling back to MSI interrupts.\n");
1893 e1000e_reset_interrupt_capability(adapter);
1894 }
1895 adapter->int_mode = E1000E_INT_MODE_MSI;
1896 /* Fall through */
1897 case E1000E_INT_MODE_MSI:
1898 if (!pci_enable_msi(adapter->pdev)) {
1899 adapter->flags |= FLAG_MSI_ENABLED;
1900 } else {
1901 adapter->int_mode = E1000E_INT_MODE_LEGACY;
1902 e_err("Failed to initialize MSI interrupts. Falling "
1903 "back to legacy interrupts.\n");
1904 }
1905 /* Fall through */
1906 case E1000E_INT_MODE_LEGACY:
1907 /* Don't do anything; this is the system default */
1908 break;
1909 }
1910
1911 /* store the number of vectors being used */
1912 adapter->num_vectors = 1;
1913}
1914
1915/**
1916 * e1000_request_msix - Initialize MSI-X interrupts
1917 *
1918 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
1919 * kernel.
1920 **/
1921static int e1000_request_msix(struct e1000_adapter *adapter)
1922{
1923 struct net_device *netdev = adapter->netdev;
1924 int err = 0, vector = 0;
1925
1926 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1927 snprintf(adapter->rx_ring->name,
1928 sizeof(adapter->rx_ring->name) - 1,
1929 "%s-rx-0", netdev->name);
1930 else
1931 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1932 err = request_irq(adapter->msix_entries[vector].vector,
1933 e1000_intr_msix_rx, 0, adapter->rx_ring->name,
1934 netdev);
1935 if (err)
1936 goto out;
1937 adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
1938 adapter->rx_ring->itr_val = adapter->itr;
1939 vector++;
1940
1941 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1942 snprintf(adapter->tx_ring->name,
1943 sizeof(adapter->tx_ring->name) - 1,
1944 "%s-tx-0", netdev->name);
1945 else
1946 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1947 err = request_irq(adapter->msix_entries[vector].vector,
1948 e1000_intr_msix_tx, 0, adapter->tx_ring->name,
1949 netdev);
1950 if (err)
1951 goto out;
1952 adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
1953 adapter->tx_ring->itr_val = adapter->itr;
1954 vector++;
1955
1956 err = request_irq(adapter->msix_entries[vector].vector,
1957 e1000_msix_other, 0, netdev->name, netdev);
1958 if (err)
1959 goto out;
1960
1961 e1000_configure_msix(adapter);
1962 return 0;
1963out:
1964 return err;
1965}
1966
1967/**
1968 * e1000_request_irq - initialize interrupts
1969 *
1970 * Attempts to configure interrupts using the best available
1971 * capabilities of the hardware and kernel.
1972 **/
1973static int e1000_request_irq(struct e1000_adapter *adapter)
1974{
1975 struct net_device *netdev = adapter->netdev;
1976 int err;
1977
1978 if (adapter->msix_entries) {
1979 err = e1000_request_msix(adapter);
1980 if (!err)
1981 return err;
1982 /* fall back to MSI */
1983 e1000e_reset_interrupt_capability(adapter);
1984 adapter->int_mode = E1000E_INT_MODE_MSI;
1985 e1000e_set_interrupt_capability(adapter);
1986 }
1987 if (adapter->flags & FLAG_MSI_ENABLED) {
1988 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
1989 netdev->name, netdev);
1990 if (!err)
1991 return err;
1992
1993 /* fall back to legacy interrupt */
1994 e1000e_reset_interrupt_capability(adapter);
1995 adapter->int_mode = E1000E_INT_MODE_LEGACY;
1996 }
1997
1998 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
1999 netdev->name, netdev);
2000 if (err)
2001 e_err("Unable to allocate interrupt, Error: %d\n", err);
2002
2003 return err;
2004}
2005
2006static void e1000_free_irq(struct e1000_adapter *adapter)
2007{
2008 struct net_device *netdev = adapter->netdev;
2009
2010 if (adapter->msix_entries) {
2011 int vector = 0;
2012
2013 free_irq(adapter->msix_entries[vector].vector, netdev);
2014 vector++;
2015
2016 free_irq(adapter->msix_entries[vector].vector, netdev);
2017 vector++;
2018
2019 /* Other Causes interrupt vector */
2020 free_irq(adapter->msix_entries[vector].vector, netdev);
2021 return;
2022 }
2023
2024 free_irq(adapter->pdev->irq, netdev);
2025}
2026
2027/**
2028 * e1000_irq_disable - Mask off interrupt generation on the NIC
2029 **/
2030static void e1000_irq_disable(struct e1000_adapter *adapter)
2031{
2032 struct e1000_hw *hw = &adapter->hw;
2033
2034 ew32(IMC, ~0);
2035 if (adapter->msix_entries)
2036 ew32(EIAC_82574, 0);
2037 e1e_flush();
2038
2039 if (adapter->msix_entries) {
2040 int i;
2041 for (i = 0; i < adapter->num_vectors; i++)
2042 synchronize_irq(adapter->msix_entries[i].vector);
2043 } else {
2044 synchronize_irq(adapter->pdev->irq);
2045 }
2046}
2047
2048/**
2049 * e1000_irq_enable - Enable default interrupt generation settings
2050 **/
2051static void e1000_irq_enable(struct e1000_adapter *adapter)
2052{
2053 struct e1000_hw *hw = &adapter->hw;
2054
2055 if (adapter->msix_entries) {
2056 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
2057 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
2058 } else {
2059 ew32(IMS, IMS_ENABLE_MASK);
2060 }
2061 e1e_flush();
2062}
2063
2064/**
2065 * e1000e_get_hw_control - get control of the h/w from f/w
2066 * @adapter: address of board private structure
2067 *
2068 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2069 * For ASF and Pass Through versions of f/w this means that
2070 * the driver is loaded. For AMT version (only with 82573)
2071 * of the f/w this means that the network i/f is open.
2072 **/
2073void e1000e_get_hw_control(struct e1000_adapter *adapter)
2074{
2075 struct e1000_hw *hw = &adapter->hw;
2076 u32 ctrl_ext;
2077 u32 swsm;
2078
2079 /* Let firmware know the driver has taken over */
2080 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2081 swsm = er32(SWSM);
2082 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
2083 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2084 ctrl_ext = er32(CTRL_EXT);
2085 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2086 }
2087}
2088
2089/**
2090 * e1000e_release_hw_control - release control of the h/w to f/w
2091 * @adapter: address of board private structure
2092 *
2093 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2094 * For ASF and Pass Through versions of f/w this means that the
2095 * driver is no longer loaded. For AMT version (only with 82573) i
2096 * of the f/w this means that the network i/f is closed.
2097 *
2098 **/
2099void e1000e_release_hw_control(struct e1000_adapter *adapter)
2100{
2101 struct e1000_hw *hw = &adapter->hw;
2102 u32 ctrl_ext;
2103 u32 swsm;
2104
2105 /* Let firmware taken over control of h/w */
2106 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2107 swsm = er32(SWSM);
2108 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2109 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2110 ctrl_ext = er32(CTRL_EXT);
2111 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2112 }
2113}
2114
2115/**
2116 * @e1000_alloc_ring - allocate memory for a ring structure
2117 **/
2118static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2119 struct e1000_ring *ring)
2120{
2121 struct pci_dev *pdev = adapter->pdev;
2122
2123 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2124 GFP_KERNEL);
2125 if (!ring->desc)
2126 return -ENOMEM;
2127
2128 return 0;
2129}
2130
2131/**
2132 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2133 * @adapter: board private structure
2134 *
2135 * Return 0 on success, negative on failure
2136 **/
2137int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
2138{
2139 struct e1000_ring *tx_ring = adapter->tx_ring;
2140 int err = -ENOMEM, size;
2141
2142 size = sizeof(struct e1000_buffer) * tx_ring->count;
2143 tx_ring->buffer_info = vzalloc(size);
2144 if (!tx_ring->buffer_info)
2145 goto err;
2146
2147 /* round up to nearest 4K */
2148 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2149 tx_ring->size = ALIGN(tx_ring->size, 4096);
2150
2151 err = e1000_alloc_ring_dma(adapter, tx_ring);
2152 if (err)
2153 goto err;
2154
2155 tx_ring->next_to_use = 0;
2156 tx_ring->next_to_clean = 0;
2157
2158 return 0;
2159err:
2160 vfree(tx_ring->buffer_info);
2161 e_err("Unable to allocate memory for the transmit descriptor ring\n");
2162 return err;
2163}
2164
2165/**
2166 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2167 * @adapter: board private structure
2168 *
2169 * Returns 0 on success, negative on failure
2170 **/
2171int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
2172{
2173 struct e1000_ring *rx_ring = adapter->rx_ring;
2174 struct e1000_buffer *buffer_info;
2175 int i, size, desc_len, err = -ENOMEM;
2176
2177 size = sizeof(struct e1000_buffer) * rx_ring->count;
2178 rx_ring->buffer_info = vzalloc(size);
2179 if (!rx_ring->buffer_info)
2180 goto err;
2181
2182 for (i = 0; i < rx_ring->count; i++) {
2183 buffer_info = &rx_ring->buffer_info[i];
2184 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2185 sizeof(struct e1000_ps_page),
2186 GFP_KERNEL);
2187 if (!buffer_info->ps_pages)
2188 goto err_pages;
2189 }
2190
2191 desc_len = sizeof(union e1000_rx_desc_packet_split);
2192
2193 /* Round up to nearest 4K */
2194 rx_ring->size = rx_ring->count * desc_len;
2195 rx_ring->size = ALIGN(rx_ring->size, 4096);
2196
2197 err = e1000_alloc_ring_dma(adapter, rx_ring);
2198 if (err)
2199 goto err_pages;
2200
2201 rx_ring->next_to_clean = 0;
2202 rx_ring->next_to_use = 0;
2203 rx_ring->rx_skb_top = NULL;
2204
2205 return 0;
2206
2207err_pages:
2208 for (i = 0; i < rx_ring->count; i++) {
2209 buffer_info = &rx_ring->buffer_info[i];
2210 kfree(buffer_info->ps_pages);
2211 }
2212err:
2213 vfree(rx_ring->buffer_info);
2214 e_err("Unable to allocate memory for the receive descriptor ring\n");
2215 return err;
2216}
2217
2218/**
2219 * e1000_clean_tx_ring - Free Tx Buffers
2220 * @adapter: board private structure
2221 **/
2222static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
2223{
2224 struct e1000_ring *tx_ring = adapter->tx_ring;
2225 struct e1000_buffer *buffer_info;
2226 unsigned long size;
2227 unsigned int i;
2228
2229 for (i = 0; i < tx_ring->count; i++) {
2230 buffer_info = &tx_ring->buffer_info[i];
2231 e1000_put_txbuf(adapter, buffer_info);
2232 }
2233
2234 size = sizeof(struct e1000_buffer) * tx_ring->count;
2235 memset(tx_ring->buffer_info, 0, size);
2236
2237 memset(tx_ring->desc, 0, tx_ring->size);
2238
2239 tx_ring->next_to_use = 0;
2240 tx_ring->next_to_clean = 0;
2241
2242 writel(0, adapter->hw.hw_addr + tx_ring->head);
2243 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2244}
2245
2246/**
2247 * e1000e_free_tx_resources - Free Tx Resources per Queue
2248 * @adapter: board private structure
2249 *
2250 * Free all transmit software resources
2251 **/
2252void e1000e_free_tx_resources(struct e1000_adapter *adapter)
2253{
2254 struct pci_dev *pdev = adapter->pdev;
2255 struct e1000_ring *tx_ring = adapter->tx_ring;
2256
2257 e1000_clean_tx_ring(adapter);
2258
2259 vfree(tx_ring->buffer_info);
2260 tx_ring->buffer_info = NULL;
2261
2262 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2263 tx_ring->dma);
2264 tx_ring->desc = NULL;
2265}
2266
2267/**
2268 * e1000e_free_rx_resources - Free Rx Resources
2269 * @adapter: board private structure
2270 *
2271 * Free all receive software resources
2272 **/
2273
2274void e1000e_free_rx_resources(struct e1000_adapter *adapter)
2275{
2276 struct pci_dev *pdev = adapter->pdev;
2277 struct e1000_ring *rx_ring = adapter->rx_ring;
2278 int i;
2279
2280 e1000_clean_rx_ring(adapter);
2281
2282 for (i = 0; i < rx_ring->count; i++)
2283 kfree(rx_ring->buffer_info[i].ps_pages);
2284
2285 vfree(rx_ring->buffer_info);
2286 rx_ring->buffer_info = NULL;
2287
2288 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2289 rx_ring->dma);
2290 rx_ring->desc = NULL;
2291}
2292
2293/**
2294 * e1000_update_itr - update the dynamic ITR value based on statistics
2295 * @adapter: pointer to adapter
2296 * @itr_setting: current adapter->itr
2297 * @packets: the number of packets during this measurement interval
2298 * @bytes: the number of bytes during this measurement interval
2299 *
2300 * Stores a new ITR value based on packets and byte
2301 * counts during the last interrupt. The advantage of per interrupt
2302 * computation is faster updates and more accurate ITR for the current
2303 * traffic pattern. Constants in this function were computed
2304 * based on theoretical maximum wire speed and thresholds were set based
2305 * on testing data as well as attempting to minimize response time
2306 * while increasing bulk throughput. This functionality is controlled
2307 * by the InterruptThrottleRate module parameter.
2308 **/
2309static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2310 u16 itr_setting, int packets,
2311 int bytes)
2312{
2313 unsigned int retval = itr_setting;
2314
2315 if (packets == 0)
2316 goto update_itr_done;
2317
2318 switch (itr_setting) {
2319 case lowest_latency:
2320 /* handle TSO and jumbo frames */
2321 if (bytes/packets > 8000)
2322 retval = bulk_latency;
2323 else if ((packets < 5) && (bytes > 512))
2324 retval = low_latency;
2325 break;
2326 case low_latency: /* 50 usec aka 20000 ints/s */
2327 if (bytes > 10000) {
2328 /* this if handles the TSO accounting */
2329 if (bytes/packets > 8000)
2330 retval = bulk_latency;
2331 else if ((packets < 10) || ((bytes/packets) > 1200))
2332 retval = bulk_latency;
2333 else if ((packets > 35))
2334 retval = lowest_latency;
2335 } else if (bytes/packets > 2000) {
2336 retval = bulk_latency;
2337 } else if (packets <= 2 && bytes < 512) {
2338 retval = lowest_latency;
2339 }
2340 break;
2341 case bulk_latency: /* 250 usec aka 4000 ints/s */
2342 if (bytes > 25000) {
2343 if (packets > 35)
2344 retval = low_latency;
2345 } else if (bytes < 6000) {
2346 retval = low_latency;
2347 }
2348 break;
2349 }
2350
2351update_itr_done:
2352 return retval;
2353}
2354
/**
 * e1000_set_itr - program a new Interrupt Throttle Rate into the hardware
 * @adapter: board private structure
 *
 * Combines the per-interval Tx and Rx statistics through
 * e1000_update_itr() into a single latency class, converts that class to
 * an interrupts/sec target, and writes it either to the Rx ring (MSI-X)
 * or directly to the ITR register.
 **/
static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	/* adaptive moderation disabled - turn throttling off entirely */
	if (adapter->flags2 & FLAG2_DISABLE_AIM) {
		new_itr = 0;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter,
				    adapter->tx_itr,
				    adapter->total_tx_packets,
				    adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter,
				    adapter->rx_itr,
				    adapter->total_rx_packets,
				    adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	/* the more latency-sensitive of the two directions wins */
	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/*
		 * this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
			     min(adapter->itr + (new_itr >> 2), new_itr) :
			     new_itr;
		adapter->itr = new_itr;
		adapter->rx_ring->itr_val = new_itr;
		if (adapter->msix_entries)
			adapter->rx_ring->set_itr = 1;
		else
			/* convert interrupts/sec to the register's
			 * 256ns-unit encoding; 0 disables throttling */
			if (new_itr)
				ew32(ITR, 1000000000 / (new_itr * 256));
			else
				ew32(ITR, 0);
	}
}
2427
2428/**
2429 * e1000_alloc_queues - Allocate memory for all rings
2430 * @adapter: board private structure to initialize
2431 **/
2432static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
2433{
2434 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2435 if (!adapter->tx_ring)
2436 goto err;
2437
2438 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2439 if (!adapter->rx_ring)
2440 goto err;
2441
2442 return 0;
2443err:
2444 e_err("Unable to allocate memory for queues\n");
2445 kfree(adapter->rx_ring);
2446 kfree(adapter->tx_ring);
2447 return -ENOMEM;
2448}
2449
2450/**
2451 * e1000_clean - NAPI Rx polling callback
2452 * @napi: struct associated with this polling callback
2453 * @budget: amount of packets driver is allowed to process this poll
2454 **/
2455static int e1000_clean(struct napi_struct *napi, int budget)
2456{
2457 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
2458 struct e1000_hw *hw = &adapter->hw;
2459 struct net_device *poll_dev = adapter->netdev;
2460 int tx_cleaned = 1, work_done = 0;
2461
2462 adapter = netdev_priv(poll_dev);
2463
2464 if (adapter->msix_entries &&
2465 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2466 goto clean_rx;
2467
2468 tx_cleaned = e1000_clean_tx_irq(adapter);
2469
2470clean_rx:
2471 adapter->clean_rx(adapter, &work_done, budget);
2472
2473 if (!tx_cleaned)
2474 work_done = budget;
2475
2476 /* If budget not fully consumed, exit the polling mode */
2477 if (work_done < budget) {
2478 if (adapter->itr_setting & 3)
2479 e1000_set_itr(adapter);
2480 napi_complete(napi);
2481 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2482 if (adapter->msix_entries)
2483 ew32(IMS, adapter->rx_ring->ims_val);
2484 else
2485 e1000_irq_enable(adapter);
2486 }
2487 }
2488
2489 return work_done;
2490}
2491
2492static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2493{
2494 struct e1000_adapter *adapter = netdev_priv(netdev);
2495 struct e1000_hw *hw = &adapter->hw;
2496 u32 vfta, index;
2497
2498 /* don't update vlan cookie if already programmed */
2499 if ((adapter->hw.mng_cookie.status &
2500 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2501 (vid == adapter->mng_vlan_id))
2502 return;
2503
2504 /* add VID to filter table */
2505 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2506 index = (vid >> 5) & 0x7F;
2507 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2508 vfta |= (1 << (vid & 0x1F));
2509 hw->mac.ops.write_vfta(hw, index, vfta);
2510 }
2511
2512 set_bit(vid, adapter->active_vlans);
2513}
2514
2515static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2516{
2517 struct e1000_adapter *adapter = netdev_priv(netdev);
2518 struct e1000_hw *hw = &adapter->hw;
2519 u32 vfta, index;
2520
2521 if ((adapter->hw.mng_cookie.status &
2522 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2523 (vid == adapter->mng_vlan_id)) {
2524 /* release control to f/w */
2525 e1000e_release_hw_control(adapter);
2526 return;
2527 }
2528
2529 /* remove VID from filter table */
2530 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2531 index = (vid >> 5) & 0x7F;
2532 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2533 vfta &= ~(1 << (vid & 0x1F));
2534 hw->mac.ops.write_vfta(hw, index, vfta);
2535 }
2536
2537 clear_bit(vid, adapter->active_vlans);
2538}
2539
2540/**
2541 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2542 * @adapter: board private structure to initialize
2543 **/
2544static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
2545{
2546 struct net_device *netdev = adapter->netdev;
2547 struct e1000_hw *hw = &adapter->hw;
2548 u32 rctl;
2549
2550 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2551 /* disable VLAN receive filtering */
2552 rctl = er32(RCTL);
2553 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2554 ew32(RCTL, rctl);
2555
2556 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
2557 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2558 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2559 }
2560 }
2561}
2562
2563/**
2564 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2565 * @adapter: board private structure to initialize
2566 **/
2567static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2568{
2569 struct e1000_hw *hw = &adapter->hw;
2570 u32 rctl;
2571
2572 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2573 /* enable VLAN receive filtering */
2574 rctl = er32(RCTL);
2575 rctl |= E1000_RCTL_VFE;
2576 rctl &= ~E1000_RCTL_CFIEN;
2577 ew32(RCTL, rctl);
2578 }
2579}
2580
2581/**
2582 * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping
2583 * @adapter: board private structure to initialize
2584 **/
2585static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
2586{
2587 struct e1000_hw *hw = &adapter->hw;
2588 u32 ctrl;
2589
2590 /* disable VLAN tag insert/strip */
2591 ctrl = er32(CTRL);
2592 ctrl &= ~E1000_CTRL_VME;
2593 ew32(CTRL, ctrl);
2594}
2595
2596/**
2597 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2598 * @adapter: board private structure to initialize
2599 **/
2600static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2601{
2602 struct e1000_hw *hw = &adapter->hw;
2603 u32 ctrl;
2604
2605 /* enable VLAN tag insert/strip */
2606 ctrl = er32(CTRL);
2607 ctrl |= E1000_CTRL_VME;
2608 ew32(CTRL, ctrl);
2609}
2610
2611static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2612{
2613 struct net_device *netdev = adapter->netdev;
2614 u16 vid = adapter->hw.mng_cookie.vlan_id;
2615 u16 old_vid = adapter->mng_vlan_id;
2616
2617 if (adapter->hw.mng_cookie.status &
2618 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2619 e1000_vlan_rx_add_vid(netdev, vid);
2620 adapter->mng_vlan_id = vid;
2621 }
2622
2623 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
2624 e1000_vlan_rx_kill_vid(netdev, old_vid);
2625}
2626
2627static void e1000_restore_vlan(struct e1000_adapter *adapter)
2628{
2629 u16 vid;
2630
2631 e1000_vlan_rx_add_vid(adapter->netdev, 0);
2632
2633 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2634 e1000_vlan_rx_add_vid(adapter->netdev, vid);
2635}
2636
2637static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2638{
2639 struct e1000_hw *hw = &adapter->hw;
2640 u32 manc, manc2h, mdef, i, j;
2641
2642 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2643 return;
2644
2645 manc = er32(MANC);
2646
2647 /*
2648 * enable receiving management packets to the host. this will probably
2649 * generate destination unreachable messages from the host OS, but
2650 * the packets will be handled on SMBUS
2651 */
2652 manc |= E1000_MANC_EN_MNG2HOST;
2653 manc2h = er32(MANC2H);
2654
2655 switch (hw->mac.type) {
2656 default:
2657 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2658 break;
2659 case e1000_82574:
2660 case e1000_82583:
2661 /*
2662 * Check if IPMI pass-through decision filter already exists;
2663 * if so, enable it.
2664 */
2665 for (i = 0, j = 0; i < 8; i++) {
2666 mdef = er32(MDEF(i));
2667
2668 /* Ignore filters with anything other than IPMI ports */
2669 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2670 continue;
2671
2672 /* Enable this decision filter in MANC2H */
2673 if (mdef)
2674 manc2h |= (1 << i);
2675
2676 j |= mdef;
2677 }
2678
2679 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2680 break;
2681
2682 /* Create new decision filter in an empty filter */
2683 for (i = 0, j = 0; i < 8; i++)
2684 if (er32(MDEF(i)) == 0) {
2685 ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2686 E1000_MDEF_PORT_664));
2687 manc2h |= (1 << 1);
2688 j++;
2689 break;
2690 }
2691
2692 if (!j)
2693 e_warn("Unable to create IPMI pass-through filter\n");
2694 break;
2695 }
2696
2697 ew32(MANC2H, manc2h);
2698 ew32(MANC, manc);
2699}
2700
2701/**
2702 * e1000_configure_tx - Configure Transmit Unit after Reset
2703 * @adapter: board private structure
2704 *
2705 * Configure the Tx unit of the MAC after a reset.
2706 **/
2707static void e1000_configure_tx(struct e1000_adapter *adapter)
2708{
2709 struct e1000_hw *hw = &adapter->hw;
2710 struct e1000_ring *tx_ring = adapter->tx_ring;
2711 u64 tdba;
2712 u32 tdlen, tctl, tipg, tarc;
2713 u32 ipgr1, ipgr2;
2714
2715 /* Setup the HW Tx Head and Tail descriptor pointers */
2716 tdba = tx_ring->dma;
2717 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2718 ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
2719 ew32(TDBAH, (tdba >> 32));
2720 ew32(TDLEN, tdlen);
2721 ew32(TDH, 0);
2722 ew32(TDT, 0);
2723 tx_ring->head = E1000_TDH;
2724 tx_ring->tail = E1000_TDT;
2725
2726 /* Set the default values for the Tx Inter Packet Gap timer */
2727 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
2728 ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
2729 ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
2730
2731 if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
2732 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
2733
2734 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
2735 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
2736 ew32(TIPG, tipg);
2737
2738 /* Set the Tx Interrupt Delay register */
2739 ew32(TIDV, adapter->tx_int_delay);
2740 /* Tx irq moderation */
2741 ew32(TADV, adapter->tx_abs_int_delay);
2742
2743 if (adapter->flags2 & FLAG2_DMA_BURST) {
2744 u32 txdctl = er32(TXDCTL(0));
2745 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2746 E1000_TXDCTL_WTHRESH);
2747 /*
2748 * set up some performance related parameters to encourage the
2749 * hardware to use the bus more efficiently in bursts, depends
2750 * on the tx_int_delay to be enabled,
2751 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
2752 * hthresh = 1 ==> prefetch when one or more available
2753 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2754 * BEWARE: this seems to work but should be considered first if
2755 * there are Tx hangs or other Tx related bugs
2756 */
2757 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2758 ew32(TXDCTL(0), txdctl);
2759 /* erratum work around: set txdctl the same for both queues */
2760 ew32(TXDCTL(1), txdctl);
2761 }
2762
2763 /* Program the Transmit Control Register */
2764 tctl = er32(TCTL);
2765 tctl &= ~E1000_TCTL_CT;
2766 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2767 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2768
2769 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2770 tarc = er32(TARC(0));
2771 /*
2772 * set the speed mode bit, we'll clear it if we're not at
2773 * gigabit link later
2774 */
2775#define SPEED_MODE_BIT (1 << 21)
2776 tarc |= SPEED_MODE_BIT;
2777 ew32(TARC(0), tarc);
2778 }
2779
2780 /* errata: program both queues to unweighted RR */
2781 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2782 tarc = er32(TARC(0));
2783 tarc |= 1;
2784 ew32(TARC(0), tarc);
2785 tarc = er32(TARC(1));
2786 tarc |= 1;
2787 ew32(TARC(1), tarc);
2788 }
2789
2790 /* Setup Transmit Descriptor Settings for eop descriptor */
2791 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2792
2793 /* only set IDE if we are delaying interrupts using the timers */
2794 if (adapter->tx_int_delay)
2795 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2796
2797 /* enable Report Status bit */
2798 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2799
2800 ew32(TCTL, tctl);
2801
2802 e1000e_config_collision_dist(hw);
2803}
2804
2805/**
2806 * e1000_setup_rctl - configure the receive control registers
2807 * @adapter: Board private structure
2808 **/
2809#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2810 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
2811static void e1000_setup_rctl(struct e1000_adapter *adapter)
2812{
2813 struct e1000_hw *hw = &adapter->hw;
2814 u32 rctl, rfctl;
2815 u32 pages = 0;
2816
2817 /* Workaround Si errata on 82579 - configure jumbo frame flow */
2818 if (hw->mac.type == e1000_pch2lan) {
2819 s32 ret_val;
2820
2821 if (adapter->netdev->mtu > ETH_DATA_LEN)
2822 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2823 else
2824 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2825
2826 if (ret_val)
2827 e_dbg("failed to enable jumbo frame workaround mode\n");
2828 }
2829
2830 /* Program MC offset vector base */
2831 rctl = er32(RCTL);
2832 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2833 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2834 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2835 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2836
2837 /* Do not Store bad packets */
2838 rctl &= ~E1000_RCTL_SBP;
2839
2840 /* Enable Long Packet receive */
2841 if (adapter->netdev->mtu <= ETH_DATA_LEN)
2842 rctl &= ~E1000_RCTL_LPE;
2843 else
2844 rctl |= E1000_RCTL_LPE;
2845
2846 /* Some systems expect that the CRC is included in SMBUS traffic. The
2847 * hardware strips the CRC before sending to both SMBUS (BMC) and to
2848 * host memory when this is enabled
2849 */
2850 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2851 rctl |= E1000_RCTL_SECRC;
2852
2853 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
2854 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
2855 u16 phy_data;
2856
2857 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
2858 phy_data &= 0xfff8;
2859 phy_data |= (1 << 2);
2860 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
2861
2862 e1e_rphy(hw, 22, &phy_data);
2863 phy_data &= 0x0fff;
2864 phy_data |= (1 << 14);
2865 e1e_wphy(hw, 0x10, 0x2823);
2866 e1e_wphy(hw, 0x11, 0x0003);
2867 e1e_wphy(hw, 22, phy_data);
2868 }
2869
2870 /* Setup buffer sizes */
2871 rctl &= ~E1000_RCTL_SZ_4096;
2872 rctl |= E1000_RCTL_BSEX;
2873 switch (adapter->rx_buffer_len) {
2874 case 2048:
2875 default:
2876 rctl |= E1000_RCTL_SZ_2048;
2877 rctl &= ~E1000_RCTL_BSEX;
2878 break;
2879 case 4096:
2880 rctl |= E1000_RCTL_SZ_4096;
2881 break;
2882 case 8192:
2883 rctl |= E1000_RCTL_SZ_8192;
2884 break;
2885 case 16384:
2886 rctl |= E1000_RCTL_SZ_16384;
2887 break;
2888 }
2889
2890 /*
2891 * 82571 and greater support packet-split where the protocol
2892 * header is placed in skb->data and the packet data is
2893 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2894 * In the case of a non-split, skb->data is linearly filled,
2895 * followed by the page buffers. Therefore, skb->data is
2896 * sized to hold the largest protocol header.
2897 *
2898 * allocations using alloc_page take too long for regular MTU
2899 * so only enable packet split for jumbo frames
2900 *
2901 * Using pages when the page size is greater than 16k wastes
2902 * a lot of memory, since we allocate 3 pages at all times
2903 * per packet.
2904 */
2905 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2906 if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
2907 (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2908 adapter->rx_ps_pages = pages;
2909 else
2910 adapter->rx_ps_pages = 0;
2911
2912 if (adapter->rx_ps_pages) {
2913 u32 psrctl = 0;
2914
2915 /* Configure extra packet-split registers */
2916 rfctl = er32(RFCTL);
2917 rfctl |= E1000_RFCTL_EXTEN;
2918 /*
2919 * disable packet split support for IPv6 extension headers,
2920 * because some malformed IPv6 headers can hang the Rx
2921 */
2922 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2923 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2924
2925 ew32(RFCTL, rfctl);
2926
2927 /* Enable Packet split descriptors */
2928 rctl |= E1000_RCTL_DTYP_PS;
2929
2930 psrctl |= adapter->rx_ps_bsize0 >>
2931 E1000_PSRCTL_BSIZE0_SHIFT;
2932
2933 switch (adapter->rx_ps_pages) {
2934 case 3:
2935 psrctl |= PAGE_SIZE <<
2936 E1000_PSRCTL_BSIZE3_SHIFT;
2937 case 2:
2938 psrctl |= PAGE_SIZE <<
2939 E1000_PSRCTL_BSIZE2_SHIFT;
2940 case 1:
2941 psrctl |= PAGE_SIZE >>
2942 E1000_PSRCTL_BSIZE1_SHIFT;
2943 break;
2944 }
2945
2946 ew32(PSRCTL, psrctl);
2947 }
2948
2949 ew32(RCTL, rctl);
2950 /* just started the receive unit, no need to restart */
2951 adapter->flags &= ~FLAG_RX_RESTART_NOW;
2952}
2953
2954/**
2955 * e1000_configure_rx - Configure Receive Unit after Reset
2956 * @adapter: board private structure
2957 *
2958 * Configure the Rx unit of the MAC after a reset.
2959 **/
2960static void e1000_configure_rx(struct e1000_adapter *adapter)
2961{
2962 struct e1000_hw *hw = &adapter->hw;
2963 struct e1000_ring *rx_ring = adapter->rx_ring;
2964 u64 rdba;
2965 u32 rdlen, rctl, rxcsum, ctrl_ext;
2966
2967 if (adapter->rx_ps_pages) {
2968 /* this is a 32 byte descriptor */
2969 rdlen = rx_ring->count *
2970 sizeof(union e1000_rx_desc_packet_split);
2971 adapter->clean_rx = e1000_clean_rx_irq_ps;
2972 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2973 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
2974 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2975 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
2976 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
2977 } else {
2978 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2979 adapter->clean_rx = e1000_clean_rx_irq;
2980 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2981 }
2982
2983 /* disable receives while setting up the descriptors */
2984 rctl = er32(RCTL);
2985 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
2986 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2987 e1e_flush();
2988 usleep_range(10000, 20000);
2989
2990 if (adapter->flags2 & FLAG2_DMA_BURST) {
2991 /*
2992 * set the writeback threshold (only takes effect if the RDTR
2993 * is set). set GRAN=1 and write back up to 0x4 worth, and
2994 * enable prefetching of 0x20 Rx descriptors
2995 * granularity = 01
2996 * wthresh = 04,
2997 * hthresh = 04,
2998 * pthresh = 0x20
2999 */
3000 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3001 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3002
3003 /*
3004 * override the delay timers for enabling bursting, only if
3005 * the value was not set by the user via module options
3006 */
3007 if (adapter->rx_int_delay == DEFAULT_RDTR)
3008 adapter->rx_int_delay = BURST_RDTR;
3009 if (adapter->rx_abs_int_delay == DEFAULT_RADV)
3010 adapter->rx_abs_int_delay = BURST_RADV;
3011 }
3012
3013 /* set the Receive Delay Timer Register */
3014 ew32(RDTR, adapter->rx_int_delay);
3015
3016 /* irq moderation */
3017 ew32(RADV, adapter->rx_abs_int_delay);
3018 if ((adapter->itr_setting != 0) && (adapter->itr != 0))
3019 ew32(ITR, 1000000000 / (adapter->itr * 256));
3020
3021 ctrl_ext = er32(CTRL_EXT);
3022 /* Auto-Mask interrupts upon ICR access */
3023 ctrl_ext |= E1000_CTRL_EXT_IAME;
3024 ew32(IAM, 0xffffffff);
3025 ew32(CTRL_EXT, ctrl_ext);
3026 e1e_flush();
3027
3028 /*
3029 * Setup the HW Rx Head and Tail Descriptor Pointers and
3030 * the Base and Length of the Rx Descriptor Ring
3031 */
3032 rdba = rx_ring->dma;
3033 ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
3034 ew32(RDBAH, (rdba >> 32));
3035 ew32(RDLEN, rdlen);
3036 ew32(RDH, 0);
3037 ew32(RDT, 0);
3038 rx_ring->head = E1000_RDH;
3039 rx_ring->tail = E1000_RDT;
3040
3041 /* Enable Receive Checksum Offload for TCP and UDP */
3042 rxcsum = er32(RXCSUM);
3043 if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
3044 rxcsum |= E1000_RXCSUM_TUOFL;
3045
3046 /*
3047 * IPv4 payload checksum for UDP fragments must be
3048 * used in conjunction with packet-split.
3049 */
3050 if (adapter->rx_ps_pages)
3051 rxcsum |= E1000_RXCSUM_IPPCSE;
3052 } else {
3053 rxcsum &= ~E1000_RXCSUM_TUOFL;
3054 /* no need to clear IPPCSE as it defaults to 0 */
3055 }
3056 ew32(RXCSUM, rxcsum);
3057
3058 /*
3059 * Enable early receives on supported devices, only takes effect when
3060 * packet size is equal or larger than the specified value (in 8 byte
3061 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
3062 */
3063 if ((adapter->flags & FLAG_HAS_ERT) ||
3064 (adapter->hw.mac.type == e1000_pch2lan)) {
3065 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3066 u32 rxdctl = er32(RXDCTL(0));
3067 ew32(RXDCTL(0), rxdctl | 0x3);
3068 if (adapter->flags & FLAG_HAS_ERT)
3069 ew32(ERT, E1000_ERT_2048 | (1 << 13));
3070 /*
3071 * With jumbo frames and early-receive enabled,
3072 * excessive C-state transition latencies result in
3073 * dropped transactions.
3074 */
3075 pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
3076 } else {
3077 pm_qos_update_request(&adapter->netdev->pm_qos_req,
3078 PM_QOS_DEFAULT_VALUE);
3079 }
3080 }
3081
3082 /* Enable Receives */
3083 ew32(RCTL, rctl);
3084}
3085
3086/**
3087 * e1000_update_mc_addr_list - Update Multicast addresses
3088 * @hw: pointer to the HW structure
3089 * @mc_addr_list: array of multicast addresses to program
3090 * @mc_addr_count: number of multicast addresses to program
3091 *
3092 * Updates the Multicast Table Array.
3093 * The caller must have a packed mc_addr_list of multicast addresses.
3094 **/
3095static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
3096 u32 mc_addr_count)
3097{
3098 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
3099}
3100
3101/**
3102 * e1000_set_multi - Multicast and Promiscuous mode set
3103 * @netdev: network interface device structure
3104 *
3105 * The set_multi entry point is called whenever the multicast address
3106 * list or the network interface flags are updated. This routine is
3107 * responsible for configuring the hardware for proper multicast,
3108 * promiscuous mode, and all-multi behavior.
3109 **/
3110static void e1000_set_multi(struct net_device *netdev)
3111{
3112 struct e1000_adapter *adapter = netdev_priv(netdev);
3113 struct e1000_hw *hw = &adapter->hw;
3114 struct netdev_hw_addr *ha;
3115 u8 *mta_list;
3116 u32 rctl;
3117
3118 /* Check for Promiscuous and All Multicast modes */
3119
3120 rctl = er32(RCTL);
3121
3122 if (netdev->flags & IFF_PROMISC) {
3123 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3124 rctl &= ~E1000_RCTL_VFE;
3125 /* Do not hardware filter VLANs in promisc mode */
3126 e1000e_vlan_filter_disable(adapter);
3127 } else {
3128 if (netdev->flags & IFF_ALLMULTI) {
3129 rctl |= E1000_RCTL_MPE;
3130 rctl &= ~E1000_RCTL_UPE;
3131 } else {
3132 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3133 }
3134 e1000e_vlan_filter_enable(adapter);
3135 }
3136
3137 ew32(RCTL, rctl);
3138
3139 if (!netdev_mc_empty(netdev)) {
3140 int i = 0;
3141
3142 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
3143 if (!mta_list)
3144 return;
3145
3146 /* prepare a packed array of only addresses. */
3147 netdev_for_each_mc_addr(ha, netdev)
3148 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3149
3150 e1000_update_mc_addr_list(hw, mta_list, i);
3151 kfree(mta_list);
3152 } else {
3153 /*
3154 * if we're called from probe, we might not have
3155 * anything to do here, so clear out the list
3156 */
3157 e1000_update_mc_addr_list(hw, NULL, 0);
3158 }
3159
3160 if (netdev->features & NETIF_F_HW_VLAN_RX)
3161 e1000e_vlan_strip_enable(adapter);
3162 else
3163 e1000e_vlan_strip_disable(adapter);
3164}
3165
3166/**
3167 * e1000_configure - configure the hardware for Rx and Tx
3168 * @adapter: private board structure
3169 **/
3170static void e1000_configure(struct e1000_adapter *adapter)
3171{
3172 e1000_set_multi(adapter->netdev);
3173
3174 e1000_restore_vlan(adapter);
3175 e1000_init_manageability_pt(adapter);
3176
3177 e1000_configure_tx(adapter);
3178 e1000_setup_rctl(adapter);
3179 e1000_configure_rx(adapter);
3180 adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring),
3181 GFP_KERNEL);
3182}
3183
3184/**
3185 * e1000e_power_up_phy - restore link in case the phy was powered down
3186 * @adapter: address of board private structure
3187 *
3188 * The phy may be powered down to save power and turn off link when the
3189 * driver is unloaded and wake on lan is not enabled (among others)
3190 * *** this routine MUST be followed by a call to e1000e_reset ***
3191 **/
3192void e1000e_power_up_phy(struct e1000_adapter *adapter)
3193{
3194 if (adapter->hw.phy.ops.power_up)
3195 adapter->hw.phy.ops.power_up(&adapter->hw);
3196
3197 adapter->hw.mac.ops.setup_link(&adapter->hw);
3198}
3199
3200/**
3201 * e1000_power_down_phy - Power down the PHY
3202 *
3203 * Power down the PHY so no link is implied when interface is down.
3204 * The PHY cannot be powered down if management or WoL is active.
3205 */
3206static void e1000_power_down_phy(struct e1000_adapter *adapter)
3207{
3208 /* WoL is enabled */
3209 if (adapter->wol)
3210 return;
3211
3212 if (adapter->hw.phy.ops.power_down)
3213 adapter->hw.phy.ops.power_down(&adapter->hw);
3214}
3215
3216/**
3217 * e1000e_reset - bring the hardware into a known good state
3218 *
3219 * This function boots the hardware and enables some settings that
3220 * require a configuration cycle of the hardware - those cannot be
3221 * set/changed during runtime. After reset the device needs to be
3222 * properly configured for Rx, Tx etc.
3223 */
3224void e1000e_reset(struct e1000_adapter *adapter)
3225{
3226 struct e1000_mac_info *mac = &adapter->hw.mac;
3227 struct e1000_fc_info *fc = &adapter->hw.fc;
3228 struct e1000_hw *hw = &adapter->hw;
3229 u32 tx_space, min_tx_space, min_rx_space;
3230 u32 pba = adapter->pba;
3231 u16 hwm;
3232
3233 /* reset Packet Buffer Allocation to default */
3234 ew32(PBA, pba);
3235
3236 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
3237 /*
3238 * To maintain wire speed transmits, the Tx FIFO should be
3239 * large enough to accommodate two full transmit packets,
3240 * rounded up to the next 1KB and expressed in KB. Likewise,
3241 * the Rx FIFO should be large enough to accommodate at least
3242 * one full receive packet and is similarly rounded up and
3243 * expressed in KB.
3244 */
3245 pba = er32(PBA);
3246 /* upper 16 bits has Tx packet buffer allocation size in KB */
3247 tx_space = pba >> 16;
3248 /* lower 16 bits has Rx packet buffer allocation size in KB */
3249 pba &= 0xffff;
3250 /*
3251 * the Tx fifo also stores 16 bytes of information about the Tx
3252 * but don't include ethernet FCS because hardware appends it
3253 */
3254 min_tx_space = (adapter->max_frame_size +
3255 sizeof(struct e1000_tx_desc) -
3256 ETH_FCS_LEN) * 2;
3257 min_tx_space = ALIGN(min_tx_space, 1024);
3258 min_tx_space >>= 10;
3259 /* software strips receive CRC, so leave room for it */
3260 min_rx_space = adapter->max_frame_size;
3261 min_rx_space = ALIGN(min_rx_space, 1024);
3262 min_rx_space >>= 10;
3263
3264 /*
3265 * If current Tx allocation is less than the min Tx FIFO size,
3266 * and the min Tx FIFO size is less than the current Rx FIFO
3267 * allocation, take space away from current Rx allocation
3268 */
3269 if ((tx_space < min_tx_space) &&
3270 ((min_tx_space - tx_space) < pba)) {
3271 pba -= min_tx_space - tx_space;
3272
3273 /*
3274 * if short on Rx space, Rx wins and must trump Tx
3275 * adjustment or use Early Receive if available
3276 */
3277 if ((pba < min_rx_space) &&
3278 (!(adapter->flags & FLAG_HAS_ERT)))
3279 /* ERT enabled in e1000_configure_rx */
3280 pba = min_rx_space;
3281 }
3282
3283 ew32(PBA, pba);
3284 }
3285
3286 /*
3287 * flow control settings
3288 *
3289 * The high water mark must be low enough to fit one full frame
3290 * (or the size used for early receive) above it in the Rx FIFO.
3291 * Set it to the lower of:
3292 * - 90% of the Rx FIFO size, and
3293 * - the full Rx FIFO size minus the early receive size (for parts
3294 * with ERT support assuming ERT set to E1000_ERT_2048), or
3295 * - the full Rx FIFO size minus one full frame
3296 */
3297 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3298 fc->pause_time = 0xFFFF;
3299 else
3300 fc->pause_time = E1000_FC_PAUSE_TIME;
3301 fc->send_xon = 1;
3302 fc->current_mode = fc->requested_mode;
3303
3304 switch (hw->mac.type) {
3305 default:
3306 if ((adapter->flags & FLAG_HAS_ERT) &&
3307 (adapter->netdev->mtu > ETH_DATA_LEN))
3308 hwm = min(((pba << 10) * 9 / 10),
3309 ((pba << 10) - (E1000_ERT_2048 << 3)));
3310 else
3311 hwm = min(((pba << 10) * 9 / 10),
3312 ((pba << 10) - adapter->max_frame_size));
3313
3314 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3315 fc->low_water = fc->high_water - 8;
3316 break;
3317 case e1000_pchlan:
3318 /*
3319 * Workaround PCH LOM adapter hangs with certain network
3320 * loads. If hangs persist, try disabling Tx flow control.
3321 */
3322 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3323 fc->high_water = 0x3500;
3324 fc->low_water = 0x1500;
3325 } else {
3326 fc->high_water = 0x5000;
3327 fc->low_water = 0x3000;
3328 }
3329 fc->refresh_time = 0x1000;
3330 break;
3331 case e1000_pch2lan:
3332 fc->high_water = 0x05C20;
3333 fc->low_water = 0x05048;
3334 fc->pause_time = 0x0650;
3335 fc->refresh_time = 0x0400;
3336 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3337 pba = 14;
3338 ew32(PBA, pba);
3339 }
3340 break;
3341 }
3342
3343 /*
3344 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
3345 * fit in receive buffer and early-receive not supported.
3346 */
3347 if (adapter->itr_setting & 0x3) {
3348 if (((adapter->max_frame_size * 2) > (pba << 10)) &&
3349 !(adapter->flags & FLAG_HAS_ERT)) {
3350 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3351 dev_info(&adapter->pdev->dev,
3352 "Interrupt Throttle Rate turned off\n");
3353 adapter->flags2 |= FLAG2_DISABLE_AIM;
3354 ew32(ITR, 0);
3355 }
3356 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3357 dev_info(&adapter->pdev->dev,
3358 "Interrupt Throttle Rate turned on\n");
3359 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3360 adapter->itr = 20000;
3361 ew32(ITR, 1000000000 / (adapter->itr * 256));
3362 }
3363 }
3364
3365 /* Allow time for pending master requests to run */
3366 mac->ops.reset_hw(hw);
3367
3368 /*
3369 * For parts with AMT enabled, let the firmware know
3370 * that the network interface is in control
3371 */
3372 if (adapter->flags & FLAG_HAS_AMT)
3373 e1000e_get_hw_control(adapter);
3374
3375 ew32(WUC, 0);
3376
3377 if (mac->ops.init_hw(hw))
3378 e_err("Hardware Error\n");
3379
3380 e1000_update_mng_vlan(adapter);
3381
3382 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
3383 ew32(VET, ETH_P_8021Q);
3384
3385 e1000e_reset_adaptive(hw);
3386
3387 if (!netif_running(adapter->netdev) &&
3388 !test_bit(__E1000_TESTING, &adapter->state)) {
3389 e1000_power_down_phy(adapter);
3390 return;
3391 }
3392
3393 e1000_get_phy_info(hw);
3394
3395 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3396 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
3397 u16 phy_data = 0;
3398 /*
3399 * speed up time to link by disabling smart power down, ignore
3400 * the return value of this function because there is nothing
3401 * different we would do if it failed
3402 */
3403 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3404 phy_data &= ~IGP02E1000_PM_SPD;
3405 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3406 }
3407}
3408
3409int e1000e_up(struct e1000_adapter *adapter)
3410{
3411 struct e1000_hw *hw = &adapter->hw;
3412
3413 /* hardware has been reset, we need to reload some things */
3414 e1000_configure(adapter);
3415
3416 clear_bit(__E1000_DOWN, &adapter->state);
3417
3418 napi_enable(&adapter->napi);
3419 if (adapter->msix_entries)
3420 e1000_configure_msix(adapter);
3421 e1000_irq_enable(adapter);
3422
3423 netif_start_queue(adapter->netdev);
3424
3425 /* fire a link change interrupt to start the watchdog */
3426 if (adapter->msix_entries)
3427 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3428 else
3429 ew32(ICS, E1000_ICS_LSC);
3430
3431 return 0;
3432}
3433
3434static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3435{
3436 struct e1000_hw *hw = &adapter->hw;
3437
3438 if (!(adapter->flags2 & FLAG2_DMA_BURST))
3439 return;
3440
3441 /* flush pending descriptor writebacks to memory */
3442 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3443 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3444
3445 /* execute the writes immediately */
3446 e1e_flush();
3447}
3448
3449static void e1000e_update_stats(struct e1000_adapter *adapter);
3450
/**
 * e1000e_down - quiesce the device and tear down software state
 * @adapter: board private structure
 *
 * Stops Rx/Tx in hardware, disables NAPI, interrupts and timers, takes a
 * final statistics snapshot, cleans the descriptor rings and resets the
 * part.  The ordering below is deliberate: hardware is silenced before
 * the software paths that it can trigger are torn down.
 */
void e1000e_down(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;

	/*
	 * signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__E1000_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
		ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	usleep_range(10000, 20000);

	napi_disable(&adapter->napi);
	e1000_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* capture final counter values before the rings are torn down */
	spin_lock(&adapter->stats64_lock);
	e1000e_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	/* push out pending writebacks, then reclaim all ring buffers */
	e1000e_flush_descriptors(adapter);
	e1000_clean_tx_ring(adapter);
	e1000_clean_rx_ring(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	/* skip the reset if the PCI channel is gone (e.g. AER recovery) */
	if (!pci_channel_offline(adapter->pdev))
		e1000e_reset(adapter);

	/*
	 * TODO: for power management, we could drop the link and
	 * pci_disable_device here.
	 */
}
3507
3508void e1000e_reinit_locked(struct e1000_adapter *adapter)
3509{
3510 might_sleep();
3511 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
3512 usleep_range(1000, 2000);
3513 e1000e_down(adapter);
3514 e1000e_up(adapter);
3515 clear_bit(__E1000_RESETTING, &adapter->state);
3516}
3517
3518/**
3519 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
3520 * @adapter: board private structure to initialize
3521 *
3522 * e1000_sw_init initializes the Adapter private data structure.
3523 * Fields are initialized based on PCI device information and
3524 * OS network device settings (MTU size).
3525 **/
3526static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
3527{
3528 struct net_device *netdev = adapter->netdev;
3529
3530 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
3531 adapter->rx_ps_bsize0 = 128;
3532 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3533 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3534
3535 spin_lock_init(&adapter->stats64_lock);
3536
3537 e1000e_set_interrupt_capability(adapter);
3538
3539 if (e1000_alloc_queues(adapter))
3540 return -ENOMEM;
3541
3542 /* Explicitly disable IRQ since the NIC can be in any state. */
3543 e1000_irq_disable(adapter);
3544
3545 set_bit(__E1000_DOWN, &adapter->state);
3546 return 0;
3547}
3548
3549/**
3550 * e1000_intr_msi_test - Interrupt Handler
3551 * @irq: interrupt number
3552 * @data: pointer to a network interface device structure
3553 **/
3554static irqreturn_t e1000_intr_msi_test(int irq, void *data)
3555{
3556 struct net_device *netdev = data;
3557 struct e1000_adapter *adapter = netdev_priv(netdev);
3558 struct e1000_hw *hw = &adapter->hw;
3559 u32 icr = er32(ICR);
3560
3561 e_dbg("icr is %08X\n", icr);
3562 if (icr & E1000_ICR_RXSEQ) {
3563 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
3564 wmb();
3565 }
3566
3567 return IRQ_HANDLED;
3568}
3569
3570/**
3571 * e1000_test_msi_interrupt - Returns 0 for successful test
3572 * @adapter: board private struct
3573 *
3574 * code flow taken from tg3.c
3575 **/
3576static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3577{
3578 struct net_device *netdev = adapter->netdev;
3579 struct e1000_hw *hw = &adapter->hw;
3580 int err;
3581
3582 /* poll_enable hasn't been called yet, so don't need disable */
3583 /* clear any pending events */
3584 er32(ICR);
3585
3586 /* free the real vector and request a test handler */
3587 e1000_free_irq(adapter);
3588 e1000e_reset_interrupt_capability(adapter);
3589
3590 /* Assume that the test fails, if it succeeds then the test
3591 * MSI irq handler will unset this flag */
3592 adapter->flags |= FLAG_MSI_TEST_FAILED;
3593
3594 err = pci_enable_msi(adapter->pdev);
3595 if (err)
3596 goto msi_test_failed;
3597
3598 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
3599 netdev->name, netdev);
3600 if (err) {
3601 pci_disable_msi(adapter->pdev);
3602 goto msi_test_failed;
3603 }
3604
3605 wmb();
3606
3607 e1000_irq_enable(adapter);
3608
3609 /* fire an unusual interrupt on the test handler */
3610 ew32(ICS, E1000_ICS_RXSEQ);
3611 e1e_flush();
3612 msleep(50);
3613
3614 e1000_irq_disable(adapter);
3615
3616 rmb();
3617
3618 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
3619 adapter->int_mode = E1000E_INT_MODE_LEGACY;
3620 e_info("MSI interrupt test failed, using legacy interrupt.\n");
3621 } else
3622 e_dbg("MSI interrupt test succeeded!\n");
3623
3624 free_irq(adapter->pdev->irq, netdev);
3625 pci_disable_msi(adapter->pdev);
3626
3627msi_test_failed:
3628 e1000e_set_interrupt_capability(adapter);
3629 return e1000_request_irq(adapter);
3630}
3631
3632/**
3633 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
3634 * @adapter: board private struct
3635 *
3636 * code flow taken from tg3.c, called with e1000 interrupts disabled.
3637 **/
3638static int e1000_test_msi(struct e1000_adapter *adapter)
3639{
3640 int err;
3641 u16 pci_cmd;
3642
3643 if (!(adapter->flags & FLAG_MSI_ENABLED))
3644 return 0;
3645
3646 /* disable SERR in case the MSI write causes a master abort */
3647 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3648 if (pci_cmd & PCI_COMMAND_SERR)
3649 pci_write_config_word(adapter->pdev, PCI_COMMAND,
3650 pci_cmd & ~PCI_COMMAND_SERR);
3651
3652 err = e1000_test_msi_interrupt(adapter);
3653
3654 /* re-enable SERR */
3655 if (pci_cmd & PCI_COMMAND_SERR) {
3656 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3657 pci_cmd |= PCI_COMMAND_SERR;
3658 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
3659 }
3660
3661 return err;
3662}
3663
3664/**
3665 * e1000_open - Called when a network interface is made active
3666 * @netdev: network interface device structure
3667 *
3668 * Returns 0 on success, negative value on failure
3669 *
3670 * The open entry point is called when a network interface is made
3671 * active by the system (IFF_UP). At this point all resources needed
3672 * for transmit and receive operations are allocated, the interrupt
3673 * handler is registered with the OS, the watchdog timer is started,
3674 * and the stack is notified that the interface is ready.
3675 **/
3676static int e1000_open(struct net_device *netdev)
3677{
3678 struct e1000_adapter *adapter = netdev_priv(netdev);
3679 struct e1000_hw *hw = &adapter->hw;
3680 struct pci_dev *pdev = adapter->pdev;
3681 int err;
3682
3683 /* disallow open during test */
3684 if (test_bit(__E1000_TESTING, &adapter->state))
3685 return -EBUSY;
3686
3687 pm_runtime_get_sync(&pdev->dev);
3688
3689 netif_carrier_off(netdev);
3690
3691 /* allocate transmit descriptors */
3692 err = e1000e_setup_tx_resources(adapter);
3693 if (err)
3694 goto err_setup_tx;
3695
3696 /* allocate receive descriptors */
3697 err = e1000e_setup_rx_resources(adapter);
3698 if (err)
3699 goto err_setup_rx;
3700
3701 /*
3702 * If AMT is enabled, let the firmware know that the network
3703 * interface is now open and reset the part to a known state.
3704 */
3705 if (adapter->flags & FLAG_HAS_AMT) {
3706 e1000e_get_hw_control(adapter);
3707 e1000e_reset(adapter);
3708 }
3709
3710 e1000e_power_up_phy(adapter);
3711
3712 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3713 if ((adapter->hw.mng_cookie.status &
3714 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
3715 e1000_update_mng_vlan(adapter);
3716
3717 /* DMA latency requirement to workaround early-receive/jumbo issue */
3718 if ((adapter->flags & FLAG_HAS_ERT) ||
3719 (adapter->hw.mac.type == e1000_pch2lan))
3720 pm_qos_add_request(&adapter->netdev->pm_qos_req,
3721 PM_QOS_CPU_DMA_LATENCY,
3722 PM_QOS_DEFAULT_VALUE);
3723
3724 /*
3725 * before we allocate an interrupt, we must be ready to handle it.
3726 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3727 * as soon as we call pci_request_irq, so we have to setup our
3728 * clean_rx handler before we do so.
3729 */
3730 e1000_configure(adapter);
3731
3732 err = e1000_request_irq(adapter);
3733 if (err)
3734 goto err_req_irq;
3735
3736 /*
3737 * Work around PCIe errata with MSI interrupts causing some chipsets to
3738 * ignore e1000e MSI messages, which means we need to test our MSI
3739 * interrupt now
3740 */
3741 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
3742 err = e1000_test_msi(adapter);
3743 if (err) {
3744 e_err("Interrupt allocation failed\n");
3745 goto err_req_irq;
3746 }
3747 }
3748
3749 /* From here on the code is the same as e1000e_up() */
3750 clear_bit(__E1000_DOWN, &adapter->state);
3751
3752 napi_enable(&adapter->napi);
3753
3754 e1000_irq_enable(adapter);
3755
3756 netif_start_queue(netdev);
3757
3758 adapter->idle_check = true;
3759 pm_runtime_put(&pdev->dev);
3760
3761 /* fire a link status change interrupt to start the watchdog */
3762 if (adapter->msix_entries)
3763 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3764 else
3765 ew32(ICS, E1000_ICS_LSC);
3766
3767 return 0;
3768
3769err_req_irq:
3770 e1000e_release_hw_control(adapter);
3771 e1000_power_down_phy(adapter);
3772 e1000e_free_rx_resources(adapter);
3773err_setup_rx:
3774 e1000e_free_tx_resources(adapter);
3775err_setup_tx:
3776 e1000e_reset(adapter);
3777 pm_runtime_put_sync(&pdev->dev);
3778
3779 return err;
3780}
3781
3782/**
3783 * e1000_close - Disables a network interface
3784 * @netdev: network interface device structure
3785 *
3786 * Returns 0, this is not allowed to fail
3787 *
3788 * The close entry point is called when an interface is de-activated
3789 * by the OS. The hardware is still under the drivers control, but
3790 * needs to be disabled. A global MAC reset is issued to stop the
3791 * hardware, and all transmit and receive resources are freed.
3792 **/
3793static int e1000_close(struct net_device *netdev)
3794{
3795 struct e1000_adapter *adapter = netdev_priv(netdev);
3796 struct pci_dev *pdev = adapter->pdev;
3797
3798 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3799
3800 pm_runtime_get_sync(&pdev->dev);
3801
3802 if (!test_bit(__E1000_DOWN, &adapter->state)) {
3803 e1000e_down(adapter);
3804 e1000_free_irq(adapter);
3805 }
3806 e1000_power_down_phy(adapter);
3807
3808 e1000e_free_tx_resources(adapter);
3809 e1000e_free_rx_resources(adapter);
3810
3811 /*
3812 * kill manageability vlan ID if supported, but not if a vlan with
3813 * the same ID is registered on the host OS (let 8021q kill it)
3814 */
3815 if (adapter->hw.mng_cookie.status &
3816 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
3817 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3818
3819 /*
3820 * If AMT is enabled, let the firmware know that the network
3821 * interface is now closed
3822 */
3823 if ((adapter->flags & FLAG_HAS_AMT) &&
3824 !test_bit(__E1000_TESTING, &adapter->state))
3825 e1000e_release_hw_control(adapter);
3826
3827 if ((adapter->flags & FLAG_HAS_ERT) ||
3828 (adapter->hw.mac.type == e1000_pch2lan))
3829 pm_qos_remove_request(&adapter->netdev->pm_qos_req);
3830
3831 pm_runtime_put_sync(&pdev->dev);
3832
3833 return 0;
3834}
3835/**
3836 * e1000_set_mac - Change the Ethernet Address of the NIC
3837 * @netdev: network interface device structure
3838 * @p: pointer to an address structure
3839 *
3840 * Returns 0 on success, negative on failure
3841 **/
3842static int e1000_set_mac(struct net_device *netdev, void *p)
3843{
3844 struct e1000_adapter *adapter = netdev_priv(netdev);
3845 struct sockaddr *addr = p;
3846
3847 if (!is_valid_ether_addr(addr->sa_data))
3848 return -EADDRNOTAVAIL;
3849
3850 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3851 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
3852
3853 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
3854
3855 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
3856 /* activate the work around */
3857 e1000e_set_laa_state_82571(&adapter->hw, 1);
3858
3859 /*
3860 * Hold a copy of the LAA in RAR[14] This is done so that
3861 * between the time RAR[0] gets clobbered and the time it
3862 * gets fixed (in e1000_watchdog), the actual LAA is in one
3863 * of the RARs and no incoming packets directed to this port
3864 * are dropped. Eventually the LAA will be in RAR[0] and
3865 * RAR[14]
3866 */
3867 e1000e_rar_set(&adapter->hw,
3868 adapter->hw.mac.addr,
3869 adapter->hw.mac.rar_entry_count - 1);
3870 }
3871
3872 return 0;
3873}
3874
3875/**
3876 * e1000e_update_phy_task - work thread to update phy
3877 * @work: pointer to our work struct
3878 *
3879 * this worker thread exists because we must acquire a
3880 * semaphore to read the phy, which we could msleep while
3881 * waiting for it, and we can't msleep in a timer.
3882 **/
3883static void e1000e_update_phy_task(struct work_struct *work)
3884{
3885 struct e1000_adapter *adapter = container_of(work,
3886 struct e1000_adapter, update_phy_task);
3887
3888 if (test_bit(__E1000_DOWN, &adapter->state))
3889 return;
3890
3891 e1000_get_phy_info(&adapter->hw);
3892}
3893
3894/*
3895 * Need to wait a few seconds after link up to get diagnostic information from
3896 * the phy
3897 */
3898static void e1000_update_phy_info(unsigned long data)
3899{
3900 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
3901
3902 if (test_bit(__E1000_DOWN, &adapter->state))
3903 return;
3904
3905 schedule_work(&adapter->update_phy_task);
3906}
3907
3908/**
3909 * e1000e_update_phy_stats - Update the PHY statistics counters
3910 * @adapter: board private structure
3911 *
3912 * Read/clear the upper 16-bit PHY registers and read/accumulate lower
3913 **/
3914static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
3915{
3916 struct e1000_hw *hw = &adapter->hw;
3917 s32 ret_val;
3918 u16 phy_data;
3919
3920 ret_val = hw->phy.ops.acquire(hw);
3921 if (ret_val)
3922 return;
3923
3924 /*
3925 * A page set is expensive so check if already on desired page.
3926 * If not, set to the page with the PHY status registers.
3927 */
3928 hw->phy.addr = 1;
3929 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
3930 &phy_data);
3931 if (ret_val)
3932 goto release;
3933 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
3934 ret_val = hw->phy.ops.set_page(hw,
3935 HV_STATS_PAGE << IGP_PAGE_SHIFT);
3936 if (ret_val)
3937 goto release;
3938 }
3939
3940 /* Single Collision Count */
3941 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
3942 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
3943 if (!ret_val)
3944 adapter->stats.scc += phy_data;
3945
3946 /* Excessive Collision Count */
3947 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
3948 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
3949 if (!ret_val)
3950 adapter->stats.ecol += phy_data;
3951
3952 /* Multiple Collision Count */
3953 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
3954 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
3955 if (!ret_val)
3956 adapter->stats.mcc += phy_data;
3957
3958 /* Late Collision Count */
3959 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
3960 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
3961 if (!ret_val)
3962 adapter->stats.latecol += phy_data;
3963
3964 /* Collision Count - also used for adaptive IFS */
3965 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
3966 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
3967 if (!ret_val)
3968 hw->mac.collision_delta = phy_data;
3969
3970 /* Defer Count */
3971 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
3972 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
3973 if (!ret_val)
3974 adapter->stats.dc += phy_data;
3975
3976 /* Transmit with no CRS */
3977 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
3978 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
3979 if (!ret_val)
3980 adapter->stats.tncrs += phy_data;
3981
3982release:
3983 hw->phy.ops.release(hw);
3984}
3985
3986/**
3987 * e1000e_update_stats - Update the board statistics counters
3988 * @adapter: board private structure
3989 **/
3990static void e1000e_update_stats(struct e1000_adapter *adapter)
3991{
3992 struct net_device *netdev = adapter->netdev;
3993 struct e1000_hw *hw = &adapter->hw;
3994 struct pci_dev *pdev = adapter->pdev;
3995
3996 /*
3997 * Prevent stats update while adapter is being reset, or if the pci
3998 * connection is down.
3999 */
4000 if (adapter->link_speed == 0)
4001 return;
4002 if (pci_channel_offline(pdev))
4003 return;
4004
4005 adapter->stats.crcerrs += er32(CRCERRS);
4006 adapter->stats.gprc += er32(GPRC);
4007 adapter->stats.gorc += er32(GORCL);
4008 er32(GORCH); /* Clear gorc */
4009 adapter->stats.bprc += er32(BPRC);
4010 adapter->stats.mprc += er32(MPRC);
4011 adapter->stats.roc += er32(ROC);
4012
4013 adapter->stats.mpc += er32(MPC);
4014
4015 /* Half-duplex statistics */
4016 if (adapter->link_duplex == HALF_DUPLEX) {
4017 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
4018 e1000e_update_phy_stats(adapter);
4019 } else {
4020 adapter->stats.scc += er32(SCC);
4021 adapter->stats.ecol += er32(ECOL);
4022 adapter->stats.mcc += er32(MCC);
4023 adapter->stats.latecol += er32(LATECOL);
4024 adapter->stats.dc += er32(DC);
4025
4026 hw->mac.collision_delta = er32(COLC);
4027
4028 if ((hw->mac.type != e1000_82574) &&
4029 (hw->mac.type != e1000_82583))
4030 adapter->stats.tncrs += er32(TNCRS);
4031 }
4032 adapter->stats.colc += hw->mac.collision_delta;
4033 }
4034
4035 adapter->stats.xonrxc += er32(XONRXC);
4036 adapter->stats.xontxc += er32(XONTXC);
4037 adapter->stats.xoffrxc += er32(XOFFRXC);
4038 adapter->stats.xofftxc += er32(XOFFTXC);
4039 adapter->stats.gptc += er32(GPTC);
4040 adapter->stats.gotc += er32(GOTCL);
4041 er32(GOTCH); /* Clear gotc */
4042 adapter->stats.rnbc += er32(RNBC);
4043 adapter->stats.ruc += er32(RUC);
4044
4045 adapter->stats.mptc += er32(MPTC);
4046 adapter->stats.bptc += er32(BPTC);
4047
4048 /* used for adaptive IFS */
4049
4050 hw->mac.tx_packet_delta = er32(TPT);
4051 adapter->stats.tpt += hw->mac.tx_packet_delta;
4052
4053 adapter->stats.algnerrc += er32(ALGNERRC);
4054 adapter->stats.rxerrc += er32(RXERRC);
4055 adapter->stats.cexterr += er32(CEXTERR);
4056 adapter->stats.tsctc += er32(TSCTC);
4057 adapter->stats.tsctfc += er32(TSCTFC);
4058
4059 /* Fill out the OS statistics structure */
4060 netdev->stats.multicast = adapter->stats.mprc;
4061 netdev->stats.collisions = adapter->stats.colc;
4062
4063 /* Rx Errors */
4064
4065 /*
4066 * RLEC on some newer hardware can be incorrect so build
4067 * our own version based on RUC and ROC
4068 */
4069 netdev->stats.rx_errors = adapter->stats.rxerrc +
4070 adapter->stats.crcerrs + adapter->stats.algnerrc +
4071 adapter->stats.ruc + adapter->stats.roc +
4072 adapter->stats.cexterr;
4073 netdev->stats.rx_length_errors = adapter->stats.ruc +
4074 adapter->stats.roc;
4075 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4076 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
4077 netdev->stats.rx_missed_errors = adapter->stats.mpc;
4078
4079 /* Tx Errors */
4080 netdev->stats.tx_errors = adapter->stats.ecol +
4081 adapter->stats.latecol;
4082 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
4083 netdev->stats.tx_window_errors = adapter->stats.latecol;
4084 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
4085
4086 /* Tx Dropped needs to be maintained elsewhere */
4087
4088 /* Management Stats */
4089 adapter->stats.mgptc += er32(MGTPTC);
4090 adapter->stats.mgprc += er32(MGTPRC);
4091 adapter->stats.mgpdc += er32(MGTPDC);
4092}
4093
4094/**
4095 * e1000_phy_read_status - Update the PHY register status snapshot
4096 * @adapter: board private structure
4097 **/
4098static void e1000_phy_read_status(struct e1000_adapter *adapter)
4099{
4100 struct e1000_hw *hw = &adapter->hw;
4101 struct e1000_phy_regs *phy = &adapter->phy_regs;
4102
4103 if ((er32(STATUS) & E1000_STATUS_LU) &&
4104 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4105 int ret_val;
4106
4107 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
4108 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
4109 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
4110 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
4111 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
4112 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
4113 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
4114 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
4115 if (ret_val)
4116 e_warn("Error reading PHY register\n");
4117 } else {
4118 /*
4119 * Do not read PHY registers if link is not up
4120 * Set values to typical power-on defaults
4121 */
4122 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
4123 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
4124 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
4125 BMSR_ERCAP);
4126 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
4127 ADVERTISE_ALL | ADVERTISE_CSMA);
4128 phy->lpa = 0;
4129 phy->expansion = EXPANSION_ENABLENPAGE;
4130 phy->ctrl1000 = ADVERTISE_1000FULL;
4131 phy->stat1000 = 0;
4132 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
4133 }
4134}
4135
4136static void e1000_print_link_info(struct e1000_adapter *adapter)
4137{
4138 struct e1000_hw *hw = &adapter->hw;
4139 u32 ctrl = er32(CTRL);
4140
4141 /* Link status message must follow this format for user tools */
4142 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
4143 "Flow Control: %s\n",
4144 adapter->netdev->name,
4145 adapter->link_speed,
4146 (adapter->link_duplex == FULL_DUPLEX) ?
4147 "Full Duplex" : "Half Duplex",
4148 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
4149 "Rx/Tx" :
4150 ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
4151 ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
4152}
4153
4154static bool e1000e_has_link(struct e1000_adapter *adapter)
4155{
4156 struct e1000_hw *hw = &adapter->hw;
4157 bool link_active = 0;
4158 s32 ret_val = 0;
4159
4160 /*
4161 * get_link_status is set on LSC (link status) interrupt or
4162 * Rx sequence error interrupt. get_link_status will stay
4163 * false until the check_for_link establishes link
4164 * for copper adapters ONLY
4165 */
4166 switch (hw->phy.media_type) {
4167 case e1000_media_type_copper:
4168 if (hw->mac.get_link_status) {
4169 ret_val = hw->mac.ops.check_for_link(hw);
4170 link_active = !hw->mac.get_link_status;
4171 } else {
4172 link_active = 1;
4173 }
4174 break;
4175 case e1000_media_type_fiber:
4176 ret_val = hw->mac.ops.check_for_link(hw);
4177 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
4178 break;
4179 case e1000_media_type_internal_serdes:
4180 ret_val = hw->mac.ops.check_for_link(hw);
4181 link_active = adapter->hw.mac.serdes_has_link;
4182 break;
4183 default:
4184 case e1000_media_type_unknown:
4185 break;
4186 }
4187
4188 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4189 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4190 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
4191 e_info("Gigabit has been disabled, downgrading speed\n");
4192 }
4193
4194 return link_active;
4195}
4196
4197static void e1000e_enable_receives(struct e1000_adapter *adapter)
4198{
4199 /* make sure the receive unit is started */
4200 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4201 (adapter->flags & FLAG_RX_RESTART_NOW)) {
4202 struct e1000_hw *hw = &adapter->hw;
4203 u32 rctl = er32(RCTL);
4204 ew32(RCTL, rctl | E1000_RCTL_EN);
4205 adapter->flags &= ~FLAG_RX_RESTART_NOW;
4206 }
4207}
4208
4209static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4210{
4211 struct e1000_hw *hw = &adapter->hw;
4212
4213 /*
4214 * With 82574 controllers, PHY needs to be checked periodically
4215 * for hung state and reset, if two calls return true
4216 */
4217 if (e1000_check_phy_82574(hw))
4218 adapter->phy_hang_count++;
4219 else
4220 adapter->phy_hang_count = 0;
4221
4222 if (adapter->phy_hang_count > 1) {
4223 adapter->phy_hang_count = 0;
4224 schedule_work(&adapter->reset_task);
4225 }
4226}
4227
4228/**
4229 * e1000_watchdog - Timer Call-back
4230 * @data: pointer to adapter cast into an unsigned long
4231 **/
4232static void e1000_watchdog(unsigned long data)
4233{
4234 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
4235
4236 /* Do the rest outside of interrupt context */
4237 schedule_work(&adapter->watchdog_task);
4238
4239 /* TODO: make this use queue_delayed_work() */
4240}
4241
/**
 * e1000_watchdog_task - periodic link-state machine and housekeeping
 * @work: pointer to the adapter's watchdog_task work struct
 *
 * Detects link up/down transitions (configuring speed-dependent
 * settings and TSO on transitions), refreshes statistics and adaptive
 * IFS/ITR state, checks for Tx hangs and 82574 PHY hangs, and finally
 * re-arms the watchdog timer.  Runs in process context.
 */
static void e1000_watchdog_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct e1000_phy_info *phy = &adapter->hw.phy;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link, tctl;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	link = e1000e_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link) {
		/* link already up: nothing to transition, just housekeeping */
		/* Cancel scheduled suspend requests. */
		pm_runtime_resume(netdev->dev.parent);

		e1000e_enable_receives(adapter);
		goto link_up;
	}

	if ((e1000e_enable_tx_pkt_filtering(hw)) &&
	    (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
		e1000_update_mng_vlan(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			/* link-down -> link-up transition */
			bool txb2b = 1;

			/* Cancel scheduled suspend requests. */
			pm_runtime_resume(netdev->dev.parent);

			/* update snapshot of PHY registers on LSC */
			e1000_phy_read_status(adapter);
			mac->ops.get_link_up_info(&adapter->hw,
						   &adapter->link_speed,
						   &adapter->link_duplex);
			e1000_print_link_info(adapter);
			/*
			 * On supported PHYs, check for duplex mismatch only
			 * if link has autonegotiated at 10/100 half
			 */
			if ((hw->phy.type == e1000_phy_igp_3 ||
			     hw->phy.type == e1000_phy_bm) &&
			    (hw->mac.autoneg == true) &&
			    (adapter->link_speed == SPEED_10 ||
			     adapter->link_speed == SPEED_100) &&
			    (adapter->link_duplex == HALF_DUPLEX)) {
				u16 autoneg_exp;

				e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);

				if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
					e_info("Autonegotiated half duplex but"
					       " link partner cannot autoneg. "
					       " Try forcing full duplex if "
					       "link gets many collisions.\n");
			}

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				txb2b = 0;
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				txb2b = 0;
				adapter->tx_timeout_factor = 10;
				break;
			}

			/*
			 * workaround: re-program speed mode bit after
			 * link-up event
			 */
			if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
			    !txb2b) {
				u32 tarc0;
				tarc0 = er32(TARC(0));
				tarc0 &= ~SPEED_MODE_BIT;
				ew32(TARC(0), tarc0);
			}

			/*
			 * disable TSO for pcie and 10/100 speeds, to avoid
			 * some hardware issues
			 */
			if (!(adapter->flags & FLAG_TSO_FORCE)) {
				switch (adapter->link_speed) {
				case SPEED_10:
				case SPEED_100:
					e_info("10/100 speed: disabling TSO\n");
					netdev->features &= ~NETIF_F_TSO;
					netdev->features &= ~NETIF_F_TSO6;
					break;
				case SPEED_1000:
					netdev->features |= NETIF_F_TSO;
					netdev->features |= NETIF_F_TSO6;
					break;
				default:
					/* oops */
					break;
				}
			}

			/*
			 * enable transmits in the hardware, need to do this
			 * after setting TARC(0)
			 */
			tctl = er32(TCTL);
			tctl |= E1000_TCTL_EN;
			ew32(TCTL, tctl);

			/*
			 * Perform any post-link-up configuration before
			 * reporting link up.
			 */
			if (phy->ops.cfg_on_link_up)
				phy->ops.cfg_on_link_up(hw);

			netif_carrier_on(netdev);

			/* PHY diagnostics are only valid a while after link-up */
			if (!test_bit(__E1000_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			/* link-up -> link-down transition */
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			/* Link status message must follow this format */
			printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
			       adapter->netdev->name);
			netif_carrier_off(netdev);
			if (!test_bit(__E1000_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
				schedule_work(&adapter->reset_task);
			else
				pm_schedule_suspend(netdev->dev.parent,
							LINK_TIMEOUT);
		}
	}

link_up:
	spin_lock(&adapter->stats64_lock);
	e1000e_update_stats(adapter);

	/* compute per-interval deltas used by adaptive IFS and simple ITR */
	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
	adapter->gorc_old = adapter->stats.gorc;
	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
	adapter->gotc_old = adapter->stats.gotc;
	spin_unlock(&adapter->stats64_lock);

	e1000e_update_adaptive(&adapter->hw);

	if (!netif_carrier_ok(netdev) &&
	    (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
		/*
		 * We've lost link, so the controller stops DMA,
		 * but we've got queued Tx work that's never going
		 * to get done, so reset controller to flush Tx.
		 * (Do the reset outside of interrupt context).
		 */
		schedule_work(&adapter->reset_task);
		/* return immediately since reset is imminent */
		return;
	}

	/* Simple mode for Interrupt Throttle Rate (ITR) */
	if (adapter->itr_setting == 4) {
		/*
		 * Symmetric Tx/Rx gets a reduced ITR=2000;
		 * Total asymmetrical Tx or Rx gets ITR=8000;
		 * everyone else is between 2000-8000.
		 */
		u32 goc = (adapter->gotc + adapter->gorc) / 10000;
		u32 dif = (adapter->gotc > adapter->gorc ?
			    adapter->gotc - adapter->gorc :
			    adapter->gorc - adapter->gotc) / 10000;
		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

		ew32(ITR, 1000000000 / (itr * 256));
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	if (adapter->msix_entries)
		ew32(ICS, adapter->rx_ring->ims_val);
	else
		ew32(ICS, E1000_ICS_RXDMT0);

	/* flush pending descriptors to memory before detecting Tx hang */
	e1000e_flush_descriptors(adapter);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = 1;

	/*
	 * With 82571 controllers, LAA may be overwritten due to controller
	 * reset from the other port. Set the appropriate LAA in RAR[0]
	 */
	if (e1000e_get_laa_state_82571(hw))
		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);

	if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
		e1000e_check_82574_phy_workaround(adapter);

	/* Reset the timer */
	if (!test_bit(__E1000_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}
4464
4465#define E1000_TX_FLAGS_CSUM 0x00000001
4466#define E1000_TX_FLAGS_VLAN 0x00000002
4467#define E1000_TX_FLAGS_TSO 0x00000004
4468#define E1000_TX_FLAGS_IPV4 0x00000008
4469#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
4470#define E1000_TX_FLAGS_VLAN_SHIFT 16
4471
/**
 * e1000_tso - set up a TSO context descriptor for a GSO packet
 * @adapter: board private structure
 * @skb: packet requiring TCP segmentation offload
 *
 * Writes one context descriptor describing the IP/TCP header layout so
 * the hardware can segment @skb, and advances tx_ring->next_to_use.
 *
 * Returns 1 if a context descriptor was queued, 0 if @skb is not GSO,
 * or a negative errno if un-sharing a cloned header failed.
 */
static int e1000_tso(struct e1000_adapter *adapter,
		     struct sk_buff *skb)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;

	if (!skb_is_gso(skb))
		return 0;

	/* header checksum fields are modified in place below, so a
	 * cloned header must be made private first
	 */
	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);

		if (err)
			return err;
	}

	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	mss = skb_shinfo(skb)->gso_size;
	if (skb->protocol == htons(ETH_P_IP)) {
		/* zero total length/checksum and seed the TCP checksum
		 * with the pseudo-header sum; hardware finalizes both
		 * per generated segment
		 */
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
		                                         0, IPPROTO_TCP, 0);
		cmd_length = E1000_TXD_CMD_IP;
		ipcse = skb_transport_offset(skb) - 1;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
		                                       &ipv6_hdr(skb)->daddr,
		                                       0, IPPROTO_TCP, 0);
		ipcse = 0;
	}
	/* byte offsets of the IP and TCP headers/checksum fields for the
	 * context descriptor
	 */
	ipcss = skb_network_offset(skb);
	ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
	tucss = skb_transport_offset(skb);
	tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
	tucse = 0;

	cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
	               E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

	i = tx_ring->next_to_use;
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
	buffer_info = &tx_ring->buffer_info[i];

	context_desc->lower_setup.ip_fields.ipcss  = ipcss;
	context_desc->lower_setup.ip_fields.ipcso  = ipcso;
	context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
	context_desc->upper_setup.tcp_fields.tucss = tucss;
	context_desc->upper_setup.tcp_fields.tucso = tucso;
	context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
	context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
	context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
	context_desc->cmd_and_length = cpu_to_le32(cmd_length);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	/* advance next_to_use, wrapping at the end of the ring */
	i++;
	if (i == tx_ring->count)
		i = 0;
	tx_ring->next_to_use = i;

	return 1;
}
4543
/**
 * e1000_tx_csum - set up a checksum-offload context descriptor
 * @adapter: board private structure
 * @skb: packet with a partial checksum to be completed by hardware
 *
 * Writes one context descriptor giving the hardware the start and
 * destination offsets of the checksum, and advances next_to_use.
 *
 * Returns 1 if a context descriptor was queued, 0 if @skb does not
 * need checksum offload.
 */
static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u8 css;
	u32 cmd_len = E1000_TXD_CMD_DEXT;
	__be16 protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* for VLAN-tagged frames look at the encapsulated protocol */
	if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
		protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		protocol = skb->protocol;

	/* only TCP needs the TCP command bit set; other protocols still
	 * get a plain checksum context
	 */
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		/* XXX not handling all IPV6 headers */
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	default:
		if (unlikely(net_ratelimit()))
			e_warn("checksum_partial proto=%x!\n",
			       be16_to_cpu(protocol));
		break;
	}

	css = skb_checksum_start_offset(skb);

	i = tx_ring->next_to_use;
	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

	context_desc->lower_setup.ip_config = 0;
	context_desc->upper_setup.tcp_fields.tucss = css;
	context_desc->upper_setup.tcp_fields.tucso =
				css + skb->csum_offset;
	context_desc->upper_setup.tcp_fields.tucse = 0;
	context_desc->tcp_seg_setup.data = 0;
	context_desc->cmd_and_length = cpu_to_le32(cmd_len);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	/* advance next_to_use, wrapping at the end of the ring */
	i++;
	if (i == tx_ring->count)
		i = 0;
	tx_ring->next_to_use = i;

	return 1;
}
4603
4604#define E1000_MAX_PER_TXD 8192
4605#define E1000_MAX_TXD_PWR 12
4606
/**
 * e1000_tx_map - DMA-map the skb head and fragments into the Tx ring
 * @adapter: board private structure
 * @skb: packet to map
 * @first: ring index of the first descriptor used for this packet
 *         (context descriptor slot); its next_to_watch is pointed at
 *         the last data descriptor
 * @max_per_txd: maximum bytes per descriptor
 * @nr_frags: number of paged fragments in @skb
 * @mss: unused here, kept for the caller's signature
 *
 * Does NOT advance tx_ring->next_to_use; the caller does that in
 * e1000_tx_queue().  On DMA mapping failure all mappings made so far
 * are unwound.
 *
 * Returns the number of descriptors filled, or 0 on mapping error.
 */
static int e1000_tx_map(struct e1000_adapter *adapter,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f, bytecount, segs;

	i = tx_ring->next_to_use;

	/* map the linear (head) portion, splitting at max_per_txd */
	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);

		buffer_info->length = size;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		buffer_info->mapped_as_page = false;
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;

		len -= size;
		offset += size;
		count++;

		/* only advance i if more head data remains; the frag loop
		 * below pre-increments
		 */
		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	/* map each paged fragment, again splitting at max_per_txd */
	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->next_to_watch = i;
			buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
							offset, size,
							DMA_TO_DEVICE);
			buffer_info->mapped_as_page = true;
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;

			len -= size;
			offset += size;
			count++;
		}
	}

	segs = skb_shinfo(skb)->gso_segs ? : 1;
	/* multiply data chunks by size of headers */
	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;

	/* the skb and its accounting hang off the LAST descriptor; the
	 * first descriptor's next_to_watch points there so the clean-up
	 * path knows when the whole packet has completed
	 */
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].segs = segs;
	tx_ring->buffer_info[i].bytecount = bytecount;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	/* unwind: walk backwards from the failing slot and release every
	 * mapping made so far
	 */
	dev_err(&pdev->dev, "Tx DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(adapter, buffer_info);
	}

	return 0;
}
4704
/**
 * e1000_tx_queue - fill data descriptors and hand them to the hardware
 * @adapter: board private structure
 * @tx_flags: E1000_TX_FLAGS_* describing offloads/VLAN for this packet
 * @count: number of descriptors previously mapped by e1000_tx_map()
 *
 * Converts the mapped buffer_info entries starting at next_to_use into
 * legacy/extended Tx descriptors, sets the end-of-packet command bits on
 * the last one, then advances the tail register so DMA can start.
 */
static void e1000_tx_queue(struct e1000_adapter *adapter,
			   int tx_flags, int count)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	/* translate the offload flags into descriptor command/option bits */
	if (tx_flags & E1000_TX_FLAGS_TSO) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (tx_flags & E1000_TX_FLAGS_IPV4)
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_CSUM) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_VLAN) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	i = tx_ring->next_to_use;

	do {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);

		i++;
		if (i == tx_ring->count)
			i = 0;
	} while (--count > 0);

	/* last descriptor of the packet gets EOP/RS etc. */
	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;

	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
		e1000e_update_tdt_wa(adapter, i);
	else
		writel(i, adapter->hw.hw_addr + tx_ring->tail);

	/*
	 * we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems
	 */
	mmiowb();
}
4771
4772#define MINIMUM_DHCP_PACKET_SIZE 282
4773static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
4774 struct sk_buff *skb)
4775{
4776 struct e1000_hw *hw = &adapter->hw;
4777 u16 length, offset;
4778
4779 if (vlan_tx_tag_present(skb)) {
4780 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
4781 (adapter->hw.mng_cookie.status &
4782 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
4783 return 0;
4784 }
4785
4786 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
4787 return 0;
4788
4789 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
4790 return 0;
4791
4792 {
4793 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
4794 struct udphdr *udp;
4795
4796 if (ip->protocol != IPPROTO_UDP)
4797 return 0;
4798
4799 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
4800 if (ntohs(udp->dest) != 67)
4801 return 0;
4802
4803 offset = (u8 *)udp + 8 - skb->data;
4804 length = skb->len - offset;
4805 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
4806 }
4807
4808 return 0;
4809}
4810
4811static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
4812{
4813 struct e1000_adapter *adapter = netdev_priv(netdev);
4814
4815 netif_stop_queue(netdev);
4816 /*
4817 * Herbert's original patch had:
4818 * smp_mb__after_netif_stop_queue();
4819 * but since that doesn't exist yet, just open code it.
4820 */
4821 smp_mb();
4822
4823 /*
4824 * We need to check again in a case another CPU has just
4825 * made room available.
4826 */
4827 if (e1000_desc_unused(adapter->tx_ring) < size)
4828 return -EBUSY;
4829
4830 /* A reprieve! */
4831 netif_start_queue(netdev);
4832 ++adapter->restart_queue;
4833 return 0;
4834}
4835
4836static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
4837{
4838 struct e1000_adapter *adapter = netdev_priv(netdev);
4839
4840 if (e1000_desc_unused(adapter->tx_ring) >= size)
4841 return 0;
4842 return __e1000_maybe_stop_tx(netdev, size);
4843}
4844
4845#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
/**
 * e1000_xmit_frame - transmit entry point (ndo_start_xmit)
 * @skb: packet to transmit
 * @netdev: network interface device structure
 *
 * Sets up offload context descriptors (TSO/checksum), DMA-maps the
 * packet, and queues it to the hardware.  Returns NETDEV_TX_OK (the skb
 * is consumed, possibly dropped) or NETDEV_TX_BUSY when the ring is full
 * and the stack should retry.
 */
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int first;
	unsigned int max_per_txd = E1000_MAX_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;

	/* drop silently while the interface is going down */
	if (test_bit(__E1000_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	mss = skb_shinfo(skb)->gso_size;
	/*
	 * The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer. The calc is:
	 * 4 = ceil(buffer len/mss). To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops.
	 */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		/*
		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
		 * points to just header, pull a few bytes of payload from
		 * frags into skb->data
		 */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		/*
		 * we do this workaround for ES2LAN, but it is un-necessary,
		 * avoiding it could save a lot of cycles
		 */
		if (skb->data_len && (hdr_len == len)) {
			unsigned int pull_size;

			pull_size = min((unsigned int)4, skb->data_len);
			if (!__pskb_pull_tail(skb, pull_size)) {
				e_err("__pskb_pull_tail failed.\n");
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
			len = skb_headlen(skb);
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	/* count descriptors needed for the linear portion and each frag */
	count += TXD_USE_COUNT(len, max_txd_pwr);

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
				       max_txd_pwr);

	/* manageability: mirror DHCP requests to the firmware if enabled */
	if (adapter->hw.mac.tx_pkt_filtering)
		e1000_transfer_dhcp_info(adapter, skb);

	/*
	 * need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time
	 */
	if (e1000_maybe_stop_tx(netdev, count + 2))
		return NETDEV_TX_BUSY;

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	/* remember where this packet starts so we can unwind on error */
	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= E1000_TX_FLAGS_TSO;
	else if (e1000_tx_csum(adapter, skb))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	/*
	 * Old method was to assume IPv4 packet by default if TSO was enabled.
	 * 82571 hardware supports TSO capabilities for IPv6 as well...
	 * no longer assume, we must.
	 */
	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	/* if count is 0 then mapping error has occurred */
	count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
	if (count) {
		e1000_tx_queue(adapter, tx_flags, count);
		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);

	} else {
		/* mapping failed: drop the skb and rewind the ring so the
		 * context descriptor slot is reused
		 */
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
4972
4973/**
4974 * e1000_tx_timeout - Respond to a Tx Hang
4975 * @netdev: network interface device structure
4976 **/
4977static void e1000_tx_timeout(struct net_device *netdev)
4978{
4979 struct e1000_adapter *adapter = netdev_priv(netdev);
4980
4981 /* Do the reset outside of interrupt context */
4982 adapter->tx_timeout_count++;
4983 schedule_work(&adapter->reset_task);
4984}
4985
4986static void e1000_reset_task(struct work_struct *work)
4987{
4988 struct e1000_adapter *adapter;
4989 adapter = container_of(work, struct e1000_adapter, reset_task);
4990
4991 /* don't run the task if already down */
4992 if (test_bit(__E1000_DOWN, &adapter->state))
4993 return;
4994
4995 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4996 (adapter->flags & FLAG_RX_RESTART_NOW))) {
4997 e1000e_dump(adapter);
4998 e_err("Reset adapter\n");
4999 }
5000 e1000e_reinit_locked(adapter);
5001}
5002
5003/**
5004 * e1000_get_stats64 - Get System Network Statistics
5005 * @netdev: network interface device structure
5006 * @stats: rtnl_link_stats64 pointer
5007 *
5008 * Returns the address of the device statistics structure.
5009 **/
5010struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5011 struct rtnl_link_stats64 *stats)
5012{
5013 struct e1000_adapter *adapter = netdev_priv(netdev);
5014
5015 memset(stats, 0, sizeof(struct rtnl_link_stats64));
5016 spin_lock(&adapter->stats64_lock);
5017 e1000e_update_stats(adapter);
5018 /* Fill out the OS statistics structure */
5019 stats->rx_bytes = adapter->stats.gorc;
5020 stats->rx_packets = adapter->stats.gprc;
5021 stats->tx_bytes = adapter->stats.gotc;
5022 stats->tx_packets = adapter->stats.gptc;
5023 stats->multicast = adapter->stats.mprc;
5024 stats->collisions = adapter->stats.colc;
5025
5026 /* Rx Errors */
5027
5028 /*
5029 * RLEC on some newer hardware can be incorrect so build
5030 * our own version based on RUC and ROC
5031 */
5032 stats->rx_errors = adapter->stats.rxerrc +
5033 adapter->stats.crcerrs + adapter->stats.algnerrc +
5034 adapter->stats.ruc + adapter->stats.roc +
5035 adapter->stats.cexterr;
5036 stats->rx_length_errors = adapter->stats.ruc +
5037 adapter->stats.roc;
5038 stats->rx_crc_errors = adapter->stats.crcerrs;
5039 stats->rx_frame_errors = adapter->stats.algnerrc;
5040 stats->rx_missed_errors = adapter->stats.mpc;
5041
5042 /* Tx Errors */
5043 stats->tx_errors = adapter->stats.ecol +
5044 adapter->stats.latecol;
5045 stats->tx_aborted_errors = adapter->stats.ecol;
5046 stats->tx_window_errors = adapter->stats.latecol;
5047 stats->tx_carrier_errors = adapter->stats.tncrs;
5048
5049 /* Tx Dropped needs to be maintained elsewhere */
5050
5051 spin_unlock(&adapter->stats64_lock);
5052 return stats;
5053}
5054
5055/**
5056 * e1000_change_mtu - Change the Maximum Transfer Unit
5057 * @netdev: network interface device structure
5058 * @new_mtu: new value for maximum frame size
5059 *
5060 * Returns 0 on success, negative on failure
5061 **/
5062static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5063{
5064 struct e1000_adapter *adapter = netdev_priv(netdev);
5065 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5066
5067 /* Jumbo frame support */
5068 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
5069 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
5070 e_err("Jumbo Frames not supported.\n");
5071 return -EINVAL;
5072 }
5073
5074 /* Supported frame sizes */
5075 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
5076 (max_frame > adapter->max_hw_frame_size)) {
5077 e_err("Unsupported MTU setting\n");
5078 return -EINVAL;
5079 }
5080
5081 /* Jumbo frame workaround on 82579 requires CRC be stripped */
5082 if ((adapter->hw.mac.type == e1000_pch2lan) &&
5083 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5084 (new_mtu > ETH_DATA_LEN)) {
5085 e_err("Jumbo Frames not supported on 82579 when CRC "
5086 "stripping is disabled.\n");
5087 return -EINVAL;
5088 }
5089
5090 /* 82573 Errata 17 */
5091 if (((adapter->hw.mac.type == e1000_82573) ||
5092 (adapter->hw.mac.type == e1000_82574)) &&
5093 (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
5094 adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
5095 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
5096 }
5097
5098 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
5099 usleep_range(1000, 2000);
5100 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
5101 adapter->max_frame_size = max_frame;
5102 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5103 netdev->mtu = new_mtu;
5104 if (netif_running(netdev))
5105 e1000e_down(adapter);
5106
5107 /*
5108 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
5109 * means we reserve 2 more, this pushes us to allocate from the next
5110 * larger slab size.
5111 * i.e. RXBUFFER_2048 --> size-4096 slab
5112 * However with the new *_jumbo_rx* routines, jumbo receives will use
5113 * fragmented skbs
5114 */
5115
5116 if (max_frame <= 2048)
5117 adapter->rx_buffer_len = 2048;
5118 else
5119 adapter->rx_buffer_len = 4096;
5120
5121 /* adjust allocation if LPE protects us, and we aren't using SBP */
5122 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
5123 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
5124 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
5125 + ETH_FCS_LEN;
5126
5127 if (netif_running(netdev))
5128 e1000e_up(adapter);
5129 else
5130 e1000e_reset(adapter);
5131
5132 clear_bit(__E1000_RESETTING, &adapter->state);
5133
5134 return 0;
5135}
5136
/**
 * e1000_mii_ioctl - handle SIOCGMIIPHY/SIOCGMIIREG MII ioctls
 * @netdev: network interface device structure
 * @ifr: ioctl request containing the mii_ioctl_data
 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG)
 *
 * Register reads are served from the cached PHY register copies in
 * adapter->phy_regs (refreshed via e1000_phy_read_status()) rather than
 * by touching the PHY directly; register writes are not supported.
 *
 * Returns 0 on success, -EOPNOTSUPP for non-copper media or unsupported
 * commands, -EIO for an unsupported register number.
 */
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	/* MII only makes sense on copper PHYs */
	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		/* refresh the cached register set before answering */
		e1000_phy_read_status(adapter);

		switch (data->reg_num & 0x1F) {
		case MII_BMCR:
			data->val_out = adapter->phy_regs.bmcr;
			break;
		case MII_BMSR:
			data->val_out = adapter->phy_regs.bmsr;
			break;
		case MII_PHYSID1:
			data->val_out = (adapter->hw.phy.id >> 16);
			break;
		case MII_PHYSID2:
			data->val_out = (adapter->hw.phy.id & 0xFFFF);
			break;
		case MII_ADVERTISE:
			data->val_out = adapter->phy_regs.advertise;
			break;
		case MII_LPA:
			data->val_out = adapter->phy_regs.lpa;
			break;
		case MII_EXPANSION:
			data->val_out = adapter->phy_regs.expansion;
			break;
		case MII_CTRL1000:
			data->val_out = adapter->phy_regs.ctrl1000;
			break;
		case MII_STAT1000:
			data->val_out = adapter->phy_regs.stat1000;
			break;
		case MII_ESTATUS:
			data->val_out = adapter->phy_regs.estatus;
			break;
		default:
			return -EIO;
		}
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
5194
5195static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5196{
5197 switch (cmd) {
5198 case SIOCGMIIPHY:
5199 case SIOCGMIIREG:
5200 case SIOCSMIIREG:
5201 return e1000_mii_ioctl(netdev, ifr, cmd);
5202 default:
5203 return -EOPNOTSUPP;
5204 }
5205}
5206
/**
 * e1000_init_phy_wakeup - arm PHY-based wakeup before suspend
 * @adapter: board private structure
 * @wufc: Wake Up Filter Control bits to program
 *
 * Mirrors the MAC's receive address registers, multicast table and Rx
 * control configuration into the PHY's wakeup register page, then enables
 * wakeup in both the MAC (WUC/WUFC) and the PHY so the PHY can wake the
 * system while the MAC is powered down.
 *
 * Returns 0 on success or a negative error from PHY register access.
 */
static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 i, mac_reg;
	u16 phy_reg, wuc_enable;
	int retval = 0;

	/* copy MAC RARs to PHY RARs */
	e1000_copy_rx_addrs_to_phy_ich8lan(hw);

	retval = hw->phy.ops.acquire(hw);
	if (retval) {
		e_err("Could not acquire PHY\n");
		return retval;
	}

	/* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
	retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
	if (retval)
		goto out;

	/* copy MAC MTA to PHY MTA - only needed for pchlan */
	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		/* each 32-bit MTA entry becomes two 16-bit PHY registers */
		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
					   (u16)((mac_reg >> 16) & 0xFFFF));
	}

	/* configure PHY Rx Control register to match the MAC's RCTL */
	hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
	mac_reg = er32(RCTL);
	if (mac_reg & E1000_RCTL_UPE)
		phy_reg |= BM_RCTL_UPE;
	if (mac_reg & E1000_RCTL_MPE)
		phy_reg |= BM_RCTL_MPE;
	phy_reg &= ~(BM_RCTL_MO_MASK);
	if (mac_reg & E1000_RCTL_MO_3)
		phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
				<< BM_RCTL_MO_SHIFT);
	if (mac_reg & E1000_RCTL_BAM)
		phy_reg |= BM_RCTL_BAM;
	if (mac_reg & E1000_RCTL_PMCF)
		phy_reg |= BM_RCTL_PMCF;
	mac_reg = er32(CTRL);
	if (mac_reg & E1000_CTRL_RFCE)
		phy_reg |= BM_RCTL_RFCE;
	hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);

	/* enable PHY wakeup in MAC register */
	ew32(WUFC, wufc);
	ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);

	/* configure and enable PHY wakeup in PHY registers */
	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

	/* activate PHY wakeup */
	wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
	if (retval)
		e_err("Could not set PHY Host Wakeup bit\n");
out:
	hw->phy.ops.release(hw);

	return retval;
}
5275
/**
 * __e1000_shutdown - common suspend/shutdown path
 * @pdev: PCI device
 * @enable_wake: set to true if the device should be armed for wakeup
 * @runtime: true for runtime suspend (wake only on link change)
 *
 * Quiesces the interface, programs the wake-up filters (via the PHY when
 * the part supports PHY wakeup, otherwise via the MAC), releases firmware
 * control and disables the PCI device.  Returns 0 or a negative errno.
 */
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
			    bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	/* Runtime suspend should only enable wakeup for link changes */
	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
		e1000e_down(adapter);
		e1000_free_irq(adapter);
	}
	e1000e_reset_interrupt_capability(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	status = er32(STATUS);
	/* link is already up - no point waking on a link change */
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		ctrl = er32(CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
			ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
		ew32(CTRL, ctrl);

		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
		    adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		if (adapter->flags & FLAG_IS_ICH)
			e1000_suspend_workarounds_ich8lan(&adapter->hw);

		/* Allow time for pending master requests to run */
		e1000e_disable_pcie_master(&adapter->hw);

		if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
			/* enable wakeup by the PHY */
			retval = e1000_init_phy_wakeup(adapter, wufc);
			if (retval)
				return retval;
		} else {
			/* enable wakeup by the MAC */
			ew32(WUFC, wufc);
			ew32(WUC, E1000_WUC_PME_EN);
		}
	} else {
		/* no wake sources requested - disarm wakeup entirely */
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
	    (hw->mac.ops.check_mng_mode(hw)))
		*enable_wake = true;

	if (adapter->hw.phy.type == e1000_phy_igp_3)
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);

	/*
	 * Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000e_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
5375
5376static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
5377{
5378 if (sleep && wake) {
5379 pci_prepare_to_sleep(pdev);
5380 return;
5381 }
5382
5383 pci_wake_from_d3(pdev, wake);
5384 pci_set_power_state(pdev, PCI_D3hot);
5385}
5386
/**
 * e1000_complete_shutdown - finish powering the device down
 * @pdev: PCI device
 * @sleep: true when the system is suspending
 * @wake: true when wakeup should be armed
 *
 * Powers the device off via e1000_power_off(), wrapping the transition
 * with a quad-port workaround that masks correctable-error reporting on
 * the upstream PCIe switch port during the D0->D3 transition.
 */
static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
				    bool wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/*
	 * The pci-e switch on some quad port adapters will report a
	 * correctable error when the MAC transitions from D0 to D3. To
	 * prevent this we need to mask off the correctable errors on the
	 * downstream port of the pci-e switch.
	 */
	if (adapter->flags & FLAG_IS_QUAD_PORT) {
		struct pci_dev *us_dev = pdev->bus->self;
		int pos = pci_pcie_cap(us_dev);
		u16 devctl;

		/* temporarily clear CERE around the power transition */
		pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
		                      (devctl & ~PCI_EXP_DEVCTL_CERE));

		e1000_power_off(pdev, sleep, wake);

		/* restore the original device control value */
		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
	} else {
		e1000_power_off(pdev, sleep, wake);
	}
}
5415
#ifdef CONFIG_PCIEASPM
/* With PCIe ASPM support in the kernel, defer to the PCI core. */
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	pci_disable_link_state_locked(pdev, state);
}
#else
/*
 * Without CONFIG_PCIEASPM, clear the requested ASPM link-state bits
 * directly in the PCIe Link Control registers of the device and its
 * upstream bridge.
 */
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	int pos;
	u16 reg16;

	/*
	 * Both device and parent should have the same ASPM setting.
	 * Disable ASPM in downstream component first and then upstream.
	 */
	pos = pci_pcie_cap(pdev);
	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
	reg16 &= ~state;
	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);

	/* a root bus device has no upstream component to adjust */
	if (!pdev->bus->self)
		return;

	pos = pci_pcie_cap(pdev->bus->self);
	pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
	reg16 &= ~state;
	pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
}
#endif
5445static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5446{
5447 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
5448 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
5449 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
5450
5451 __e1000e_disable_aspm(pdev, state);
5452}
5453
5454#ifdef CONFIG_PM
5455static bool e1000e_pm_ready(struct e1000_adapter *adapter)
5456{
5457 return !!adapter->tx_ring->buffer_info;
5458}
5459
/**
 * __e1000_resume - common resume path from S3/S4/runtime suspend
 * @pdev: PCI device
 *
 * Re-applies ASPM workarounds, restores PCI state, re-acquires interrupt
 * resources, powers up the PHY, reports the wakeup cause, resets the
 * hardware and brings the interface back up.
 *
 * Returns 0 on success or a negative errno from IRQ setup.
 */
static int __e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 aspm_disable_flag = 0;
	u32 err;

	/* re-apply any ASPM errata workarounds before touching the device */
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	e1000e_set_interrupt_capability(adapter);
	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	if (hw->mac.type == e1000_pch2lan)
		e1000_resume_workarounds_pchlan(&adapter->hw);

	e1000e_power_up_phy(adapter);

	/* report the system wakeup cause from S3/S4 */
	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
		u16 phy_data;

		/* wakeup status lives in the PHY on parts with PHY wakeup */
		e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
		if (phy_data) {
			e_info("PHY Wakeup cause - %s\n",
				phy_data & E1000_WUS_EX ? "Unicast Packet" :
				phy_data & E1000_WUS_MC ? "Multicast Packet" :
				phy_data & E1000_WUS_BC ? "Broadcast Packet" :
				phy_data & E1000_WUS_MAG ? "Magic Packet" :
				phy_data & E1000_WUS_LNKC ? "Link Status "
				" Change" : "other");
		}
		/* clear the sticky wakeup-status bits */
		e1e_wphy(&adapter->hw, BM_WUS, ~0);
	} else {
		u32 wus = er32(WUS);
		if (wus) {
			e_info("MAC Wakeup cause - %s\n",
				wus & E1000_WUS_EX ? "Unicast Packet" :
				wus & E1000_WUS_MC ? "Multicast Packet" :
				wus & E1000_WUS_BC ? "Broadcast Packet" :
				wus & E1000_WUS_MAG ? "Magic Packet" :
				wus & E1000_WUS_LNKC ? "Link Status Change" :
				"other");
		}
		ew32(WUS, ~0);
	}

	e1000e_reset(adapter);

	e1000_init_manageability_pt(adapter);

	if (netif_running(netdev))
		e1000e_up(adapter);

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	return 0;
}
5539
5540#ifdef CONFIG_PM_SLEEP
5541static int e1000_suspend(struct device *dev)
5542{
5543 struct pci_dev *pdev = to_pci_dev(dev);
5544 int retval;
5545 bool wake;
5546
5547 retval = __e1000_shutdown(pdev, &wake, false);
5548 if (!retval)
5549 e1000_complete_shutdown(pdev, true, wake);
5550
5551 return retval;
5552}
5553
5554static int e1000_resume(struct device *dev)
5555{
5556 struct pci_dev *pdev = to_pci_dev(dev);
5557 struct net_device *netdev = pci_get_drvdata(pdev);
5558 struct e1000_adapter *adapter = netdev_priv(netdev);
5559
5560 if (e1000e_pm_ready(adapter))
5561 adapter->idle_check = true;
5562
5563 return __e1000_resume(pdev);
5564}
5565#endif /* CONFIG_PM_SLEEP */
5566
5567#ifdef CONFIG_PM_RUNTIME
5568static int e1000_runtime_suspend(struct device *dev)
5569{
5570 struct pci_dev *pdev = to_pci_dev(dev);
5571 struct net_device *netdev = pci_get_drvdata(pdev);
5572 struct e1000_adapter *adapter = netdev_priv(netdev);
5573
5574 if (e1000e_pm_ready(adapter)) {
5575 bool wake;
5576
5577 __e1000_shutdown(pdev, &wake, true);
5578 }
5579
5580 return 0;
5581}
5582
5583static int e1000_idle(struct device *dev)
5584{
5585 struct pci_dev *pdev = to_pci_dev(dev);
5586 struct net_device *netdev = pci_get_drvdata(pdev);
5587 struct e1000_adapter *adapter = netdev_priv(netdev);
5588
5589 if (!e1000e_pm_ready(adapter))
5590 return 0;
5591
5592 if (adapter->idle_check) {
5593 adapter->idle_check = false;
5594 if (!e1000e_has_link(adapter))
5595 pm_schedule_suspend(dev, MSEC_PER_SEC);
5596 }
5597
5598 return -EBUSY;
5599}
5600
5601static int e1000_runtime_resume(struct device *dev)
5602{
5603 struct pci_dev *pdev = to_pci_dev(dev);
5604 struct net_device *netdev = pci_get_drvdata(pdev);
5605 struct e1000_adapter *adapter = netdev_priv(netdev);
5606
5607 if (!e1000e_pm_ready(adapter))
5608 return 0;
5609
5610 adapter->idle_check = !dev->power.runtime_auto;
5611 return __e1000_resume(pdev);
5612}
5613#endif /* CONFIG_PM_RUNTIME */
5614#endif /* CONFIG_PM */
5615
/*
 * PCI shutdown callback (reboot/halt path, not suspend).  Quiesce the
 * device; only complete the power-off sequence when the machine is
 * actually powering off, so wake-on-LAN state is armed correctly.
 */
static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake = false;

	__e1000_shutdown(pdev, &wake, false);

	if (system_state == SYSTEM_POWER_OFF)
		e1000_complete_shutdown(pdev, false, wake);
}
5625
5626#ifdef CONFIG_NET_POLL_CONTROLLER
5627
5628static irqreturn_t e1000_intr_msix(int irq, void *data)
5629{
5630 struct net_device *netdev = data;
5631 struct e1000_adapter *adapter = netdev_priv(netdev);
5632
5633 if (adapter->msix_entries) {
5634 int vector, msix_irq;
5635
5636 vector = 0;
5637 msix_irq = adapter->msix_entries[vector].vector;
5638 disable_irq(msix_irq);
5639 e1000_intr_msix_rx(msix_irq, netdev);
5640 enable_irq(msix_irq);
5641
5642 vector++;
5643 msix_irq = adapter->msix_entries[vector].vector;
5644 disable_irq(msix_irq);
5645 e1000_intr_msix_tx(msix_irq, netdev);
5646 enable_irq(msix_irq);
5647
5648 vector++;
5649 msix_irq = adapter->msix_entries[vector].vector;
5650 disable_irq(msix_irq);
5651 e1000_msix_other(msix_irq, netdev);
5652 enable_irq(msix_irq);
5653 }
5654
5655 return IRQ_HANDLED;
5656}
5657
5658/*
5659 * Polling 'interrupt' - used by things like netconsole to send skbs
5660 * without having to re-enable interrupts. It's not called while
5661 * the interrupt routine is executing.
5662 */
5663static void e1000_netpoll(struct net_device *netdev)
5664{
5665 struct e1000_adapter *adapter = netdev_priv(netdev);
5666
5667 switch (adapter->int_mode) {
5668 case E1000E_INT_MODE_MSIX:
5669 e1000_intr_msix(adapter->pdev->irq, netdev);
5670 break;
5671 case E1000E_INT_MODE_MSI:
5672 disable_irq(adapter->pdev->irq);
5673 e1000_intr_msi(adapter->pdev->irq, netdev);
5674 enable_irq(adapter->pdev->irq);
5675 break;
5676 default: /* E1000E_INT_MODE_LEGACY */
5677 disable_irq(adapter->pdev->irq);
5678 e1000_intr(adapter->pdev->irq, netdev);
5679 enable_irq(adapter->pdev->irq);
5680 break;
5681 }
5682}
5683#endif
5684
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* detach first so the stack stops using the device immediately */
	netif_device_detach(netdev);

	/* the link is dead for good; recovery is not possible */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000e_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
5711
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 aspm_disable_flag = 0;
	int err;
	pci_ers_result_t result;

	/* re-apply the per-part ASPM quirks lost across the bus reset */
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		/* force-restore: config space was clobbered by the reset */
		pdev->state_saved = true;
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		e1000e_reset(adapter);
		/* clear any stale wakeup-status bits */
		ew32(WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}
5757
5758/**
5759 * e1000_io_resume - called when traffic can start flowing again.
5760 * @pdev: Pointer to PCI device
5761 *
5762 * This callback is called when the error recovery driver tells us that
5763 * its OK to resume normal operation. Implementation resembles the
5764 * second-half of the e1000_resume routine.
5765 */
5766static void e1000_io_resume(struct pci_dev *pdev)
5767{
5768 struct net_device *netdev = pci_get_drvdata(pdev);
5769 struct e1000_adapter *adapter = netdev_priv(netdev);
5770
5771 e1000_init_manageability_pt(adapter);
5772
5773 if (netif_running(netdev)) {
5774 if (e1000e_up(adapter)) {
5775 dev_err(&pdev->dev,
5776 "can't bring device back up after reset\n");
5777 return;
5778 }
5779 }
5780
5781 netif_device_attach(netdev);
5782
5783 /*
5784 * If the controller has AMT, do not set DRV_LOAD until the interface
5785 * is up. For all other cases, let the f/w know that the h/w is now
5786 * under the control of the driver.
5787 */
5788 if (!(adapter->flags & FLAG_HAS_AMT))
5789 e1000e_get_hw_control(adapter);
5790
5791}
5792
5793static void e1000_print_device_info(struct e1000_adapter *adapter)
5794{
5795 struct e1000_hw *hw = &adapter->hw;
5796 struct net_device *netdev = adapter->netdev;
5797 u32 ret_val;
5798 u8 pba_str[E1000_PBANUM_LENGTH];
5799
5800 /* print bus type/speed/width info */
5801 e_info("(PCI Express:2.5GT/s:%s) %pM\n",
5802 /* bus width */
5803 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
5804 "Width x1"),
5805 /* MAC address */
5806 netdev->dev_addr);
5807 e_info("Intel(R) PRO/%s Network Connection\n",
5808 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
5809 ret_val = e1000_read_pba_string_generic(hw, pba_str,
5810 E1000_PBANUM_LENGTH);
5811 if (ret_val)
5812 strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
5813 e_info("MAC: %d, PHY: %d, PBA No: %s\n",
5814 hw->mac.type, hw->phy.type, pba_str);
5815}
5816
5817static void e1000_eeprom_checks(struct e1000_adapter *adapter)
5818{
5819 struct e1000_hw *hw = &adapter->hw;
5820 int ret_val;
5821 u16 buf = 0;
5822
5823 if (hw->mac.type != e1000_82573)
5824 return;
5825
5826 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
5827 if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
5828 /* Deep Smart Power Down (DSPD) */
5829 dev_warn(&adapter->pdev->dev,
5830 "Warning: detected DSPD enabled in EEPROM\n");
5831 }
5832}
5833
/* net_device_ops for every e1000e-managed interface */
static const struct net_device_ops e1000e_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats64	= e1000e_get_stats64,
	.ndo_set_multicast_list	= e1000_set_multi,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,

	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
};
5852
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	resource_size_t mmio_start, mmio_len;
	resource_size_t flash_start, flash_len;

	static int cards_found;
	u16 aspm_disable_flag = 0;
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;

	/* per-board ASPM quirks must be applied before enabling the device */
	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* prefer 64-bit DMA; fall back to 32-bit if the platform lacks it */
	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions_exclusive(pdev,
	                                  pci_select_bars(pdev, IORESOURCE_MEM),
	                                  e1000e_driver_name);
	if (err)
		goto err_pci_reg;

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	/* PCI config space info */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	netdev->irq = pdev->irq;

	/* wire netdev <-> adapter <-> pdev and copy board info from 'ei' */
	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->max_hw_frame_size = ei->max_hw_frame_size;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	/* map BAR 0 (device registers) */
	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	/* map BAR 1 (NVM flash) on parts that expose it */
	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* construct the net_device struct */
	netdev->netdev_ops		= &e1000e_netdev_ops;
	e1000e_set_ethtool_ops(netdev);
	netdev->watchdog_timeo		= 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;

	/* parse module parameters for this board */
	e1000e_check_options(adapter);

	/* setup adapter struct */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* install the family-specific MAC/NVM/PHY operation tables */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;

	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (e1000_check_reset_block(&adapter->hw))
		e_info("PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/*
	 * before reading the NVM, reset the controller to
	 * put the device in a known good starting state
	 */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/*
	 * systems with ASPM and others may see the checksum fail on the first
	 * attempt. Let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			e_err("The NVM Checksum Is Not Valid\n");
#ifndef CONFIG_E1000E_DISABLE_CHECKSUM
			err = -EIO;
			goto err_eeprom;
#else
			break;
#endif
		}
	}

	e1000_eeprom_checks(adapter);

	/* copy the MAC address */
	if (e1000e_read_mac_addr(&adapter->hw))
		e_err("NVM Read Error while reading MAC address\n");

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
#ifndef CONFIG_E1000E_DISABLE_CHECKSUM
		err = -EIO;
		goto err_eeprom;
#endif
	}

	/* watchdog: periodic link/stats maintenance */
	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	/* deferred PHY info refresh after link changes */
	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = 1;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	/* 0x2f = advertise all speeds/duplexes */
	adapter->hw.phy.autoneg_advertised = 0x2f;

	/* ring size defaults */
	adapter->rx_ring->count = 256;
	adapter->tx_ring->count = 256;

	/*
	 * Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
		if ((hw->mac.type > e1000_ich10lan) &&
		    (eeprom_data & E1000_WUC_PHY_WAKE))
			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		else
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	}

	/* fetch WoL from EEPROM */
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/*
	 * now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);

	/* reset the hardware with the new settings */
	e1000e_reset(adapter);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e1000_print_device_info(adapter);

	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	return 0;

	/* unwind in strict reverse order of acquisition */
err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_release_hw_control(adapter);
err_eeprom:
	if (!e1000_check_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	e1000e_reset_interrupt_capability(adapter);
err_flashmap:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
6191
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. The could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	bool down = test_bit(__E1000_DOWN, &adapter->state);

	/*
	 * The timers may be rescheduled, so explicitly disable them
	 * from being rescheduled.
	 */
	if (!down)
		set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	/* flush every work item before the adapter goes away */
	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->downshift_task);
	cancel_work_sync(&adapter->update_phy_task);
	cancel_work_sync(&adapter->print_hang_task);

	/* if the interface is closed, e1000_close() won't power it down */
	if (!(netdev->flags & IFF_UP))
		e1000_power_down_phy(adapter);

	/* Don't lie to e1000_close() down the road. */
	if (!down)
		clear_bit(__E1000_DOWN, &adapter->state);
	unregister_netdev(netdev);

	/* balance the pm_runtime_put_noidle() done in probe */
	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/*
	 * Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000e_release_hw_control(adapter);

	e1000e_reset_interrupt_capability(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	/* AER disable */
	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
6256
/* PCI Error Recovery (ERS): detect -> slot reset -> resume traffic */
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
6263
/*
 * PCI device ID table.  The driver_data field selects the board type,
 * which indexes e1000_info_tbl[] in e1000_probe().  Entries are grouped
 * by MAC family.
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },

	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
6335
#ifdef CONFIG_PM
/* System-sleep and runtime PM callbacks; both resume via __e1000_resume() */
static const struct dev_pm_ops e1000_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
	SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
			   e1000_runtime_resume, e1000_idle)
};
#endif
6343
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name     = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	.driver.pm = &e1000_pm_ops,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
6356
6357/**
6358 * e1000_init_module - Driver Registration Routine
6359 *
6360 * e1000_init_module is the first routine called when the driver is
6361 * loaded. All it does is register with the PCI subsystem.
6362 **/
6363static int __init e1000_init_module(void)
6364{
6365 int ret;
6366 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
6367 e1000e_driver_version);
6368 pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
6369 ret = pci_register_driver(&e1000_driver);
6370
6371 return ret;
6372}
6373module_init(e1000_init_module);
6374
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
module_exit(e1000_exit_module);


/* module metadata, visible via modinfo */
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
6392
6393/* e1000_main.c */
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
new file mode 100644
index 00000000000..4dd9b63273f
--- /dev/null
+++ b/drivers/net/e1000e/param.c
@@ -0,0 +1,478 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include <linux/netdevice.h>
30#include <linux/pci.h>
31
32#include "e1000.h"
33
34/*
35 * This is the only thing that needs to be changed to adjust the
36 * maximum number of ports that the driver can manage.
37 */
38
39#define E1000_MAX_NIC 32
40
41#define OPTION_UNSET -1
42#define OPTION_DISABLED 0
43#define OPTION_ENABLED 1
44
/* Packets at or below this size (bytes) are copied into a freshly
 * allocated buffer on receive; writable at runtime via sysfs (0644). */
#define COPYBREAK_DEFAULT 256
unsigned int copybreak = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
50
51/*
52 * All parameters are treated the same, as an integer array of values.
53 * This macro just reduces the need to repeat the same declaration code
54 * over and over (plus this helps to avoid typo bugs).
55 */
56
57#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
58#define E1000_PARAM(X, desc) \
59 static int __devinitdata X[E1000_MAX_NIC+1] \
60 = E1000_PARAM_INIT; \
61 static unsigned int num_##X; \
62 module_param_array_named(X, X, int, &num_##X, 0); \
63 MODULE_PARM_DESC(X, desc);
64
65/*
66 * Transmit Interrupt Delay in units of 1.024 microseconds
67 * Tx interrupt delay needs to typically be set to something non-zero
68 *
69 * Valid Range: 0-65535
70 */
71E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
72#define DEFAULT_TIDV 8
73#define MAX_TXDELAY 0xFFFF
74#define MIN_TXDELAY 0
75
76/*
77 * Transmit Absolute Interrupt Delay in units of 1.024 microseconds
78 *
79 * Valid Range: 0-65535
80 */
81E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
82#define DEFAULT_TADV 32
83#define MAX_TXABSDELAY 0xFFFF
84#define MIN_TXABSDELAY 0
85
86/*
87 * Receive Interrupt Delay in units of 1.024 microseconds
88 * hardware will likely hang if you set this to anything but zero.
89 *
90 * Valid Range: 0-65535
91 */
92E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
93#define MAX_RXDELAY 0xFFFF
94#define MIN_RXDELAY 0
95
96/*
97 * Receive Absolute Interrupt Delay in units of 1.024 microseconds
98 *
99 * Valid Range: 0-65535
100 */
101E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
102#define MAX_RXABSDELAY 0xFFFF
103#define MIN_RXABSDELAY 0
104
105/*
106 * Interrupt Throttle Rate (interrupts/sec)
107 *
108 * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
109 */
110E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
111#define DEFAULT_ITR 3
112#define MAX_ITR 100000
113#define MIN_ITR 100
114
115/* IntMode (Interrupt Mode)
116 *
117 * Valid Range: 0 - 2
118 *
119 * Default Value: 2 (MSI-X)
120 */
121E1000_PARAM(IntMode, "Interrupt Mode");
122#define MAX_INTMODE 2
123#define MIN_INTMODE 0
124
125/*
126 * Enable Smart Power Down of the PHY
127 *
128 * Valid Range: 0, 1
129 *
130 * Default Value: 0 (disabled)
131 */
132E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
133
134/*
135 * Enable Kumeran Lock Loss workaround
136 *
137 * Valid Range: 0, 1
138 *
139 * Default Value: 1 (enabled)
140 */
141E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
142
143/*
144 * Write Protect NVM
145 *
146 * Valid Range: 0, 1
147 *
148 * Default Value: 1 (enabled)
149 */
150E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
151
152/*
153 * Enable CRC Stripping
154 *
155 * Valid Range: 0, 1
156 *
157 * Default Value: 1 (enabled)
158 */
159E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \
160 "the CRC");
161
/*
 * Descriptor for one module parameter: how it is validated (on/off
 * switch, numeric range, or list of discrete values), its printable
 * name, the message logged when an invalid value is supplied, the
 * default, and the type-specific validation argument.
 */
struct e1000_option {
	enum { enable_option, range_option, list_option } type;
	const char *name;	/* human-readable parameter name */
	const char *err;	/* suffix logged on invalid input */
	int def;		/* default applied when unset/invalid */
	union {
		struct { /* range_option info */
			int min;
			int max;
		} r;
		struct { /* list_option info */
			int nr;
			struct e1000_opt_list { int i; char *str; } *p;
		} l;
	} arg;
};
178
/*
 * Validate one user-supplied option value against its descriptor.
 *
 * If the user left the value unset, the default is applied and 0 is
 * returned.  Otherwise the value is checked per opt->type; on failure
 * the default is substituted, a message is logged, and -1 is returned.
 *
 * NOTE(review): *value is unsigned while OPTION_UNSET is -1, so the
 * equality test relies on the usual arithmetic conversions (an unset
 * value arrives as UINT_MAX) — intentional, do not "fix".
 */
static int __devinit e1000_validate_option(unsigned int *value,
					   const struct e1000_option *opt,
					   struct e1000_adapter *adapter)
{
	if (*value == OPTION_UNSET) {
		*value = opt->def;
		return 0;
	}

	switch (opt->type) {
	case enable_option:
		switch (*value) {
		case OPTION_ENABLED:
			e_info("%s Enabled\n", opt->name);
			return 0;
		case OPTION_DISABLED:
			e_info("%s Disabled\n", opt->name);
			return 0;
		}
		break;
	case range_option:
		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
			e_info("%s set to %i\n", opt->name, *value);
			return 0;
		}
		break;
	case list_option: {
		int i;
		struct e1000_opt_list *ent;

		for (i = 0; i < opt->arg.l.nr; i++) {
			ent = &opt->arg.l.p[i];
			if (*value == ent->i) {
				/* empty string means "valid but silent" */
				if (ent->str[0] != '\0')
					e_info("%s\n", ent->str);
				return 0;
			}
		}
	}
		break;
	default:
		BUG();
	}

	e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
	       opt->err);
	*value = opt->def;
	return -1;
}
228
229/**
230 * e1000e_check_options - Range Checking for Command Line Parameters
231 * @adapter: board private structure
232 *
233 * This routine checks all command line parameters for valid user
234 * input. If an invalid value is given, or if no user specified
235 * value exists, a default value is used. The final value is stored
236 * in a variable in the adapter structure.
237 **/
238void __devinit e1000e_check_options(struct e1000_adapter *adapter)
239{
240 struct e1000_hw *hw = &adapter->hw;
241 int bd = adapter->bd_number;
242
243 if (bd >= E1000_MAX_NIC) {
244 e_notice("Warning: no configuration for board #%i\n", bd);
245 e_notice("Using defaults for all values\n");
246 }
247
248 { /* Transmit Interrupt Delay */
249 static const struct e1000_option opt = {
250 .type = range_option,
251 .name = "Transmit Interrupt Delay",
252 .err = "using default of "
253 __MODULE_STRING(DEFAULT_TIDV),
254 .def = DEFAULT_TIDV,
255 .arg = { .r = { .min = MIN_TXDELAY,
256 .max = MAX_TXDELAY } }
257 };
258
259 if (num_TxIntDelay > bd) {
260 adapter->tx_int_delay = TxIntDelay[bd];
261 e1000_validate_option(&adapter->tx_int_delay, &opt,
262 adapter);
263 } else {
264 adapter->tx_int_delay = opt.def;
265 }
266 }
267 { /* Transmit Absolute Interrupt Delay */
268 static const struct e1000_option opt = {
269 .type = range_option,
270 .name = "Transmit Absolute Interrupt Delay",
271 .err = "using default of "
272 __MODULE_STRING(DEFAULT_TADV),
273 .def = DEFAULT_TADV,
274 .arg = { .r = { .min = MIN_TXABSDELAY,
275 .max = MAX_TXABSDELAY } }
276 };
277
278 if (num_TxAbsIntDelay > bd) {
279 adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
280 e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
281 adapter);
282 } else {
283 adapter->tx_abs_int_delay = opt.def;
284 }
285 }
286 { /* Receive Interrupt Delay */
287 static struct e1000_option opt = {
288 .type = range_option,
289 .name = "Receive Interrupt Delay",
290 .err = "using default of "
291 __MODULE_STRING(DEFAULT_RDTR),
292 .def = DEFAULT_RDTR,
293 .arg = { .r = { .min = MIN_RXDELAY,
294 .max = MAX_RXDELAY } }
295 };
296
297 if (num_RxIntDelay > bd) {
298 adapter->rx_int_delay = RxIntDelay[bd];
299 e1000_validate_option(&adapter->rx_int_delay, &opt,
300 adapter);
301 } else {
302 adapter->rx_int_delay = opt.def;
303 }
304 }
305 { /* Receive Absolute Interrupt Delay */
306 static const struct e1000_option opt = {
307 .type = range_option,
308 .name = "Receive Absolute Interrupt Delay",
309 .err = "using default of "
310 __MODULE_STRING(DEFAULT_RADV),
311 .def = DEFAULT_RADV,
312 .arg = { .r = { .min = MIN_RXABSDELAY,
313 .max = MAX_RXABSDELAY } }
314 };
315
316 if (num_RxAbsIntDelay > bd) {
317 adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
318 e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
319 adapter);
320 } else {
321 adapter->rx_abs_int_delay = opt.def;
322 }
323 }
324 { /* Interrupt Throttling Rate */
325 static const struct e1000_option opt = {
326 .type = range_option,
327 .name = "Interrupt Throttling Rate (ints/sec)",
328 .err = "using default of "
329 __MODULE_STRING(DEFAULT_ITR),
330 .def = DEFAULT_ITR,
331 .arg = { .r = { .min = MIN_ITR,
332 .max = MAX_ITR } }
333 };
334
335 if (num_InterruptThrottleRate > bd) {
336 adapter->itr = InterruptThrottleRate[bd];
337 switch (adapter->itr) {
338 case 0:
339 e_info("%s turned off\n", opt.name);
340 break;
341 case 1:
342 e_info("%s set to dynamic mode\n", opt.name);
343 adapter->itr_setting = adapter->itr;
344 adapter->itr = 20000;
345 break;
346 case 3:
347 e_info("%s set to dynamic conservative mode\n",
348 opt.name);
349 adapter->itr_setting = adapter->itr;
350 adapter->itr = 20000;
351 break;
352 case 4:
353 e_info("%s set to simplified (2000-8000 ints) "
354 "mode\n", opt.name);
355 adapter->itr_setting = 4;
356 break;
357 default:
358 /*
359 * Save the setting, because the dynamic bits
360 * change itr.
361 */
362 if (e1000_validate_option(&adapter->itr, &opt,
363 adapter) &&
364 (adapter->itr == 3)) {
365 /*
366 * In case of invalid user value,
367 * default to conservative mode.
368 */
369 adapter->itr_setting = adapter->itr;
370 adapter->itr = 20000;
371 } else {
372 /*
373 * Clear the lower two bits because
374 * they are used as control.
375 */
376 adapter->itr_setting =
377 adapter->itr & ~3;
378 }
379 break;
380 }
381 } else {
382 adapter->itr_setting = opt.def;
383 adapter->itr = 20000;
384 }
385 }
386 { /* Interrupt Mode */
387 static struct e1000_option opt = {
388 .type = range_option,
389 .name = "Interrupt Mode",
390 .err = "defaulting to 2 (MSI-X)",
391 .def = E1000E_INT_MODE_MSIX,
392 .arg = { .r = { .min = MIN_INTMODE,
393 .max = MAX_INTMODE } }
394 };
395
396 if (num_IntMode > bd) {
397 unsigned int int_mode = IntMode[bd];
398 e1000_validate_option(&int_mode, &opt, adapter);
399 adapter->int_mode = int_mode;
400 } else {
401 adapter->int_mode = opt.def;
402 }
403 }
404 { /* Smart Power Down */
405 static const struct e1000_option opt = {
406 .type = enable_option,
407 .name = "PHY Smart Power Down",
408 .err = "defaulting to Disabled",
409 .def = OPTION_DISABLED
410 };
411
412 if (num_SmartPowerDownEnable > bd) {
413 unsigned int spd = SmartPowerDownEnable[bd];
414 e1000_validate_option(&spd, &opt, adapter);
415 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN)
416 && spd)
417 adapter->flags |= FLAG_SMART_POWER_DOWN;
418 }
419 }
420 { /* CRC Stripping */
421 static const struct e1000_option opt = {
422 .type = enable_option,
423 .name = "CRC Stripping",
424 .err = "defaulting to Enabled",
425 .def = OPTION_ENABLED
426 };
427
428 if (num_CrcStripping > bd) {
429 unsigned int crc_stripping = CrcStripping[bd];
430 e1000_validate_option(&crc_stripping, &opt, adapter);
431 if (crc_stripping == OPTION_ENABLED)
432 adapter->flags2 |= FLAG2_CRC_STRIPPING;
433 } else {
434 adapter->flags2 |= FLAG2_CRC_STRIPPING;
435 }
436 }
437 { /* Kumeran Lock Loss Workaround */
438 static const struct e1000_option opt = {
439 .type = enable_option,
440 .name = "Kumeran Lock Loss Workaround",
441 .err = "defaulting to Enabled",
442 .def = OPTION_ENABLED
443 };
444
445 if (num_KumeranLockLoss > bd) {
446 unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
447 e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
448 if (hw->mac.type == e1000_ich8lan)
449 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
450 kmrn_lock_loss);
451 } else {
452 if (hw->mac.type == e1000_ich8lan)
453 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
454 opt.def);
455 }
456 }
457 { /* Write-protect NVM */
458 static const struct e1000_option opt = {
459 .type = enable_option,
460 .name = "Write-protect NVM",
461 .err = "defaulting to Enabled",
462 .def = OPTION_ENABLED
463 };
464
465 if (adapter->flags & FLAG_IS_ICH) {
466 if (num_WriteProtectNVM > bd) {
467 unsigned int write_protect_nvm = WriteProtectNVM[bd];
468 e1000_validate_option(&write_protect_nvm, &opt,
469 adapter);
470 if (write_protect_nvm)
471 adapter->flags |= FLAG_READ_ONLY_NVM;
472 } else {
473 if (opt.def)
474 adapter->flags |= FLAG_READ_ONLY_NVM;
475 }
476 }
477 }
478}
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
new file mode 100644
index 00000000000..8666476cb9b
--- /dev/null
+++ b/drivers/net/e1000e/phy.c
@@ -0,0 +1,3377 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include <linux/delay.h>
30
31#include "e1000.h"
32
33static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
34static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
35static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
36static s32 e1000_wait_autoneg(struct e1000_hw *hw);
37static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
38static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
39 u16 *data, bool read, bool page_set);
40static u32 e1000_get_phy_addr_for_hv_page(u32 page);
41static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
42 u16 *data, bool read);
43
44/* Cable length tables */
45static const u16 e1000_m88_cable_length_table[] = {
46 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
47#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
48 ARRAY_SIZE(e1000_m88_cable_length_table)
49
50static const u16 e1000_igp_2_cable_length_table[] = {
51 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
52 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
53 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
54 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
55 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
56 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
57 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
58 124};
59#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
60 ARRAY_SIZE(e1000_igp_2_cable_length_table)
61
62#define BM_PHY_REG_PAGE(offset) \
63 ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
64#define BM_PHY_REG_NUM(offset) \
65 ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
66 (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
67 ~MAX_PHY_REG_ADDRESS)))
68
69#define HV_INTC_FC_PAGE_START 768
70#define I82578_ADDR_REG 29
71#define I82577_ADDR_REG 16
72#define I82577_CFG_REG 22
73#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
74#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
75#define I82577_CTRL_REG 23
76
77/* 82577 specific PHY registers */
78#define I82577_PHY_CTRL_2 18
79#define I82577_PHY_STATUS_2 26
80#define I82577_PHY_DIAG_STATUS 31
81
82/* I82577 PHY Status 2 */
83#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
84#define I82577_PHY_STATUS2_MDIX 0x0800
85#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
86#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
87
88/* I82577 PHY Control 2 */
89#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400
90#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
91
92/* I82577 PHY Diagnostics Status */
93#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
94#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
95
96/* BM PHY Copper Specific Control 1 */
97#define BM_CS_CTRL1 16
98
99#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
100#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
101#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
102
103/**
104 * e1000e_check_reset_block_generic - Check if PHY reset is blocked
105 * @hw: pointer to the HW structure
106 *
107 * Read the PHY management control register and check whether a PHY reset
108 * is blocked. If a reset is not blocked return 0, otherwise
109 * return E1000_BLK_PHY_RESET (12).
110 **/
111s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
112{
113 u32 manc;
114
115 manc = er32(MANC);
116
117 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
118 E1000_BLK_PHY_RESET : 0;
119}
120
/**
 * e1000e_get_phy_id - Retrieve the PHY ID and revision
 * @hw: pointer to the HW structure
 *
 * Reads the PHY registers and stores the PHY ID and possibly the PHY
 * revision in the hardware structure.  Returns 0 on success (including
 * the case where no read_reg op is installed) or a PHY read error code.
 **/
s32 e1000e_get_phy_id(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_id;
	u16 retry_count = 0;

	/* no register-read op installed: nothing to do, report success */
	if (!(phy->ops.read_reg))
		goto out;

	while (retry_count < 2) {
		ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
		if (ret_val)
			goto out;

		/* PHY_ID1 supplies the upper 16 bits of the composite ID */
		phy->id = (u32)(phy_id << 16);
		udelay(20);
		ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
		if (ret_val)
			goto out;

		/* PHY_ID2 carries the low ID bits plus the revision field */
		phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
		phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);

		/* an ID of 0 or PHY_REVISION_MASK indicates a garbage read
		 * (e.g. PHY not ready yet); otherwise accept and stop */
		if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
			goto out;

		retry_count++;	/* retry the pair of reads once */
	}
out:
	return ret_val;
}
160
161/**
162 * e1000e_phy_reset_dsp - Reset PHY DSP
163 * @hw: pointer to the HW structure
164 *
165 * Reset the digital signal processor.
166 **/
167s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
168{
169 s32 ret_val;
170
171 ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
172 if (ret_val)
173 return ret_val;
174
175 return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0);
176}
177
/**
 * e1000e_read_phy_reg_mdic - Read MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the MDI control register in the PHY at offset and stores the
 * information read to data.  Returns 0 on success, -E1000_ERR_PARAM for
 * an out-of-range offset, or -E1000_ERR_PHY on timeout/MDI error.
 **/
s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, mdic = 0;

	if (offset > MAX_PHY_REG_ADDRESS) {
		e_dbg("PHY Address %d is out of range\n", offset);
		return -E1000_ERR_PARAM;
	}

	/*
	 * Set up Op-code, Phy Address, and register offset in the MDI
	 * Control register. The MAC will take care of interfacing with the
	 * PHY to retrieve the desired data.
	 */
	mdic = ((offset << E1000_MDIC_REG_SHIFT) |
		(phy->addr << E1000_MDIC_PHY_SHIFT) |
		(E1000_MDIC_OP_READ));

	ew32(MDIC, mdic);

	/*
	 * Poll the ready bit to see if the MDI read completed
	 * Increasing the time out as testing showed failures with
	 * the lower time out
	 */
	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
		udelay(50);
		mdic = er32(MDIC);
		if (mdic & E1000_MDIC_READY)
			break;
	}
	if (!(mdic & E1000_MDIC_READY)) {
		e_dbg("MDI Read did not complete\n");
		return -E1000_ERR_PHY;
	}
	if (mdic & E1000_MDIC_ERROR) {
		e_dbg("MDI Error\n");
		return -E1000_ERR_PHY;
	}
	/* low 16 bits of MDIC hold the data returned by the PHY */
	*data = (u16) mdic;

	/*
	 * Allow some time after each MDIC transaction to avoid
	 * reading duplicate data in the next MDIC transaction.
	 */
	if (hw->mac.type == e1000_pch2lan)
		udelay(100);

	return 0;
}
238
/**
 * e1000e_write_phy_reg_mdic - Write MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write to register at offset
 *
 * Writes data to MDI control register in the PHY at offset.  Returns 0
 * on success, -E1000_ERR_PARAM for an out-of-range offset, or
 * -E1000_ERR_PHY on timeout/MDI error.
 **/
s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, mdic = 0;

	if (offset > MAX_PHY_REG_ADDRESS) {
		e_dbg("PHY Address %d is out of range\n", offset);
		return -E1000_ERR_PARAM;
	}

	/*
	 * Set up Op-code, Phy Address, and register offset in the MDI
	 * Control register. The MAC will take care of interfacing with the
	 * PHY to write the desired data.
	 */
	mdic = (((u32)data) |
		(offset << E1000_MDIC_REG_SHIFT) |
		(phy->addr << E1000_MDIC_PHY_SHIFT) |
		(E1000_MDIC_OP_WRITE));

	ew32(MDIC, mdic);

	/*
	 * Poll the ready bit to see if the MDI write completed
	 * Increasing the time out as testing showed failures with
	 * the lower time out
	 */
	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
		udelay(50);
		mdic = er32(MDIC);
		if (mdic & E1000_MDIC_READY)
			break;
	}
	if (!(mdic & E1000_MDIC_READY)) {
		e_dbg("MDI Write did not complete\n");
		return -E1000_ERR_PHY;
	}
	if (mdic & E1000_MDIC_ERROR) {
		e_dbg("MDI Error\n");
		return -E1000_ERR_PHY;
	}

	/*
	 * Allow some time after each MDIC transaction to avoid
	 * reading duplicate data in the next MDIC transaction.
	 */
	if (hw->mac.type == e1000_pch2lan)
		udelay(100);

	return 0;
}
298
299/**
300 * e1000e_read_phy_reg_m88 - Read m88 PHY register
301 * @hw: pointer to the HW structure
302 * @offset: register offset to be read
303 * @data: pointer to the read data
304 *
305 * Acquires semaphore, if necessary, then reads the PHY register at offset
306 * and storing the retrieved information in data. Release any acquired
307 * semaphores before exiting.
308 **/
309s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
310{
311 s32 ret_val;
312
313 ret_val = hw->phy.ops.acquire(hw);
314 if (ret_val)
315 return ret_val;
316
317 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
318 data);
319
320 hw->phy.ops.release(hw);
321
322 return ret_val;
323}
324
325/**
326 * e1000e_write_phy_reg_m88 - Write m88 PHY register
327 * @hw: pointer to the HW structure
328 * @offset: register offset to write to
329 * @data: data to write at register offset
330 *
331 * Acquires semaphore, if necessary, then writes the data to PHY register
332 * at the offset. Release any acquired semaphores before exiting.
333 **/
334s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
335{
336 s32 ret_val;
337
338 ret_val = hw->phy.ops.acquire(hw);
339 if (ret_val)
340 return ret_val;
341
342 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
343 data);
344
345 hw->phy.ops.release(hw);
346
347 return ret_val;
348}
349
/**
 * e1000_set_page_igp - Set page as on IGP-like PHY(s)
 * @hw: pointer to the HW structure
 * @page: page to set (shifted left when necessary)
 *
 * Sets PHY page required for PHY register access.  Assumes semaphore is
 * already acquired.  Note, this function sets phy.addr to 1 so the caller
 * must set it appropriately (if necessary) after this function returns.
 **/
s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
{
	e_dbg("Setting page 0x%x\n", page);

	/* page select lives at PHY address 1 on IGP-like PHYs */
	hw->phy.addr = 1;

	return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
}
367
368/**
369 * __e1000e_read_phy_reg_igp - Read igp PHY register
370 * @hw: pointer to the HW structure
371 * @offset: register offset to be read
372 * @data: pointer to the read data
373 * @locked: semaphore has already been acquired or not
374 *
375 * Acquires semaphore, if necessary, then reads the PHY register at offset
376 * and stores the retrieved information in data. Release any acquired
377 * semaphores before exiting.
378 **/
379static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
380 bool locked)
381{
382 s32 ret_val = 0;
383
384 if (!locked) {
385 if (!(hw->phy.ops.acquire))
386 goto out;
387
388 ret_val = hw->phy.ops.acquire(hw);
389 if (ret_val)
390 goto out;
391 }
392
393 if (offset > MAX_PHY_MULTI_PAGE_REG) {
394 ret_val = e1000e_write_phy_reg_mdic(hw,
395 IGP01E1000_PHY_PAGE_SELECT,
396 (u16)offset);
397 if (ret_val)
398 goto release;
399 }
400
401 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
402 data);
403
404release:
405 if (!locked)
406 hw->phy.ops.release(hw);
407out:
408 return ret_val;
409}
410
/**
 * e1000e_read_phy_reg_igp - Read igp PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Acquires semaphore then reads the PHY register at offset and stores the
 * retrieved information in data.
 * Release the acquired semaphore before exiting.
 **/
s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
{
	/* unlocked variant: helper acquires/releases the semaphore */
	return __e1000e_read_phy_reg_igp(hw, offset, data, false);
}
425
/**
 * e1000e_read_phy_reg_igp_locked - Read igp PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the PHY register at offset and stores the retrieved information
 * in data.  Assumes semaphore already acquired by the caller.
 **/
s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
{
	/* locked variant: caller already holds the PHY semaphore */
	return __e1000e_read_phy_reg_igp(hw, offset, data, true);
}
439
440/**
441 * e1000e_write_phy_reg_igp - Write igp PHY register
442 * @hw: pointer to the HW structure
443 * @offset: register offset to write to
444 * @data: data to write at register offset
445 * @locked: semaphore has already been acquired or not
446 *
447 * Acquires semaphore, if necessary, then writes the data to PHY register
448 * at the offset. Release any acquired semaphores before exiting.
449 **/
450static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
451 bool locked)
452{
453 s32 ret_val = 0;
454
455 if (!locked) {
456 if (!(hw->phy.ops.acquire))
457 goto out;
458
459 ret_val = hw->phy.ops.acquire(hw);
460 if (ret_val)
461 goto out;
462 }
463
464 if (offset > MAX_PHY_MULTI_PAGE_REG) {
465 ret_val = e1000e_write_phy_reg_mdic(hw,
466 IGP01E1000_PHY_PAGE_SELECT,
467 (u16)offset);
468 if (ret_val)
469 goto release;
470 }
471
472 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
473 data);
474
475release:
476 if (!locked)
477 hw->phy.ops.release(hw);
478
479out:
480 return ret_val;
481}
482
/**
 * e1000e_write_phy_reg_igp - Write igp PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Acquires semaphore then writes the data to PHY register
 * at the offset.  Release any acquired semaphores before exiting.
 **/
s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
{
	/* unlocked variant: helper acquires/releases the semaphore */
	return __e1000e_write_phy_reg_igp(hw, offset, data, false);
}
496
/**
 * e1000e_write_phy_reg_igp_locked - Write igp PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes the data to PHY register at the offset.
 * Assumes semaphore already acquired by the caller.
 **/
s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
{
	/* locked variant: caller already holds the PHY semaphore */
	return __e1000e_write_phy_reg_igp(hw, offset, data, true);
}
510
/**
 * __e1000_read_kmrn_reg - Read kumeran register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 * @locked: semaphore has already been acquired or not
 *
 * Acquires semaphore, if necessary.  Then reads the PHY register at offset
 * using the kumeran interface.  The information retrieved is stored in data.
 * Release any acquired semaphores before exiting.
 **/
static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
				 bool locked)
{
	u32 kmrnctrlsta;
	s32 ret_val = 0;

	if (!locked) {
		/* no acquire op installed: bail out reporting success */
		if (!(hw->phy.ops.acquire))
			goto out;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			goto out;
	}

	/* program the target offset plus the Read-ENable bit */
	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
		       E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
	ew32(KMRNCTRLSTA, kmrnctrlsta);
	e1e_flush();

	/* give the hardware time to complete the access */
	udelay(2);

	/* the result comes back in the low 16 bits of the same register */
	kmrnctrlsta = er32(KMRNCTRLSTA);
	*data = (u16)kmrnctrlsta;

	if (!locked)
		hw->phy.ops.release(hw);

out:
	return ret_val;
}
553
/**
 * e1000e_read_kmrn_reg - Read kumeran register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Acquires semaphore then reads the PHY register at offset using the
 * kumeran interface.  The information retrieved is stored in data.
 * Release the acquired semaphore before exiting.
 **/
s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
{
	/* unlocked variant: helper acquires/releases the semaphore */
	return __e1000_read_kmrn_reg(hw, offset, data, false);
}
568
/**
 * e1000e_read_kmrn_reg_locked - Read kumeran register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the PHY register at offset using the kumeran interface.  The
 * information retrieved is stored in data.
 * Assumes semaphore already acquired by the caller.
 **/
s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
{
	/* locked variant: caller already holds the PHY semaphore */
	return __e1000_read_kmrn_reg(hw, offset, data, true);
}
583
/**
 * __e1000_write_kmrn_reg - Write kumeran register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 * @locked: semaphore has already been acquired or not
 *
 * Acquires semaphore, if necessary.  Then write the data to PHY register
 * at the offset using the kumeran interface.  Release any acquired
 * semaphores before exiting.
 **/
static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
				  bool locked)
{
	u32 kmrnctrlsta;
	s32 ret_val = 0;

	if (!locked) {
		/* no acquire op installed: bail out reporting success */
		if (!(hw->phy.ops.acquire))
			goto out;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			goto out;
	}

	/* combine the target offset with the data in a single register
	 * write; absence of the REN bit makes this a write access */
	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
		       E1000_KMRNCTRLSTA_OFFSET) | data;
	ew32(KMRNCTRLSTA, kmrnctrlsta);
	e1e_flush();

	/* give the hardware time to complete the access */
	udelay(2);

	if (!locked)
		hw->phy.ops.release(hw);

out:
	return ret_val;
}
623
624/**
625 * e1000e_write_kmrn_reg - Write kumeran register
626 * @hw: pointer to the HW structure
627 * @offset: register offset to write to
628 * @data: data to write at register offset
629 *
630 * Acquires semaphore then writes the data to the PHY register at the offset
631 * using the kumeran interface. Release the acquired semaphore before exiting.
632 **/
s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
{
	/* locked=false: the helper acquires/releases the PHY semaphore */
	return __e1000_write_kmrn_reg(hw, offset, data, false);
}
637
638/**
639 * e1000e_write_kmrn_reg_locked - Write kumeran register
640 * @hw: pointer to the HW structure
641 * @offset: register offset to write to
642 * @data: data to write at register offset
643 *
644 * Write the data to PHY register at the offset using the kumeran interface.
645 * Assumes semaphore already acquired.
646 **/
s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
{
	/* locked=true: caller already holds the PHY semaphore */
	return __e1000_write_kmrn_reg(hw, offset, data, true);
}
651
652/**
653 * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
654 * @hw: pointer to the HW structure
655 *
656 * Sets up Carrier-sense on Transmit and downshift values.
657 **/
658s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
659{
660 s32 ret_val;
661 u16 phy_data;
662
663 /* Enable CRS on Tx. This must be set for half-duplex operation. */
664 ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
665 if (ret_val)
666 goto out;
667
668 phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
669
670 /* Enable downshift */
671 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
672
673 ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
674
675out:
676 return ret_val;
677}
678
679/**
680 * e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link
681 * @hw: pointer to the HW structure
682 *
683 * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock
684 * and downshift values are set also.
685 **/
s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data;

	/* Enable CRS on Tx. This must be set for half-duplex operation. */
	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	/* For BM PHY this bit is downshift enable */
	if (phy->type != e1000_phy_bm)
		phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;

	/*
	 * Options:
	 *   MDI/MDI-X = 0 (default)
	 *   0 - Auto for all speeds
	 *   1 - MDI mode
	 *   2 - MDI-X mode
	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
	 */
	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;

	switch (phy->mdix) {
	case 1:
		phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
		break;
	case 2:
		phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
		break;
	case 3:
		phy_data |= M88E1000_PSCR_AUTO_X_1000T;
		break;
	case 0:
	default:
		phy_data |= M88E1000_PSCR_AUTO_X_MODE;
		break;
	}

	/*
	 * Options:
	 *   disable_polarity_correction = 0 (default)
	 *   0 - automatic polarity correction enabled
	 *   1 - automatic polarity correction disabled
	 */
	phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
	if (phy->disable_polarity_correction == 1)
		phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;

	/* Enable downshift on BM (disabled by default) */
	if (phy->type == e1000_phy_bm)
		phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;

	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	/* Only pre-rev-4 true M88 parts (excluding BM R2) need the TX_CLK
	 * and downshift-counter fixups below.
	 */
	if ((phy->type == e1000_phy_m88) &&
	    (phy->revision < E1000_REVISION_4) &&
	    (phy->id != BME1000_E_PHY_ID_R2)) {
		/*
		 * Force TX_CLK in the Extended PHY Specific Control Register
		 * to 25MHz clock.
		 */
		ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
		if (ret_val)
			return ret_val;

		phy_data |= M88E1000_EPSCR_TX_CLK_25;

		if ((phy->revision == 2) &&
		    (phy->id == M88E1111_I_PHY_ID)) {
			/* 82573L PHY - set the downshift counter to 5x. */
			phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
			phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
		} else {
			/* Configure Master and Slave downshift values */
			phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
				      M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
			phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
				     M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
		}
		ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
		if (ret_val)
			return ret_val;
	}

	/* BM R2 workaround: raw register writes to page 0 regs 29/30.
	 * NOTE(review): magic values from the vendor; presumably an erratum
	 * fixup — do not change without hardware documentation.
	 */
	if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) {
		/* Set PHY page 0, register 29 to 0x0003 */
		ret_val = e1e_wphy(hw, 29, 0x0003);
		if (ret_val)
			return ret_val;

		/* Set PHY page 0, register 30 to 0x0000 */
		ret_val = e1e_wphy(hw, 30, 0x0000);
		if (ret_val)
			return ret_val;
	}

	/* Commit the changes. */
	ret_val = e1000e_commit_phy(hw);
	if (ret_val) {
		e_dbg("Error committing the PHY changes\n");
		return ret_val;
	}

	if (phy->type == e1000_phy_82578) {
		ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
		if (ret_val)
			return ret_val;

		/* 82578 PHY - set the downshift count to 1x. */
		phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
		phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
		ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
		if (ret_val)
			return ret_val;
	}

	return 0;
}
810
811/**
812 * e1000e_copper_link_setup_igp - Setup igp PHY's for copper link
813 * @hw: pointer to the HW structure
814 *
815 * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
816 * igp PHY's.
817 **/
s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;

	ret_val = e1000_phy_hw_reset(hw);
	if (ret_val) {
		e_dbg("Error resetting the PHY.\n");
		return ret_val;
	}

	/*
	 * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
	 * timeout issues when LFS is enabled.
	 */
	msleep(100);

	/* disable lplu d0 during driver init */
	ret_val = e1000_set_d0_lplu_state(hw, false);
	if (ret_val) {
		e_dbg("Error Disabling LPLU D0\n");
		return ret_val;
	}
	/* Configure mdi-mdix settings */
	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data);
	if (ret_val)
		return ret_val;

	data &= ~IGP01E1000_PSCR_AUTO_MDIX;

	/* mdix: 0 = auto, 1 = force MDI, 2 = force MDI-X */
	switch (phy->mdix) {
	case 1:
		data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
		break;
	case 2:
		data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
		break;
	case 0:
	default:
		data |= IGP01E1000_PSCR_AUTO_MDIX;
		break;
	}
	ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data);
	if (ret_val)
		return ret_val;

	/* set auto-master slave resolution settings */
	if (hw->mac.autoneg) {
		/*
		 * when autonegotiation advertisement is only 1000Mbps then we
		 * should disable SmartSpeed and enable Auto MasterSlave
		 * resolution as hardware default.
		 */
		if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
			/* Disable SmartSpeed */
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;

			/* Set auto Master/Slave resolution process */
			ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
			if (ret_val)
				return ret_val;

			data &= ~CR_1000T_MS_ENABLE;
			ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
			if (ret_val)
				return ret_val;
		}

		ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
		if (ret_val)
			return ret_val;

		/* load defaults for future use */
		phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
			((data & CR_1000T_MS_VALUE) ?
			e1000_ms_force_master :
			e1000_ms_force_slave) :
			e1000_ms_auto;

		switch (phy->ms_type) {
		case e1000_ms_force_master:
			data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
			break;
		case e1000_ms_force_slave:
			data |= CR_1000T_MS_ENABLE;
			data &= ~(CR_1000T_MS_VALUE);
			break;
		case e1000_ms_auto:
			data &= ~CR_1000T_MS_ENABLE;
			/* fall through */
		default:
			break;
		}
		ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
	}

	/* 0 on the non-autoneg path, else the final e1e_wphy() result */
	return ret_val;
}
925
926/**
927 * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
928 * @hw: pointer to the HW structure
929 *
930 * Reads the MII auto-neg advertisement register and/or the 1000T control
931 * register and if the PHY is already setup for auto-negotiation, then
932 * return successful. Otherwise, setup advertisement and flow control to
933 * the appropriate values for the wanted auto-negotiation.
934 **/
static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 mii_autoneg_adv_reg;
	u16 mii_1000t_ctrl_reg = 0;

	/* Never advertise more than the PHY supports. */
	phy->autoneg_advertised &= phy->autoneg_mask;

	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
	ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
		/* Read the MII 1000Base-T Control Register (Address 9). */
		ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
		if (ret_val)
			return ret_val;
	}

	/*
	 * Need to parse both autoneg_advertised and fc and set up
	 * the appropriate PHY registers.  First we will parse for
	 * autoneg_advertised software override.  Since we can advertise
	 * a plethora of combinations, we need to check each bit
	 * individually.
	 */

	/*
	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
	 * the  1000Base-T Control Register (Address 9).
	 */
	mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
				 NWAY_AR_100TX_HD_CAPS |
				 NWAY_AR_10T_FD_CAPS |
				 NWAY_AR_10T_HD_CAPS);
	mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);

	e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);

	/* Do we want to advertise 10 Mb Half Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
		e_dbg("Advertise 10mb Half duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
	}

	/* Do we want to advertise 10 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
		e_dbg("Advertise 10mb Full duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
	}

	/* Do we want to advertise 100 Mb Half Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
		e_dbg("Advertise 100mb Half duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
	}

	/* Do we want to advertise 100 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
		e_dbg("Advertise 100mb Full duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
	}

	/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
	if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
		e_dbg("Advertise 1000mb Half duplex request denied!\n");

	/* Do we want to advertise 1000 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
		e_dbg("Advertise 1000mb Full duplex\n");
		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
	}

	/*
	 * Check for a software override of the flow control settings, and
	 * setup the PHY advertisement registers accordingly.  If
	 * auto-negotiation is enabled, then software will have to set the
	 * "PAUSE" bits to the correct value in the Auto-Negotiation
	 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
	 * negotiation.
	 *
	 * The possible values of the "fc" parameter are:
	 *   0: Flow control is completely disabled
	 *   1: Rx flow control is enabled (we can receive pause frames
	 *      but not send pause frames).
	 *   2: Tx flow control is enabled (we can send pause frames
	 *      but we do not support receiving pause frames).
	 *   3: Both Rx and Tx flow control (symmetric) are enabled.
	 *   other: No software override.  The flow control configuration
	 *          in the EEPROM is used.
	 */
	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		/*
		 * Flow control (Rx & Tx) is completely disabled by a
		 * software over-ride.
		 */
		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case e1000_fc_rx_pause:
		/*
		 * Rx Flow control is enabled, and Tx Flow control is
		 * disabled, by a software over-ride.
		 *
		 * Since there really isn't a way to advertise that we are
		 * capable of Rx Pause ONLY, we will advertise that we
		 * support both symmetric and asymmetric Rx PAUSE.  Later
		 * (in e1000e_config_fc_after_link_up) we will disable the
		 * hw's ability to send PAUSE frames.
		 */
		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case e1000_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled, by a software over-ride.
		 */
		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
		break;
	case e1000_fc_full:
		/*
		 * Flow control (both Rx and Tx) is enabled by a software
		 * over-ride.
		 */
		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	default:
		e_dbg("Flow control param set incorrectly\n");
		ret_val = -E1000_ERR_CONFIG;
		return ret_val;
	}

	ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);

	/* Only write the 1000T control register if gigabit is supported. */
	if (phy->autoneg_mask & ADVERTISE_1000_FULL)
		ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);

	return ret_val;
}
1082
1083/**
1084 * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
1085 * @hw: pointer to the HW structure
1086 *
1087 * Performs initial bounds checking on autoneg advertisement parameter, then
1088 * configure to advertise the full capability. Setup the PHY to autoneg
1089 * and restart the negotiation process between the link partner. If
1090 * autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
1091 **/
1092static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1093{
1094 struct e1000_phy_info *phy = &hw->phy;
1095 s32 ret_val;
1096 u16 phy_ctrl;
1097
1098 /*
1099 * Perform some bounds checking on the autoneg advertisement
1100 * parameter.
1101 */
1102 phy->autoneg_advertised &= phy->autoneg_mask;
1103
1104 /*
1105 * If autoneg_advertised is zero, we assume it was not defaulted
1106 * by the calling code so we set to advertise full capability.
1107 */
1108 if (phy->autoneg_advertised == 0)
1109 phy->autoneg_advertised = phy->autoneg_mask;
1110
1111 e_dbg("Reconfiguring auto-neg advertisement params\n");
1112 ret_val = e1000_phy_setup_autoneg(hw);
1113 if (ret_val) {
1114 e_dbg("Error Setting up Auto-Negotiation\n");
1115 return ret_val;
1116 }
1117 e_dbg("Restarting Auto-Neg\n");
1118
1119 /*
1120 * Restart auto-negotiation by setting the Auto Neg Enable bit and
1121 * the Auto Neg Restart bit in the PHY control register.
1122 */
1123 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
1124 if (ret_val)
1125 return ret_val;
1126
1127 phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
1128 ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
1129 if (ret_val)
1130 return ret_val;
1131
1132 /*
1133 * Does the user want to wait for Auto-Neg to complete here, or
1134 * check at a later time (for example, callback routine).
1135 */
1136 if (phy->autoneg_wait_to_complete) {
1137 ret_val = e1000_wait_autoneg(hw);
1138 if (ret_val) {
1139 e_dbg("Error while waiting for "
1140 "autoneg to complete\n");
1141 return ret_val;
1142 }
1143 }
1144
1145 hw->mac.get_link_status = 1;
1146
1147 return ret_val;
1148}
1149
1150/**
1151 * e1000e_setup_copper_link - Configure copper link settings
1152 * @hw: pointer to the HW structure
1153 *
1154 * Calls the appropriate function to configure the link for auto-neg or forced
1155 * speed and duplex. Then we check for link, once link is established calls
1156 * to configure collision distance and flow control are called. If link is
1157 * not established, we return -E1000_ERR_PHY (-2).
1158 **/
1159s32 e1000e_setup_copper_link(struct e1000_hw *hw)
1160{
1161 s32 ret_val;
1162 bool link;
1163
1164 if (hw->mac.autoneg) {
1165 /*
1166 * Setup autoneg and flow control advertisement and perform
1167 * autonegotiation.
1168 */
1169 ret_val = e1000_copper_link_autoneg(hw);
1170 if (ret_val)
1171 return ret_val;
1172 } else {
1173 /*
1174 * PHY will be set to 10H, 10F, 100H or 100F
1175 * depending on user settings.
1176 */
1177 e_dbg("Forcing Speed and Duplex\n");
1178 ret_val = e1000_phy_force_speed_duplex(hw);
1179 if (ret_val) {
1180 e_dbg("Error Forcing Speed and Duplex\n");
1181 return ret_val;
1182 }
1183 }
1184
1185 /*
1186 * Check link status. Wait up to 100 microseconds for link to become
1187 * valid.
1188 */
1189 ret_val = e1000e_phy_has_link_generic(hw,
1190 COPPER_LINK_UP_LIMIT,
1191 10,
1192 &link);
1193 if (ret_val)
1194 return ret_val;
1195
1196 if (link) {
1197 e_dbg("Valid link established!!!\n");
1198 e1000e_config_collision_dist(hw);
1199 ret_val = e1000e_config_fc_after_link_up(hw);
1200 } else {
1201 e_dbg("Unable to establish link!!!\n");
1202 }
1203
1204 return ret_val;
1205}
1206
1207/**
1208 * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
1209 * @hw: pointer to the HW structure
1210 *
1211 * Calls the PHY setup function to force speed and duplex. Clears the
1212 * auto-crossover to force MDI manually. Waits for link and returns
1213 * successful if link up is successful, else -E1000_ERR_PHY (-2).
1214 **/
s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data;
	bool link;

	/* Read-modify-write PHY_CONTROL with the forced speed/duplex bits. */
	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
	if (ret_val)
		return ret_val;

	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);

	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
	if (ret_val)
		return ret_val;

	/*
	 * Clear Auto-Crossover to force MDI manually.  IGP requires MDI
	 * forced whenever speed and duplex are forced.
	 */
	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
	phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;

	ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	e_dbg("IGP PSCR: %X\n", phy_data);

	udelay(1);

	if (phy->autoneg_wait_to_complete) {
		e_dbg("Waiting for forced speed/duplex link on IGP phy.\n");

		ret_val = e1000e_phy_has_link_generic(hw,
						      PHY_FORCE_LIMIT,
						      100000,
						      &link);
		if (ret_val)
			return ret_val;

		if (!link)
			e_dbg("Link taking longer than expected.\n");

		/* Try once more */
		ret_val = e1000e_phy_has_link_generic(hw,
						      PHY_FORCE_LIMIT,
						      100000,
						      &link);
		if (ret_val)
			return ret_val;
	}

	/* Link may still be down here; callers treat that as non-fatal. */
	return ret_val;
}
1275
1276/**
1277 * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
1278 * @hw: pointer to the HW structure
1279 *
1280 * Calls the PHY setup function to force speed and duplex. Clears the
1281 * auto-crossover to force MDI manually. Resets the PHY to commit the
1282 * changes. If time expires while waiting for link up, we reset the DSP.
1283 * After reset, TX_CLK and CRS on Tx must be set. Return successful upon
1284 * successful completion, else return corresponding error code.
1285 **/
s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data;
	bool link;

	/*
	 * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
	 * forced whenever speed and duplex are forced.
	 */
	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	e_dbg("M88E1000 PSCR: %X\n", phy_data);

	/* Apply forced speed/duplex bits to PHY_CONTROL. */
	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
	if (ret_val)
		return ret_val;

	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);

	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
	if (ret_val)
		return ret_val;

	/* Reset the phy to commit changes. */
	ret_val = e1000e_commit_phy(hw);
	if (ret_val)
		return ret_val;

	if (phy->autoneg_wait_to_complete) {
		e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");

		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						      100000, &link);
		if (ret_val)
			return ret_val;

		if (!link) {
			if (hw->phy.type != e1000_phy_m88) {
				e_dbg("Link taking longer than expected.\n");
			} else {
				/*
				 * We didn't get link.
				 * Reset the DSP and cross our fingers.
				 */
				ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
						   0x001d);
				if (ret_val)
					return ret_val;
				ret_val = e1000e_phy_reset_dsp(hw);
				if (ret_val)
					return ret_val;
			}
		}

		/* Try once more */
		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						      100000, &link);
		if (ret_val)
			return ret_val;
	}

	/* The post-reset fixups below only apply to true M88 PHYs. */
	if (hw->phy.type != e1000_phy_m88)
		return 0;

	ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	/*
	 * Resetting the phy means we need to re-force TX_CLK in the
	 * Extended PHY Specific Control Register to 25MHz clock from
	 * the reset value of 2.5MHz.
	 */
	phy_data |= M88E1000_EPSCR_TX_CLK_25;
	ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	/*
	 * In addition, we must re-enable CRS on Tx for both half and full
	 * duplex.
	 */
	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);

	return ret_val;
}
1386
1387/**
1388 * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
1389 * @hw: pointer to the HW structure
1390 *
1391 * Forces the speed and duplex settings of the PHY.
1392 * This is a function pointer entry point only called by
1393 * PHY setup routines.
1394 **/
1395s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
1396{
1397 struct e1000_phy_info *phy = &hw->phy;
1398 s32 ret_val;
1399 u16 data;
1400 bool link;
1401
1402 ret_val = e1e_rphy(hw, PHY_CONTROL, &data);
1403 if (ret_val)
1404 goto out;
1405
1406 e1000e_phy_force_speed_duplex_setup(hw, &data);
1407
1408 ret_val = e1e_wphy(hw, PHY_CONTROL, data);
1409 if (ret_val)
1410 goto out;
1411
1412 /* Disable MDI-X support for 10/100 */
1413 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
1414 if (ret_val)
1415 goto out;
1416
1417 data &= ~IFE_PMC_AUTO_MDIX;
1418 data &= ~IFE_PMC_FORCE_MDIX;
1419
1420 ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data);
1421 if (ret_val)
1422 goto out;
1423
1424 e_dbg("IFE PMC: %X\n", data);
1425
1426 udelay(1);
1427
1428 if (phy->autoneg_wait_to_complete) {
1429 e_dbg("Waiting for forced speed/duplex link on IFE phy.\n");
1430
1431 ret_val = e1000e_phy_has_link_generic(hw,
1432 PHY_FORCE_LIMIT,
1433 100000,
1434 &link);
1435 if (ret_val)
1436 goto out;
1437
1438 if (!link)
1439 e_dbg("Link taking longer than expected.\n");
1440
1441 /* Try once more */
1442 ret_val = e1000e_phy_has_link_generic(hw,
1443 PHY_FORCE_LIMIT,
1444 100000,
1445 &link);
1446 if (ret_val)
1447 goto out;
1448 }
1449
1450out:
1451 return ret_val;
1452}
1453
1454/**
1455 * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
1456 * @hw: pointer to the HW structure
1457 * @phy_ctrl: pointer to current value of PHY_CONTROL
1458 *
1459 * Forces speed and duplex on the PHY by doing the following: disable flow
1460 * control, force speed/duplex on the MAC, disable auto speed detection,
1461 * disable auto-negotiation, configure duplex, configure speed, configure
1462 * the collision distance, write configuration to CTRL register. The
1463 * caller must write to the PHY_CONTROL register for these settings to
1464 * take affect.
1465 **/
1466void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
1467{
1468 struct e1000_mac_info *mac = &hw->mac;
1469 u32 ctrl;
1470
1471 /* Turn off flow control when forcing speed/duplex */
1472 hw->fc.current_mode = e1000_fc_none;
1473
1474 /* Force speed/duplex on the mac */
1475 ctrl = er32(CTRL);
1476 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1477 ctrl &= ~E1000_CTRL_SPD_SEL;
1478
1479 /* Disable Auto Speed Detection */
1480 ctrl &= ~E1000_CTRL_ASDE;
1481
1482 /* Disable autoneg on the phy */
1483 *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
1484
1485 /* Forcing Full or Half Duplex? */
1486 if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
1487 ctrl &= ~E1000_CTRL_FD;
1488 *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
1489 e_dbg("Half Duplex\n");
1490 } else {
1491 ctrl |= E1000_CTRL_FD;
1492 *phy_ctrl |= MII_CR_FULL_DUPLEX;
1493 e_dbg("Full Duplex\n");
1494 }
1495
1496 /* Forcing 10mb or 100mb? */
1497 if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
1498 ctrl |= E1000_CTRL_SPD_100;
1499 *phy_ctrl |= MII_CR_SPEED_100;
1500 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
1501 e_dbg("Forcing 100mb\n");
1502 } else {
1503 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1504 *phy_ctrl |= MII_CR_SPEED_10;
1505 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
1506 e_dbg("Forcing 10mb\n");
1507 }
1508
1509 e1000e_config_collision_dist(hw);
1510
1511 ew32(CTRL, ctrl);
1512}
1513
1514/**
1515 * e1000e_set_d3_lplu_state - Sets low power link up state for D3
1516 * @hw: pointer to the HW structure
1517 * @active: boolean used to enable/disable lplu
1518 *
1519 * Success returns 0, Failure returns 1
1520 *
1521 * The low power link up (lplu) state is set to the power management level D3
1522 * and SmartSpeed is disabled when active is true, else clear lplu for D3
1523 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
1524 * is used during Dx states where the power conservation is most important.
1525 * During driver activity, SmartSpeed should be enabled so performance is
1526 * maintained.
1527 **/
s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;

	/* Read current power-management state before modifying LPLU. */
	ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
	if (ret_val)
		return ret_val;

	if (!active) {
		data &= ~IGP02E1000_PM_D3_LPLU;
		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
		if (ret_val)
			return ret_val;
		/*
		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		}
		/* smart_speed "default": leave SmartSpeed bit untouched */
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		data |= IGP02E1000_PM_D3_LPLU;
		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
		if (ret_val)
			return ret_val;

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
		if (ret_val)
			return ret_val;

		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
	}

	return ret_val;
}
1591
1592/**
1593 * e1000e_check_downshift - Checks whether a downshift in speed occurred
1594 * @hw: pointer to the HW structure
1595 *
1596 * Success returns 0, Failure returns 1
1597 *
1598 * A downshift is detected by querying the PHY link health.
1599 **/
1600s32 e1000e_check_downshift(struct e1000_hw *hw)
1601{
1602 struct e1000_phy_info *phy = &hw->phy;
1603 s32 ret_val;
1604 u16 phy_data, offset, mask;
1605
1606 switch (phy->type) {
1607 case e1000_phy_m88:
1608 case e1000_phy_gg82563:
1609 case e1000_phy_bm:
1610 case e1000_phy_82578:
1611 offset = M88E1000_PHY_SPEC_STATUS;
1612 mask = M88E1000_PSSR_DOWNSHIFT;
1613 break;
1614 case e1000_phy_igp_2:
1615 case e1000_phy_igp_3:
1616 offset = IGP01E1000_PHY_LINK_HEALTH;
1617 mask = IGP01E1000_PLHR_SS_DOWNGRADE;
1618 break;
1619 default:
1620 /* speed downshift not supported */
1621 phy->speed_downgraded = false;
1622 return 0;
1623 }
1624
1625 ret_val = e1e_rphy(hw, offset, &phy_data);
1626
1627 if (!ret_val)
1628 phy->speed_downgraded = (phy_data & mask);
1629
1630 return ret_val;
1631}
1632
1633/**
1634 * e1000_check_polarity_m88 - Checks the polarity.
1635 * @hw: pointer to the HW structure
1636 *
1637 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1638 *
1639 * Polarity is determined based on the PHY specific status register.
1640 **/
1641s32 e1000_check_polarity_m88(struct e1000_hw *hw)
1642{
1643 struct e1000_phy_info *phy = &hw->phy;
1644 s32 ret_val;
1645 u16 data;
1646
1647 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);
1648
1649 if (!ret_val)
1650 phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
1651 ? e1000_rev_polarity_reversed
1652 : e1000_rev_polarity_normal;
1653
1654 return ret_val;
1655}
1656
1657/**
1658 * e1000_check_polarity_igp - Checks the polarity.
1659 * @hw: pointer to the HW structure
1660 *
1661 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1662 *
1663 * Polarity is determined based on the PHY port status register, and the
1664 * current speed (since there is no polarity at 100Mbps).
1665 **/
1666s32 e1000_check_polarity_igp(struct e1000_hw *hw)
1667{
1668 struct e1000_phy_info *phy = &hw->phy;
1669 s32 ret_val;
1670 u16 data, offset, mask;
1671
1672 /*
1673 * Polarity is determined based on the speed of
1674 * our connection.
1675 */
1676 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
1677 if (ret_val)
1678 return ret_val;
1679
1680 if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
1681 IGP01E1000_PSSR_SPEED_1000MBPS) {
1682 offset = IGP01E1000_PHY_PCS_INIT_REG;
1683 mask = IGP01E1000_PHY_POLARITY_MASK;
1684 } else {
1685 /*
1686 * This really only applies to 10Mbps since
1687 * there is no polarity for 100Mbps (always 0).
1688 */
1689 offset = IGP01E1000_PHY_PORT_STATUS;
1690 mask = IGP01E1000_PSSR_POLARITY_REVERSED;
1691 }
1692
1693 ret_val = e1e_rphy(hw, offset, &data);
1694
1695 if (!ret_val)
1696 phy->cable_polarity = (data & mask)
1697 ? e1000_rev_polarity_reversed
1698 : e1000_rev_polarity_normal;
1699
1700 return ret_val;
1701}
1702
1703/**
1704 * e1000_check_polarity_ife - Check cable polarity for IFE PHY
1705 * @hw: pointer to the HW structure
1706 *
1707 * Polarity is determined on the polarity reversal feature being enabled.
1708 **/
1709s32 e1000_check_polarity_ife(struct e1000_hw *hw)
1710{
1711 struct e1000_phy_info *phy = &hw->phy;
1712 s32 ret_val;
1713 u16 phy_data, offset, mask;
1714
1715 /*
1716 * Polarity is determined based on the reversal feature being enabled.
1717 */
1718 if (phy->polarity_correction) {
1719 offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
1720 mask = IFE_PESC_POLARITY_REVERSED;
1721 } else {
1722 offset = IFE_PHY_SPECIAL_CONTROL;
1723 mask = IFE_PSC_FORCE_POLARITY;
1724 }
1725
1726 ret_val = e1e_rphy(hw, offset, &phy_data);
1727
1728 if (!ret_val)
1729 phy->cable_polarity = (phy_data & mask)
1730 ? e1000_rev_polarity_reversed
1731 : e1000_rev_polarity_normal;
1732
1733 return ret_val;
1734}
1735
/**
 * e1000_wait_autoneg - Wait for auto-neg completion
 * @hw: pointer to the HW structure
 *
 * Waits for auto-negotiation to complete or for the auto-negotiation time
 * limit to expire, which ever happens first.  A non-zero return means a
 * PHY register read failed, not that auto-negotiation timed out.
 **/
static s32 e1000_wait_autoneg(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 i, phy_status;

	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
		/*
		 * PHY_STATUS is deliberately read twice per iteration;
		 * e1000e_phy_has_link_generic() uses the same pattern
		 * because some status bits are sticky and the first read
		 * can return a stale latched value.
		 */
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_AUTONEG_COMPLETE)
			break;
		msleep(100);
	}

	/*
	 * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
	 * has completed.
	 */
	return ret_val;
}
1767
/**
 * e1000e_phy_has_link_generic - Polls PHY for link
 * @hw: pointer to the HW structure
 * @iterations: number of times to poll for link
 * @usec_interval: delay between polling attempts
 * @success: pointer to whether polling was successful or not
 *
 * Polls the PHY status register for link, 'iterations' number of times.
 * On return, *success is true if link was seen before the iteration
 * budget ran out.  The return value reflects only PHY access errors.
 **/
s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
				u32 usec_interval, bool *success)
{
	s32 ret_val = 0;
	u16 i, phy_status;

	for (i = 0; i < iterations; i++) {
		/*
		 * Some PHYs require the PHY_STATUS register to be read
		 * twice due to the link bit being sticky.  No harm doing
		 * it across the board.
		 */
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			/*
			 * If the first read fails, another entity may have
			 * ownership of the resources, wait and try again to
			 * see if they have relinquished the resources yet.
			 * (Intentionally braceless: only the udelay() is
			 * conditional; the second read below always runs.)
			 */
			udelay(usec_interval);
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_LINK_STATUS)
			break;
		/* mdelay() for intervals of 1ms or more; udelay() below that */
		if (usec_interval >= 1000)
			mdelay(usec_interval/1000);
		else
			udelay(usec_interval);
	}

	/* Link was found iff we broke out before exhausting all iterations */
	*success = (i < iterations);

	return ret_val;
}
1812
1813/**
1814 * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY
1815 * @hw: pointer to the HW structure
1816 *
1817 * Reads the PHY specific status register to retrieve the cable length
1818 * information. The cable length is determined by averaging the minimum and
1819 * maximum values to get the "average" cable length. The m88 PHY has four
1820 * possible cable length values, which are:
1821 * Register Value Cable Length
1822 * 0 < 50 meters
1823 * 1 50 - 80 meters
1824 * 2 80 - 110 meters
1825 * 3 110 - 140 meters
1826 * 4 > 140 meters
1827 **/
1828s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
1829{
1830 struct e1000_phy_info *phy = &hw->phy;
1831 s32 ret_val;
1832 u16 phy_data, index;
1833
1834 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
1835 if (ret_val)
1836 goto out;
1837
1838 index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
1839 M88E1000_PSSR_CABLE_LENGTH_SHIFT;
1840 if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
1841 ret_val = -E1000_ERR_PHY;
1842 goto out;
1843 }
1844
1845 phy->min_cable_length = e1000_m88_cable_length_table[index];
1846 phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
1847
1848 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
1849
1850out:
1851 return ret_val;
1852}
1853
/**
 * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY
 * @hw: pointer to the HW structure
 *
 * The automatic gain control (agc) normalizes the amplitude of the
 * received signal, adjusting for the attenuation produced by the
 * cable.  By reading the AGC registers, which represent the
 * combination of coarse and fine gain value, the value can be put
 * into a lookup table to obtain the approximate cable length
 * for each channel.  The per-channel lengths are averaged with the
 * minimum and maximum channels excluded.
 **/
s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, i, agc_value = 0;
	u16 cur_agc_index, max_agc_index = 0;
	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
	static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
	       IGP02E1000_PHY_AGC_A,
	       IGP02E1000_PHY_AGC_B,
	       IGP02E1000_PHY_AGC_C,
	       IGP02E1000_PHY_AGC_D
	};

	/* Read the AGC registers for all channels */
	for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
		ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data);
		if (ret_val)
			return ret_val;

		/*
		 * Getting bits 15:9, which represent the combination of
		 * coarse and fine gain values.  The result is a number
		 * that can be put into the lookup table to obtain the
		 * approximate cable length.
		 */
		cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
		    IGP02E1000_AGC_LENGTH_MASK;

		/* Array index bound check. */
		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
		    (cur_agc_index == 0))
			return -E1000_ERR_PHY;

		/*
		 * Remove min & max AGC values from calculation: track the
		 * indices whose table values are currently smallest and
		 * largest so they can be subtracted out below.
		 */
		if (e1000_igp_2_cable_length_table[min_agc_index] >
		    e1000_igp_2_cable_length_table[cur_agc_index])
			min_agc_index = cur_agc_index;
		if (e1000_igp_2_cable_length_table[max_agc_index] <
		    e1000_igp_2_cable_length_table[cur_agc_index])
			max_agc_index = cur_agc_index;

		agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
	}

	/* Average only the two middle channels (min and max removed). */
	agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
		      e1000_igp_2_cable_length_table[max_agc_index]);
	agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);

	/* Calculate cable length with the error range of +/- 10 meters. */
	phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
	    (agc_value - IGP02E1000_AGC_RANGE) : 0;
	phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;

	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;

	return ret_val;
}
1923
/**
 * e1000e_get_phy_info_m88 - Retrieve PHY information
 * @hw: pointer to the HW structure
 *
 * Valid for only copper links.  Read the PHY status register (sticky read)
 * to verify that link is up.  Read the PHY special control register to
 * determine the polarity and 10base-T extended distance.  Read the PHY
 * special status register to determine MDI/MDIx and current speed.  If
 * speed is 1000, then determine cable length, local and remote receiver.
 **/
s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data;
	bool link;

	/* This routine only makes sense for a copper PHY. */
	if (phy->media_type != e1000_media_type_copper) {
		e_dbg("Phy info is only valid for copper media\n");
		return -E1000_ERR_CONFIG;
	}

	/* Single poll, no delay: just check current link state. */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link) {
		e_dbg("Phy info is only valid if link is up\n");
		return -E1000_ERR_CONFIG;
	}

	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy->polarity_correction = (phy_data &
				    M88E1000_PSCR_POLARITY_REVERSAL);

	ret_val = e1000_check_polarity_m88(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
	if (ret_val)
		return ret_val;

	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX);

	/* Cable length and 1000T receiver status only exist at 1000Mbps. */
	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
		ret_val = e1000_get_cable_length(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
		if (ret_val)
			return ret_val;

		phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
				? e1000_1000t_rx_status_ok
				: e1000_1000t_rx_status_not_ok;

		phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
				 ? e1000_1000t_rx_status_ok
				 : e1000_1000t_rx_status_not_ok;
	} else {
		/* Set values to "undefined" */
		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
		phy->local_rx = e1000_1000t_rx_status_undefined;
		phy->remote_rx = e1000_1000t_rx_status_undefined;
	}

	return ret_val;
}
1997
/**
 * e1000e_get_phy_info_igp - Retrieve igp PHY information
 * @hw: pointer to the HW structure
 *
 * Read PHY status to determine if link is up.  If link is up, then
 * set/determine 10base-T extended distance and polarity correction.  Read
 * PHY port status to determine MDI/MDIx and speed.  Based on the speed,
 * determine on the cable length, local and remote receiver.
 **/
s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	bool link;

	/* Single poll, no delay: just check current link state. */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link) {
		e_dbg("Phy info is only valid if link is up\n");
		return -E1000_ERR_CONFIG;
	}

	/* IGP PHYs always perform polarity correction. */
	phy->polarity_correction = true;

	ret_val = e1000_check_polarity_igp(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
	if (ret_val)
		return ret_val;

	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX);

	/* Cable length and 1000T receiver status only exist at 1000Mbps. */
	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
	    IGP01E1000_PSSR_SPEED_1000MBPS) {
		ret_val = e1000_get_cable_length(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
		if (ret_val)
			return ret_val;

		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
				? e1000_1000t_rx_status_ok
				: e1000_1000t_rx_status_not_ok;

		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
				 ? e1000_1000t_rx_status_ok
				 : e1000_1000t_rx_status_not_ok;
	} else {
		/* Not at gigabit speed; these fields are meaningless. */
		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
		phy->local_rx = e1000_1000t_rx_status_undefined;
		phy->remote_rx = e1000_1000t_rx_status_undefined;
	}

	return ret_val;
}
2060
/**
 * e1000_get_phy_info_ife - Retrieves various IFE PHY states
 * @hw: pointer to the HW structure
 *
 * Populates "phy" structure with various feature states.  The IFE PHY is
 * 10/100 only, so the gigabit-specific fields are set to "undefined".
 **/
s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	bool link;

	/* Single poll, no delay: just check current link state. */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link) {
		e_dbg("Phy info is only valid if link is up\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
	if (ret_val)
		goto out;
	/* Bit set means automatic polarity correction is DISABLED. */
	phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
	                           ? false : true;

	if (phy->polarity_correction) {
		ret_val = e1000_check_polarity_ife(hw);
		if (ret_val)
			goto out;
	} else {
		/* Polarity is forced */
		phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
		                      ? e1000_rev_polarity_reversed
		                      : e1000_rev_polarity_normal;
	}

	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
	if (ret_val)
		goto out;

	phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false;

	/* The following parameters are undefined for 10/100 operation. */
	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
	phy->local_rx = e1000_1000t_rx_status_undefined;
	phy->remote_rx = e1000_1000t_rx_status_undefined;

out:
	return ret_val;
}
2115
2116/**
2117 * e1000e_phy_sw_reset - PHY software reset
2118 * @hw: pointer to the HW structure
2119 *
2120 * Does a software reset of the PHY by reading the PHY control register and
2121 * setting/write the control register reset bit to the PHY.
2122 **/
2123s32 e1000e_phy_sw_reset(struct e1000_hw *hw)
2124{
2125 s32 ret_val;
2126 u16 phy_ctrl;
2127
2128 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
2129 if (ret_val)
2130 return ret_val;
2131
2132 phy_ctrl |= MII_CR_RESET;
2133 ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
2134 if (ret_val)
2135 return ret_val;
2136
2137 udelay(1);
2138
2139 return ret_val;
2140}
2141
/**
 * e1000e_phy_hw_reset_generic - PHY hardware reset
 * @hw: pointer to the HW structure
 *
 * Verify the reset block is not blocking us from resetting.  Acquire
 * semaphore (if necessary) and read/set/write the device control reset
 * bit in the PHY.  Wait the appropriate delay time for the device to
 * reset and release the semaphore (if necessary).
 **/
s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u32 ctrl;

	/*
	 * If the reset is blocked, return success without resetting:
	 * another agent (e.g. manageability firmware) owns the PHY.
	 */
	ret_val = e1000_check_reset_block(hw);
	if (ret_val)
		return 0;

	ret_val = phy->ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Assert PHY_RST in device control, hold it for reset_delay_us. */
	ctrl = er32(CTRL);
	ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
	e1e_flush();

	udelay(phy->reset_delay_us);

	/* Deassert the reset and give the PHY time to come back up. */
	ew32(CTRL, ctrl);
	e1e_flush();

	udelay(150);

	phy->ops.release(hw);

	return e1000_get_phy_cfg_done(hw);
}
2180
2181/**
2182 * e1000e_get_cfg_done - Generic configuration done
2183 * @hw: pointer to the HW structure
2184 *
2185 * Generic function to wait 10 milli-seconds for configuration to complete
2186 * and return success.
2187 **/
2188s32 e1000e_get_cfg_done(struct e1000_hw *hw)
2189{
2190 mdelay(10);
2191 return 0;
2192}
2193
/**
 * e1000e_phy_init_script_igp3 - Inits the IGP3 PHY
 * @hw: pointer to the HW structure
 *
 * Initializes a Intel Gigabit PHY3 when an EEPROM is not present.
 *
 * The register addresses and values below are vendor-supplied magic
 * numbers with no public documentation; do not reorder or "clean up"
 * these writes — the per-line comments are the only specification.
 **/
s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
{
	e_dbg("Running IGP 3 PHY init script\n");

	/* PHY init IGP 3 */
	/* Enable rise/fall, 10-mode work in class-A */
	e1e_wphy(hw, 0x2F5B, 0x9018);
	/* Remove all caps from Replica path filter */
	e1e_wphy(hw, 0x2F52, 0x0000);
	/* Bias trimming for ADC, AFE and Driver (Default) */
	e1e_wphy(hw, 0x2FB1, 0x8B24);
	/* Increase Hybrid poly bias */
	e1e_wphy(hw, 0x2FB2, 0xF8F0);
	/* Add 4% to Tx amplitude in Gig mode */
	e1e_wphy(hw, 0x2010, 0x10B0);
	/* Disable trimming (TTT) */
	e1e_wphy(hw, 0x2011, 0x0000);
	/* Poly DC correction to 94.6% + 2% for all channels */
	e1e_wphy(hw, 0x20DD, 0x249A);
	/* ABS DC correction to 95.9% */
	e1e_wphy(hw, 0x20DE, 0x00D3);
	/* BG temp curve trim */
	e1e_wphy(hw, 0x28B4, 0x04CE);
	/* Increasing ADC OPAMP stage 1 currents to max */
	e1e_wphy(hw, 0x2F70, 0x29E4);
	/* Force 1000 ( required for enabling PHY regs configuration) */
	e1e_wphy(hw, 0x0000, 0x0140);
	/* Set upd_freq to 6 */
	e1e_wphy(hw, 0x1F30, 0x1606);
	/* Disable NPDFE */
	e1e_wphy(hw, 0x1F31, 0xB814);
	/* Disable adaptive fixed FFE (Default) */
	e1e_wphy(hw, 0x1F35, 0x002A);
	/* Enable FFE hysteresis */
	e1e_wphy(hw, 0x1F3E, 0x0067);
	/* Fixed FFE for short cable lengths */
	e1e_wphy(hw, 0x1F54, 0x0065);
	/* Fixed FFE for medium cable lengths */
	e1e_wphy(hw, 0x1F55, 0x002A);
	/* Fixed FFE for long cable lengths */
	e1e_wphy(hw, 0x1F56, 0x002A);
	/* Enable Adaptive Clip Threshold */
	e1e_wphy(hw, 0x1F72, 0x3FB0);
	/* AHT reset limit to 1 */
	e1e_wphy(hw, 0x1F76, 0xC0FF);
	/* Set AHT master delay to 127 msec */
	e1e_wphy(hw, 0x1F77, 0x1DEC);
	/* Set scan bits for AHT */
	e1e_wphy(hw, 0x1F78, 0xF9EF);
	/* Set AHT Preset bits */
	e1e_wphy(hw, 0x1F79, 0x0210);
	/* Change integ_factor of channel A to 3 */
	e1e_wphy(hw, 0x1895, 0x0003);
	/* Change prop_factor of channels BCD to 8 */
	e1e_wphy(hw, 0x1796, 0x0008);
	/* Change cg_icount + enable integbp for channels BCD */
	e1e_wphy(hw, 0x1798, 0xD008);
	/*
	 * Change cg_icount + enable integbp + change prop_factor_master
	 * to 8 for channel A
	 */
	e1e_wphy(hw, 0x1898, 0xD918);
	/* Disable AHT in Slave mode on channel A */
	e1e_wphy(hw, 0x187A, 0x0800);
	/*
	 * Enable LPLU and disable AN to 1000 in non-D0a states,
	 * Enable SPD+B2B
	 */
	e1e_wphy(hw, 0x0019, 0x008D);
	/* Enable restart AN on an1000_dis change */
	e1e_wphy(hw, 0x001B, 0x2080);
	/* Enable wh_fifo read clock in 10/100 modes */
	e1e_wphy(hw, 0x0014, 0x0045);
	/* Restart AN, Speed selection is 1000 */
	e1e_wphy(hw, 0x0000, 0x1340);

	return 0;
}
2278
2279/* Internal function pointers */
2280
2281/**
2282 * e1000_get_phy_cfg_done - Generic PHY configuration done
2283 * @hw: pointer to the HW structure
2284 *
2285 * Return success if silicon family did not implement a family specific
2286 * get_cfg_done function.
2287 **/
2288static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
2289{
2290 if (hw->phy.ops.get_cfg_done)
2291 return hw->phy.ops.get_cfg_done(hw);
2292
2293 return 0;
2294}
2295
2296/**
2297 * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
2298 * @hw: pointer to the HW structure
2299 *
2300 * When the silicon family has not implemented a forced speed/duplex
2301 * function for the PHY, simply return 0.
2302 **/
2303static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
2304{
2305 if (hw->phy.ops.force_speed_duplex)
2306 return hw->phy.ops.force_speed_duplex(hw);
2307
2308 return 0;
2309}
2310
2311/**
2312 * e1000e_get_phy_type_from_id - Get PHY type from id
2313 * @phy_id: phy_id read from the phy
2314 *
2315 * Returns the phy type from the id.
2316 **/
2317enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
2318{
2319 enum e1000_phy_type phy_type = e1000_phy_unknown;
2320
2321 switch (phy_id) {
2322 case M88E1000_I_PHY_ID:
2323 case M88E1000_E_PHY_ID:
2324 case M88E1111_I_PHY_ID:
2325 case M88E1011_I_PHY_ID:
2326 phy_type = e1000_phy_m88;
2327 break;
2328 case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
2329 phy_type = e1000_phy_igp_2;
2330 break;
2331 case GG82563_E_PHY_ID:
2332 phy_type = e1000_phy_gg82563;
2333 break;
2334 case IGP03E1000_E_PHY_ID:
2335 phy_type = e1000_phy_igp_3;
2336 break;
2337 case IFE_E_PHY_ID:
2338 case IFE_PLUS_E_PHY_ID:
2339 case IFE_C_E_PHY_ID:
2340 phy_type = e1000_phy_ife;
2341 break;
2342 case BME1000_E_PHY_ID:
2343 case BME1000_E_PHY_ID_R2:
2344 phy_type = e1000_phy_bm;
2345 break;
2346 case I82578_E_PHY_ID:
2347 phy_type = e1000_phy_82578;
2348 break;
2349 case I82577_E_PHY_ID:
2350 phy_type = e1000_phy_82577;
2351 break;
2352 case I82579_E_PHY_ID:
2353 phy_type = e1000_phy_82579;
2354 break;
2355 default:
2356 phy_type = e1000_phy_unknown;
2357 break;
2358 }
2359 return phy_type;
2360}
2361
2362/**
2363 * e1000e_determine_phy_address - Determines PHY address.
2364 * @hw: pointer to the HW structure
2365 *
2366 * This uses a trial and error method to loop through possible PHY
2367 * addresses. It tests each by reading the PHY ID registers and
2368 * checking for a match.
2369 **/
2370s32 e1000e_determine_phy_address(struct e1000_hw *hw)
2371{
2372 s32 ret_val = -E1000_ERR_PHY_TYPE;
2373 u32 phy_addr = 0;
2374 u32 i;
2375 enum e1000_phy_type phy_type = e1000_phy_unknown;
2376
2377 hw->phy.id = phy_type;
2378
2379 for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
2380 hw->phy.addr = phy_addr;
2381 i = 0;
2382
2383 do {
2384 e1000e_get_phy_id(hw);
2385 phy_type = e1000e_get_phy_type_from_id(hw->phy.id);
2386
2387 /*
2388 * If phy_type is valid, break - we found our
2389 * PHY address
2390 */
2391 if (phy_type != e1000_phy_unknown) {
2392 ret_val = 0;
2393 goto out;
2394 }
2395 usleep_range(1000, 2000);
2396 i++;
2397 } while (i < 10);
2398 }
2399
2400out:
2401 return ret_val;
2402}
2403
2404/**
2405 * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address
2406 * @page: page to access
2407 *
2408 * Returns the phy address for the page requested.
2409 **/
2410static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
2411{
2412 u32 phy_addr = 2;
2413
2414 if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31))
2415 phy_addr = 1;
2416
2417 return phy_addr;
2418}
2419
/**
 * e1000e_write_phy_reg_bm - Write BM PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Acquires semaphore, if necessary, then writes the data to PHY register
 * at the offset.  Release any acquired semaphores before exiting.
 **/
s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
{
	s32 ret_val;
	u32 page = offset >> IGP_PAGE_SHIFT;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
							 false, false);
		goto out;
	}

	/* Side effect: the page lookup updates hw->phy.addr. */
	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);

	/* Registers beyond the multi-page limit need explicit page select */
	if (offset > MAX_PHY_MULTI_PAGE_REG) {
		u32 page_shift, page_select;

		/*
		 * Page select is register 31 for phy address 1 and 22 for
		 * phy address 2 and 3.  Page select is shifted only for
		 * phy address 1.
		 */
		if (hw->phy.addr == 1) {
			page_shift = IGP_PAGE_SHIFT;
			page_select = IGP01E1000_PHY_PAGE_SELECT;
		} else {
			page_shift = 0;
			page_select = BM_PHY_PAGE_SELECT;
		}

		/* Page is shifted left, PHY expects (page x 32) */
		ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
		                                    (page << page_shift));
		if (ret_val)
			goto out;
	}

	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
	                                    data);

out:
	/* Semaphore is always released, even on the error paths. */
	hw->phy.ops.release(hw);
	return ret_val;
}
2477
/**
 * e1000e_read_phy_reg_bm - Read BM PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Acquires semaphore, if necessary, then reads the PHY register at offset
 * and storing the retrieved information in data.  Release any acquired
 * semaphores before exiting.
 **/
s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;
	u32 page = offset >> IGP_PAGE_SHIFT;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
							 true, false);
		goto out;
	}

	/* Side effect: the page lookup updates hw->phy.addr. */
	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);

	/* Registers beyond the multi-page limit need explicit page select */
	if (offset > MAX_PHY_MULTI_PAGE_REG) {
		u32 page_shift, page_select;

		/*
		 * Page select is register 31 for phy address 1 and 22 for
		 * phy address 2 and 3.  Page select is shifted only for
		 * phy address 1.
		 */
		if (hw->phy.addr == 1) {
			page_shift = IGP_PAGE_SHIFT;
			page_select = IGP01E1000_PHY_PAGE_SELECT;
		} else {
			page_shift = 0;
			page_select = BM_PHY_PAGE_SELECT;
		}

		/* Page is shifted left, PHY expects (page x 32) */
		ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
		                                    (page << page_shift));
		if (ret_val)
			goto out;
	}

	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
	                                   data);
out:
	/* Semaphore is always released, even on the error paths. */
	hw->phy.ops.release(hw);
	return ret_val;
}
2535
/**
 * e1000e_read_phy_reg_bm2 - Read BM PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Acquires semaphore, if necessary, then reads the PHY register at offset
 * and storing the retrieved information in data.  Release any acquired
 * semaphores before exiting.  Unlike e1000e_read_phy_reg_bm(), the BM2
 * variant always uses PHY address 1.
 **/
s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;
	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
							 true, false);
		goto out;
	}

	hw->phy.addr = 1;

	/* Registers beyond the multi-page limit need explicit page select */
	if (offset > MAX_PHY_MULTI_PAGE_REG) {

		/* Page is shifted left, PHY expects (page x 32) */
		ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
						    page);

		if (ret_val)
			goto out;
	}

	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
					   data);
out:
	/* Semaphore is always released, even on the error paths. */
	hw->phy.ops.release(hw);
	return ret_val;
}
2580
/**
 * e1000e_write_phy_reg_bm2 - Write BM PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Acquires semaphore, if necessary, then writes the data to PHY register
 * at the offset.  Release any acquired semaphores before exiting.  Unlike
 * e1000e_write_phy_reg_bm(), the BM2 variant always uses PHY address 1.
 **/
s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
{
	s32 ret_val;
	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
							 false, false);
		goto out;
	}

	hw->phy.addr = 1;

	/* Registers beyond the multi-page limit need explicit page select */
	if (offset > MAX_PHY_MULTI_PAGE_REG) {
		/* Page is shifted left, PHY expects (page x 32) */
		ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
						    page);

		if (ret_val)
			goto out;
	}

	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
					    data);

out:
	/* Semaphore is always released, even on the error paths. */
	hw->phy.ops.release(hw);
	return ret_val;
}
2624
/**
 * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
 * @hw: pointer to the HW structure
 * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
 *
 * Assumes semaphore already acquired and phy_reg points to a valid memory
 * address to store contents of the BM_WUC_ENABLE_REG register.  On success
 * the PHY page is left set to BM_WUC_PAGE so the caller can access wakeup
 * registers; the caller must later restore *phy_reg via
 * e1000_disable_phy_wakeup_reg_access_bm().
 **/
s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
{
	s32 ret_val;
	u16 temp;

	/* All page select, port ctrl and wakeup registers use phy address 1 */
	hw->phy.addr = 1;

	/* Select Port Control Registers page */
	ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
	if (ret_val) {
		e_dbg("Could not set Port Control page\n");
		goto out;
	}

	/* Save the original BM_WUC_ENABLE_REG so it can be restored later. */
	ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
	if (ret_val) {
		e_dbg("Could not read PHY register %d.%d\n",
		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
		goto out;
	}

	/*
	 * Enable both PHY wakeup mode and Wakeup register page writes.
	 * Prevent a power state change by disabling ME and Host PHY wakeup.
	 */
	temp = *phy_reg;
	temp |= BM_WUC_ENABLE_BIT;
	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);

	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp);
	if (ret_val) {
		e_dbg("Could not write PHY register %d.%d\n",
		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
		goto out;
	}

	/* Select Host Wakeup Registers page */
	ret_val = e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));

	/* caller now able to write registers on the Wakeup registers page */
out:
	return ret_val;
}
2677
2678/**
2679 * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
2680 * @hw: pointer to the HW structure
2681 * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
2682 *
2683 * Restore BM_WUC_ENABLE_REG to its original value.
2684 *
2685 * Assumes semaphore already acquired and *phy_reg is the contents of the
2686 * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
2687 * caller.
2688 **/
2689s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
2690{
2691 s32 ret_val = 0;
2692
2693 /* Select Port Control Registers page */
2694 ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
2695 if (ret_val) {
2696 e_dbg("Could not set Port Control page\n");
2697 goto out;
2698 }
2699
2700 /* Restore 769.17 to its original value */
2701 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg);
2702 if (ret_val)
2703 e_dbg("Could not restore PHY register %d.%d\n",
2704 BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
2705out:
2706 return ret_val;
2707}
2708
/**
 * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read or written
 * @data: pointer to the data to read or write
 * @read: determines if operation is read or write
 * @page_set: BM_WUC_PAGE already set and access enabled
 *
 * Read the PHY register at offset and store the retrieved information in
 * data, or write data to PHY register at offset.  Note the procedure to
 * access the PHY wakeup registers is different than reading the other PHY
 * registers.  It works as such:
 * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 * 2) Set page to 800 for host (801 if we were manageability)
 * 3) Write the address using the address opcode (0x11)
 * 4) Read or write the data using the data opcode (0x12)
 * 5) Restore 769.17.2 to its original value
 *
 * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and
 * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm().
 *
 * Assumes semaphore is already acquired.  When page_set==true, assumes
 * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 * is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_bm()).
 **/
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
					  u16 *data, bool read, bool page_set)
{
	s32 ret_val;
	u16 reg = BM_PHY_REG_NUM(offset);
	u16 page = BM_PHY_REG_PAGE(offset);
	u16 phy_reg = 0;

	/* Gig must be disabled for MDIO accesses to Host Wakeup reg page */
	/* Diagnostic only — the access is still attempted below. */
	if ((hw->mac.type == e1000_pchlan) &&
	    (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
		e_dbg("Attempting to access page %d while gig enabled.\n",
		      page);

	if (!page_set) {
		/* Enable access to PHY wakeup registers (steps 1-2 above) */
		ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
		if (ret_val) {
			e_dbg("Could not enable PHY wakeup reg access\n");
			goto out;
		}
	}

	e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg);

	/* Write the Wakeup register page offset value using opcode 0x11 */
	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
	if (ret_val) {
		e_dbg("Could not write address opcode to page %d\n", page);
		goto out;
	}

	if (read) {
		/* Read the Wakeup register page value using opcode 0x12 */
		ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
						   data);
	} else {
		/* Write the Wakeup register page value using opcode 0x12 */
		ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
						    *data);
	}

	if (ret_val) {
		e_dbg("Could not access PHY reg %d.%d\n", page, reg);
		goto out;
	}

	/* Step 5: restore the saved BM_WUC_ENABLE_REG (only if we set it) */
	if (!page_set)
		ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

out:
	return ret_val;
}
2787
2788/**
2789 * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
2790 * @hw: pointer to the HW structure
2791 *
2792 * In the case of a PHY power down to save power, or to turn off link during a
2793 * driver unload, or wake on lan is not enabled, restore the link to previous
2794 * settings.
2795 **/
2796void e1000_power_up_phy_copper(struct e1000_hw *hw)
2797{
2798 u16 mii_reg = 0;
2799
2800 /* The PHY will retain its settings across a power down/up cycle */
2801 e1e_rphy(hw, PHY_CONTROL, &mii_reg);
2802 mii_reg &= ~MII_CR_POWER_DOWN;
2803 e1e_wphy(hw, PHY_CONTROL, mii_reg);
2804}
2805
2806/**
2807 * e1000_power_down_phy_copper - Restore copper link in case of PHY power down
2808 * @hw: pointer to the HW structure
2809 *
2810 * In the case of a PHY power down to save power, or to turn off link during a
2811 * driver unload, or wake on lan is not enabled, restore the link to previous
2812 * settings.
2813 **/
2814void e1000_power_down_phy_copper(struct e1000_hw *hw)
2815{
2816 u16 mii_reg = 0;
2817
2818 /* The PHY will retain its settings across a power down/up cycle */
2819 e1e_rphy(hw, PHY_CONTROL, &mii_reg);
2820 mii_reg |= MII_CR_POWER_DOWN;
2821 e1e_wphy(hw, PHY_CONTROL, mii_reg);
2822 usleep_range(1000, 2000);
2823}
2824
2825/**
2826 * e1000e_commit_phy - Soft PHY reset
2827 * @hw: pointer to the HW structure
2828 *
2829 * Performs a soft PHY reset on those that apply. This is a function pointer
2830 * entry point called by drivers.
2831 **/
2832s32 e1000e_commit_phy(struct e1000_hw *hw)
2833{
2834 if (hw->phy.ops.commit)
2835 return hw->phy.ops.commit(hw);
2836
2837 return 0;
2838}
2839
2840/**
2841 * e1000_set_d0_lplu_state - Sets low power link up state for D0
2842 * @hw: pointer to the HW structure
2843 * @active: boolean used to enable/disable lplu
2844 *
2845 * Success returns 0, Failure returns 1
2846 *
2847 * The low power link up (lplu) state is set to the power management level D0
2848 * and SmartSpeed is disabled when active is true, else clear lplu for D0
2849 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
2850 * is used during Dx states where the power conservation is most important.
2851 * During driver activity, SmartSpeed should be enabled so performance is
2852 * maintained. This is a function pointer entry point called by drivers.
2853 **/
2854static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
2855{
2856 if (hw->phy.ops.set_d0_lplu_state)
2857 return hw->phy.ops.set_d0_lplu_state(hw, active);
2858
2859 return 0;
2860}
2861
2862/**
2863 * __e1000_read_phy_reg_hv - Read HV PHY register
2864 * @hw: pointer to the HW structure
2865 * @offset: register offset to be read
2866 * @data: pointer to the read data
2867 * @locked: semaphore has already been acquired or not
2868 *
2869 * Acquires semaphore, if necessary, then reads the PHY register at offset
2870 * and stores the retrieved information in data. Release any acquired
2871 * semaphore before exiting.
2872 **/
2873static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2874 bool locked, bool page_set)
2875{
2876 s32 ret_val;
2877 u16 page = BM_PHY_REG_PAGE(offset);
2878 u16 reg = BM_PHY_REG_NUM(offset);
2879 u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
2880
2881 if (!locked) {
2882 ret_val = hw->phy.ops.acquire(hw);
2883 if (ret_val)
2884 return ret_val;
2885 }
2886
2887 /* Page 800 works differently than the rest so it has its own func */
2888 if (page == BM_WUC_PAGE) {
2889 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
2890 true, page_set);
2891 goto out;
2892 }
2893
2894 if (page > 0 && page < HV_INTC_FC_PAGE_START) {
2895 ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
2896 data, true);
2897 goto out;
2898 }
2899
2900 if (!page_set) {
2901 if (page == HV_INTC_FC_PAGE_START)
2902 page = 0;
2903
2904 if (reg > MAX_PHY_MULTI_PAGE_REG) {
2905 /* Page is shifted left, PHY expects (page x 32) */
2906 ret_val = e1000_set_page_igp(hw,
2907 (page << IGP_PAGE_SHIFT));
2908
2909 hw->phy.addr = phy_addr;
2910
2911 if (ret_val)
2912 goto out;
2913 }
2914 }
2915
2916 e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
2917 page << IGP_PAGE_SHIFT, reg);
2918
2919 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
2920 data);
2921out:
2922 if (!locked)
2923 hw->phy.ops.release(hw);
2924
2925 return ret_val;
2926}
2927
2928/**
2929 * e1000_read_phy_reg_hv - Read HV PHY register
2930 * @hw: pointer to the HW structure
2931 * @offset: register offset to be read
2932 * @data: pointer to the read data
2933 *
2934 * Acquires semaphore then reads the PHY register at offset and stores
2935 * the retrieved information in data. Release the acquired semaphore
2936 * before exiting.
2937 **/
2938s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
2939{
2940 return __e1000_read_phy_reg_hv(hw, offset, data, false, false);
2941}
2942
2943/**
2944 * e1000_read_phy_reg_hv_locked - Read HV PHY register
2945 * @hw: pointer to the HW structure
2946 * @offset: register offset to be read
2947 * @data: pointer to the read data
2948 *
2949 * Reads the PHY register at offset and stores the retrieved information
2950 * in data. Assumes semaphore already acquired.
2951 **/
2952s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
2953{
2954 return __e1000_read_phy_reg_hv(hw, offset, data, true, false);
2955}
2956
2957/**
2958 * e1000_read_phy_reg_page_hv - Read HV PHY register
2959 * @hw: pointer to the HW structure
2960 * @offset: register offset to write to
2961 * @data: data to write at register offset
2962 *
2963 * Reads the PHY register at offset and stores the retrieved information
2964 * in data. Assumes semaphore already acquired and page already set.
2965 **/
2966s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
2967{
2968 return __e1000_read_phy_reg_hv(hw, offset, data, true, true);
2969}
2970
2971/**
2972 * __e1000_write_phy_reg_hv - Write HV PHY register
2973 * @hw: pointer to the HW structure
2974 * @offset: register offset to write to
2975 * @data: data to write at register offset
2976 * @locked: semaphore has already been acquired or not
2977 *
2978 * Acquires semaphore, if necessary, then writes the data to PHY register
2979 * at the offset. Release any acquired semaphores before exiting.
2980 **/
2981static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2982 bool locked, bool page_set)
2983{
2984 s32 ret_val;
2985 u16 page = BM_PHY_REG_PAGE(offset);
2986 u16 reg = BM_PHY_REG_NUM(offset);
2987 u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
2988
2989 if (!locked) {
2990 ret_val = hw->phy.ops.acquire(hw);
2991 if (ret_val)
2992 return ret_val;
2993 }
2994
2995 /* Page 800 works differently than the rest so it has its own func */
2996 if (page == BM_WUC_PAGE) {
2997 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
2998 false, page_set);
2999 goto out;
3000 }
3001
3002 if (page > 0 && page < HV_INTC_FC_PAGE_START) {
3003 ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
3004 &data, false);
3005 goto out;
3006 }
3007
3008 if (!page_set) {
3009 if (page == HV_INTC_FC_PAGE_START)
3010 page = 0;
3011
3012 /*
3013 * Workaround MDIO accesses being disabled after entering IEEE
3014 * Power Down (when bit 11 of the PHY Control register is set)
3015 */
3016 if ((hw->phy.type == e1000_phy_82578) &&
3017 (hw->phy.revision >= 1) &&
3018 (hw->phy.addr == 2) &&
3019 ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) {
3020 u16 data2 = 0x7EFF;
3021 ret_val = e1000_access_phy_debug_regs_hv(hw,
3022 (1 << 6) | 0x3,
3023 &data2, false);
3024 if (ret_val)
3025 goto out;
3026 }
3027
3028 if (reg > MAX_PHY_MULTI_PAGE_REG) {
3029 /* Page is shifted left, PHY expects (page x 32) */
3030 ret_val = e1000_set_page_igp(hw,
3031 (page << IGP_PAGE_SHIFT));
3032
3033 hw->phy.addr = phy_addr;
3034
3035 if (ret_val)
3036 goto out;
3037 }
3038 }
3039
3040 e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
3041 page << IGP_PAGE_SHIFT, reg);
3042
3043 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
3044 data);
3045
3046out:
3047 if (!locked)
3048 hw->phy.ops.release(hw);
3049
3050 return ret_val;
3051}
3052
3053/**
3054 * e1000_write_phy_reg_hv - Write HV PHY register
3055 * @hw: pointer to the HW structure
3056 * @offset: register offset to write to
3057 * @data: data to write at register offset
3058 *
3059 * Acquires semaphore then writes the data to PHY register at the offset.
3060 * Release the acquired semaphores before exiting.
3061 **/
3062s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
3063{
3064 return __e1000_write_phy_reg_hv(hw, offset, data, false, false);
3065}
3066
3067/**
3068 * e1000_write_phy_reg_hv_locked - Write HV PHY register
3069 * @hw: pointer to the HW structure
3070 * @offset: register offset to write to
3071 * @data: data to write at register offset
3072 *
3073 * Writes the data to PHY register at the offset. Assumes semaphore
3074 * already acquired.
3075 **/
3076s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
3077{
3078 return __e1000_write_phy_reg_hv(hw, offset, data, true, false);
3079}
3080
3081/**
3082 * e1000_write_phy_reg_page_hv - Write HV PHY register
3083 * @hw: pointer to the HW structure
3084 * @offset: register offset to write to
3085 * @data: data to write at register offset
3086 *
3087 * Writes the data to PHY register at the offset. Assumes semaphore
3088 * already acquired and page already set.
3089 **/
3090s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data)
3091{
3092 return __e1000_write_phy_reg_hv(hw, offset, data, true, true);
3093}
3094
3095/**
3096 * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
3097 * @page: page to be accessed
3098 **/
3099static u32 e1000_get_phy_addr_for_hv_page(u32 page)
3100{
3101 u32 phy_addr = 2;
3102
3103 if (page >= HV_INTC_FC_PAGE_START)
3104 phy_addr = 1;
3105
3106 return phy_addr;
3107}
3108
3109/**
3110 * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers
3111 * @hw: pointer to the HW structure
3112 * @offset: register offset to be read or written
3113 * @data: pointer to the data to be read or written
3114 * @read: determines if operation is read or write
3115 *
3116 * Reads the PHY register at offset and stores the retreived information
3117 * in data. Assumes semaphore already acquired. Note that the procedure
3118 * to access these regs uses the address port and data port to read/write.
3119 * These accesses done with PHY address 2 and without using pages.
3120 **/
3121static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
3122 u16 *data, bool read)
3123{
3124 s32 ret_val;
3125 u32 addr_reg = 0;
3126 u32 data_reg = 0;
3127
3128 /* This takes care of the difference with desktop vs mobile phy */
3129 addr_reg = (hw->phy.type == e1000_phy_82578) ?
3130 I82578_ADDR_REG : I82577_ADDR_REG;
3131 data_reg = addr_reg + 1;
3132
3133 /* All operations in this function are phy address 2 */
3134 hw->phy.addr = 2;
3135
3136 /* masking with 0x3F to remove the page from offset */
3137 ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
3138 if (ret_val) {
3139 e_dbg("Could not write the Address Offset port register\n");
3140 goto out;
3141 }
3142
3143 /* Read or write the data value next */
3144 if (read)
3145 ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data);
3146 else
3147 ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
3148
3149 if (ret_val) {
3150 e_dbg("Could not access the Data port register\n");
3151 goto out;
3152 }
3153
3154out:
3155 return ret_val;
3156}
3157
3158/**
3159 * e1000_link_stall_workaround_hv - Si workaround
3160 * @hw: pointer to the HW structure
3161 *
3162 * This function works around a Si bug where the link partner can get
3163 * a link up indication before the PHY does. If small packets are sent
3164 * by the link partner they can be placed in the packet buffer without
3165 * being properly accounted for by the PHY and will stall preventing
3166 * further packets from being received. The workaround is to clear the
3167 * packet buffer after the PHY detects link up.
3168 **/
3169s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
3170{
3171 s32 ret_val = 0;
3172 u16 data;
3173
3174 if (hw->phy.type != e1000_phy_82578)
3175 goto out;
3176
3177 /* Do not apply workaround if in PHY loopback bit 14 set */
3178 e1e_rphy(hw, PHY_CONTROL, &data);
3179 if (data & PHY_CONTROL_LB)
3180 goto out;
3181
3182 /* check if link is up and at 1Gbps */
3183 ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
3184 if (ret_val)
3185 goto out;
3186
3187 data &= BM_CS_STATUS_LINK_UP |
3188 BM_CS_STATUS_RESOLVED |
3189 BM_CS_STATUS_SPEED_MASK;
3190
3191 if (data != (BM_CS_STATUS_LINK_UP |
3192 BM_CS_STATUS_RESOLVED |
3193 BM_CS_STATUS_SPEED_1000))
3194 goto out;
3195
3196 mdelay(200);
3197
3198 /* flush the packets in the fifo buffer */
3199 ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
3200 HV_MUX_DATA_CTRL_FORCE_SPEED);
3201 if (ret_val)
3202 goto out;
3203
3204 ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
3205
3206out:
3207 return ret_val;
3208}
3209
3210/**
3211 * e1000_check_polarity_82577 - Checks the polarity.
3212 * @hw: pointer to the HW structure
3213 *
3214 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
3215 *
3216 * Polarity is determined based on the PHY specific status register.
3217 **/
3218s32 e1000_check_polarity_82577(struct e1000_hw *hw)
3219{
3220 struct e1000_phy_info *phy = &hw->phy;
3221 s32 ret_val;
3222 u16 data;
3223
3224 ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
3225
3226 if (!ret_val)
3227 phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
3228 ? e1000_rev_polarity_reversed
3229 : e1000_rev_polarity_normal;
3230
3231 return ret_val;
3232}
3233
3234/**
3235 * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
3236 * @hw: pointer to the HW structure
3237 *
3238 * Calls the PHY setup function to force speed and duplex.
3239 **/
3240s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
3241{
3242 struct e1000_phy_info *phy = &hw->phy;
3243 s32 ret_val;
3244 u16 phy_data;
3245 bool link;
3246
3247 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
3248 if (ret_val)
3249 goto out;
3250
3251 e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
3252
3253 ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
3254 if (ret_val)
3255 goto out;
3256
3257 udelay(1);
3258
3259 if (phy->autoneg_wait_to_complete) {
3260 e_dbg("Waiting for forced speed/duplex link on 82577 phy\n");
3261
3262 ret_val = e1000e_phy_has_link_generic(hw,
3263 PHY_FORCE_LIMIT,
3264 100000,
3265 &link);
3266 if (ret_val)
3267 goto out;
3268
3269 if (!link)
3270 e_dbg("Link taking longer than expected.\n");
3271
3272 /* Try once more */
3273 ret_val = e1000e_phy_has_link_generic(hw,
3274 PHY_FORCE_LIMIT,
3275 100000,
3276 &link);
3277 if (ret_val)
3278 goto out;
3279 }
3280
3281out:
3282 return ret_val;
3283}
3284
3285/**
3286 * e1000_get_phy_info_82577 - Retrieve I82577 PHY information
3287 * @hw: pointer to the HW structure
3288 *
3289 * Read PHY status to determine if link is up. If link is up, then
3290 * set/determine 10base-T extended distance and polarity correction. Read
3291 * PHY port status to determine MDI/MDIx and speed. Based on the speed,
3292 * determine on the cable length, local and remote receiver.
3293 **/
3294s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
3295{
3296 struct e1000_phy_info *phy = &hw->phy;
3297 s32 ret_val;
3298 u16 data;
3299 bool link;
3300
3301 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
3302 if (ret_val)
3303 goto out;
3304
3305 if (!link) {
3306 e_dbg("Phy info is only valid if link is up\n");
3307 ret_val = -E1000_ERR_CONFIG;
3308 goto out;
3309 }
3310
3311 phy->polarity_correction = true;
3312
3313 ret_val = e1000_check_polarity_82577(hw);
3314 if (ret_val)
3315 goto out;
3316
3317 ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
3318 if (ret_val)
3319 goto out;
3320
3321 phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false;
3322
3323 if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
3324 I82577_PHY_STATUS2_SPEED_1000MBPS) {
3325 ret_val = hw->phy.ops.get_cable_length(hw);
3326 if (ret_val)
3327 goto out;
3328
3329 ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
3330 if (ret_val)
3331 goto out;
3332
3333 phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
3334 ? e1000_1000t_rx_status_ok
3335 : e1000_1000t_rx_status_not_ok;
3336
3337 phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
3338 ? e1000_1000t_rx_status_ok
3339 : e1000_1000t_rx_status_not_ok;
3340 } else {
3341 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
3342 phy->local_rx = e1000_1000t_rx_status_undefined;
3343 phy->remote_rx = e1000_1000t_rx_status_undefined;
3344 }
3345
3346out:
3347 return ret_val;
3348}
3349
3350/**
3351 * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
3352 * @hw: pointer to the HW structure
3353 *
3354 * Reads the diagnostic status register and verifies result is valid before
3355 * placing it in the phy_cable_length field.
3356 **/
3357s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
3358{
3359 struct e1000_phy_info *phy = &hw->phy;
3360 s32 ret_val;
3361 u16 phy_data, length;
3362
3363 ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
3364 if (ret_val)
3365 goto out;
3366
3367 length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
3368 I82577_DSTATUS_CABLE_LENGTH_SHIFT;
3369
3370 if (length == E1000_CABLE_LENGTH_UNDEFINED)
3371 ret_val = -E1000_ERR_PHY;
3372
3373 phy->cable_length = length;
3374
3375out:
3376 return ret_val;
3377}