Diffstat (limited to 'drivers/net/igb')
-rw-r--r--  drivers/net/igb/Makefile          37
-rw-r--r--  drivers/net/igb/e1000_82575.c   2084
-rw-r--r--  drivers/net/igb/e1000_82575.h    258
-rw-r--r--  drivers/net/igb/e1000_defines.h  834
-rw-r--r--  drivers/net/igb/e1000_hw.h       529
-rw-r--r--  drivers/net/igb/e1000_mac.c     1421
-rw-r--r--  drivers/net/igb/e1000_mac.h       90
-rw-r--r--  drivers/net/igb/e1000_mbx.c      446
-rw-r--r--  drivers/net/igb/e1000_mbx.h       77
-rw-r--r--  drivers/net/igb/e1000_nvm.c      713
-rw-r--r--  drivers/net/igb/e1000_nvm.h       43
-rw-r--r--  drivers/net/igb/e1000_phy.c     2341
-rw-r--r--  drivers/net/igb/e1000_phy.h      136
-rw-r--r--  drivers/net/igb/e1000_regs.h     354
-rw-r--r--  drivers/net/igb/igb.h            415
-rw-r--r--  drivers/net/igb/igb_ethtool.c   2201
-rw-r--r--  drivers/net/igb/igb_main.c      6890
17 files changed, 18869 insertions, 0 deletions
diff --git a/drivers/net/igb/Makefile b/drivers/net/igb/Makefile
new file mode 100644
index 00000000000..c6e4621b626
--- /dev/null
+++ b/drivers/net/igb/Makefile
@@ -0,0 +1,37 @@
1################################################################################
2#
3# Intel 82575 PCI-Express Ethernet Linux driver
4# Copyright(c) 1999 - 2011 Intel Corporation.
5#
6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License,
8# version 2, as published by the Free Software Foundation.
9#
10# This program is distributed in the hope it will be useful, but WITHOUT
11# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13# more details.
14#
15# You should have received a copy of the GNU General Public License along with
16# this program; if not, write to the Free Software Foundation, Inc.,
17# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18#
19# The full GNU General Public License is included in this distribution in
20# the file called "COPYING".
21#
22# Contact Information:
23# Linux NICS <linux.nics@intel.com>
24# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26#
27################################################################################
28
29#
30# Makefile for the Intel(R) 82575 PCI-Express ethernet driver
31#
32
33obj-$(CONFIG_IGB) += igb.o
34
35igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
36 e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o
37
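# Usage note (illustrative, not part of this Makefile): with CONFIG_IGB=m the
# objects listed above are linked into a single igb.ko module, while
# CONFIG_IGB=y builds them directly into the kernel image.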
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
new file mode 100644
index 00000000000..c0857bdfb03
--- /dev/null
+++ b/drivers/net/igb/e1000_82575.c
@@ -0,0 +1,2084 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28/* e1000_82575
29 * e1000_82576, e1000_82580, e1000_i350
30 */
31
32#include <linux/types.h>
33#include <linux/if_ether.h>
34
35#include "e1000_mac.h"
36#include "e1000_82575.h"
37
38static s32 igb_get_invariants_82575(struct e1000_hw *);
39static s32 igb_acquire_phy_82575(struct e1000_hw *);
40static void igb_release_phy_82575(struct e1000_hw *);
41static s32 igb_acquire_nvm_82575(struct e1000_hw *);
42static void igb_release_nvm_82575(struct e1000_hw *);
43static s32 igb_check_for_link_82575(struct e1000_hw *);
44static s32 igb_get_cfg_done_82575(struct e1000_hw *);
45static s32 igb_init_hw_82575(struct e1000_hw *);
46static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
47static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
48static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
49static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
50static s32 igb_reset_hw_82575(struct e1000_hw *);
51static s32 igb_reset_hw_82580(struct e1000_hw *);
52static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
53static s32 igb_setup_copper_link_82575(struct e1000_hw *);
54static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
55static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
56static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
57static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
58static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
59 u16 *);
60static s32 igb_get_phy_id_82575(struct e1000_hw *);
61static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
62static bool igb_sgmii_active_82575(struct e1000_hw *);
63static s32 igb_reset_init_script_82575(struct e1000_hw *);
64static s32 igb_read_mac_addr_82575(struct e1000_hw *);
65static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
66static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
67static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
68static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
69static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw,
70 u16 offset);
71static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
72 u16 offset);
73static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
74static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
75static const u16 e1000_82580_rxpbs_table[] =
76 { 36, 72, 144, 1, 2, 4, 8, 16,
77 35, 70, 140 };
78#define E1000_82580_RXPBS_TABLE_SIZE \
79 (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
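/* Note: this is equivalent to the kernel's ARRAY_SIZE(e1000_82580_rxpbs_table)
 * helper; it is simply open-coded here.
 */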
80
81/**
82 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
83 * @hw: pointer to the HW structure
84 *
85 * Called to determine if the I2C pins are being used for I2C or as an
86 * external MDIO interface since the two options are mutually exclusive.
87 **/
88static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
89{
90 u32 reg = 0;
91 bool ext_mdio = false;
92
93 switch (hw->mac.type) {
94 case e1000_82575:
95 case e1000_82576:
96 reg = rd32(E1000_MDIC);
97 ext_mdio = !!(reg & E1000_MDIC_DEST);
98 break;
99 case e1000_82580:
100 case e1000_i350:
101 reg = rd32(E1000_MDICNFG);
102 ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
103 break;
104 default:
105 break;
106 }
107 return ext_mdio;
108}
109
110static s32 igb_get_invariants_82575(struct e1000_hw *hw)
111{
112 struct e1000_phy_info *phy = &hw->phy;
113 struct e1000_nvm_info *nvm = &hw->nvm;
114 struct e1000_mac_info *mac = &hw->mac;
115 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
116 u32 eecd;
117 s32 ret_val;
118 u16 size;
119 u32 ctrl_ext = 0;
120
121 switch (hw->device_id) {
122 case E1000_DEV_ID_82575EB_COPPER:
123 case E1000_DEV_ID_82575EB_FIBER_SERDES:
124 case E1000_DEV_ID_82575GB_QUAD_COPPER:
125 mac->type = e1000_82575;
126 break;
127 case E1000_DEV_ID_82576:
128 case E1000_DEV_ID_82576_NS:
129 case E1000_DEV_ID_82576_NS_SERDES:
130 case E1000_DEV_ID_82576_FIBER:
131 case E1000_DEV_ID_82576_SERDES:
132 case E1000_DEV_ID_82576_QUAD_COPPER:
133 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
134 case E1000_DEV_ID_82576_SERDES_QUAD:
135 mac->type = e1000_82576;
136 break;
137 case E1000_DEV_ID_82580_COPPER:
138 case E1000_DEV_ID_82580_FIBER:
139 case E1000_DEV_ID_82580_QUAD_FIBER:
140 case E1000_DEV_ID_82580_SERDES:
141 case E1000_DEV_ID_82580_SGMII:
142 case E1000_DEV_ID_82580_COPPER_DUAL:
143 case E1000_DEV_ID_DH89XXCC_SGMII:
144 case E1000_DEV_ID_DH89XXCC_SERDES:
145 case E1000_DEV_ID_DH89XXCC_BACKPLANE:
146 case E1000_DEV_ID_DH89XXCC_SFP:
147 mac->type = e1000_82580;
148 break;
149 case E1000_DEV_ID_I350_COPPER:
150 case E1000_DEV_ID_I350_FIBER:
151 case E1000_DEV_ID_I350_SERDES:
152 case E1000_DEV_ID_I350_SGMII:
153 mac->type = e1000_i350;
154 break;
155 default:
156 return -E1000_ERR_MAC_INIT;
157 break;
158 }
159
160 /* Set media type */
161 /*
162 * The 82575 uses bits 22:23 for link mode. The mode can be changed
163 * based on the EEPROM. We cannot rely upon device ID. There
164 * is no distinguishable difference between fiber and internal
165 * SerDes mode on the 82575. There can be an external PHY attached
166 * on the SGMII interface. For this, we'll set sgmii_active to true.
167 */
168 phy->media_type = e1000_media_type_copper;
169 dev_spec->sgmii_active = false;
170
171 ctrl_ext = rd32(E1000_CTRL_EXT);
172 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
173 case E1000_CTRL_EXT_LINK_MODE_SGMII:
174 dev_spec->sgmii_active = true;
175 break;
176 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
177 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
178 hw->phy.media_type = e1000_media_type_internal_serdes;
179 break;
180 default:
181 break;
182 }
183
184 /* Set mta register count */
185 mac->mta_reg_count = 128;
186 /* Set rar entry count */
187 mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
188 if (mac->type == e1000_82576)
189 mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
190 if (mac->type == e1000_82580)
191 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
192 if (mac->type == e1000_i350)
193 mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
194 /* reset */
195 if (mac->type >= e1000_82580)
196 mac->ops.reset_hw = igb_reset_hw_82580;
197 else
198 mac->ops.reset_hw = igb_reset_hw_82575;
199 /* Set if part includes ASF firmware */
200 mac->asf_firmware_present = true;
201 /* Set if manageability features are enabled. */
202 mac->arc_subsystem_valid =
203 (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
204 ? true : false;
205 /* enable EEE on i350 parts */
206 if (mac->type == e1000_i350)
207 dev_spec->eee_disable = false;
208 else
209 dev_spec->eee_disable = true;
210 /* physical interface link setup */
211 mac->ops.setup_physical_interface =
212 (hw->phy.media_type == e1000_media_type_copper)
213 ? igb_setup_copper_link_82575
214 : igb_setup_serdes_link_82575;
215
216 /* NVM initialization */
217 eecd = rd32(E1000_EECD);
218
219 nvm->opcode_bits = 8;
220 nvm->delay_usec = 1;
221 switch (nvm->override) {
222 case e1000_nvm_override_spi_large:
223 nvm->page_size = 32;
224 nvm->address_bits = 16;
225 break;
226 case e1000_nvm_override_spi_small:
227 nvm->page_size = 8;
228 nvm->address_bits = 8;
229 break;
230 default:
231 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
232 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
233 break;
234 }
235
236 nvm->type = e1000_nvm_eeprom_spi;
237
238 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
239 E1000_EECD_SIZE_EX_SHIFT);
240
241 /*
242 * Added to a constant, "size" becomes the left-shift value
243 * for setting word_size.
244 */
245 size += NVM_WORD_SIZE_BASE_SHIFT;
246
247 /*
248 * Check for invalid size
249 */
250 if ((hw->mac.type == e1000_82576) && (size > 15)) {
251 printk(KERN_NOTICE "igb: The NVM size is not valid, "
252 "defaulting to 32K.\n");
253 size = 15;
254 }
255 nvm->word_size = 1 << size;
256 if (nvm->word_size == (1 << 15))
257 nvm->page_size = 128;
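	/*
	 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in
	 * e1000_defines.h): an EECD size field of 9 gives size = 15, so
	 * word_size = 1 << 15 = 32K words, the largest supported; the SPI
	 * page size is then raised to 128 bytes as above.
	 */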
258
259 /* NVM Function Pointers */
260 nvm->ops.acquire = igb_acquire_nvm_82575;
261 if (nvm->word_size < (1 << 15))
262 nvm->ops.read = igb_read_nvm_eerd;
263 else
264 nvm->ops.read = igb_read_nvm_spi;
265
266 nvm->ops.release = igb_release_nvm_82575;
267 switch (hw->mac.type) {
268 case e1000_82580:
269 nvm->ops.validate = igb_validate_nvm_checksum_82580;
270 nvm->ops.update = igb_update_nvm_checksum_82580;
271 break;
272 case e1000_i350:
273 nvm->ops.validate = igb_validate_nvm_checksum_i350;
274 nvm->ops.update = igb_update_nvm_checksum_i350;
275 break;
276 default:
277 nvm->ops.validate = igb_validate_nvm_checksum;
278 nvm->ops.update = igb_update_nvm_checksum;
279 }
280 nvm->ops.write = igb_write_nvm_spi;
281
282 /* if part supports SR-IOV then initialize mailbox parameters */
283 switch (mac->type) {
284 case e1000_82576:
285 case e1000_i350:
286 igb_init_mbx_params_pf(hw);
287 break;
288 default:
289 break;
290 }
291
292 /* setup PHY parameters */
293 if (phy->media_type != e1000_media_type_copper) {
294 phy->type = e1000_phy_none;
295 return 0;
296 }
297
298 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
299 phy->reset_delay_us = 100;
300
301 ctrl_ext = rd32(E1000_CTRL_EXT);
302
303 /* PHY function pointers */
304 if (igb_sgmii_active_82575(hw)) {
305 phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
306 ctrl_ext |= E1000_CTRL_I2C_ENA;
307 } else {
308 phy->ops.reset = igb_phy_hw_reset;
309 ctrl_ext &= ~E1000_CTRL_I2C_ENA;
310 }
311
312 wr32(E1000_CTRL_EXT, ctrl_ext);
313 igb_reset_mdicnfg_82580(hw);
314
315 if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
316 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
317 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
318 } else if (hw->mac.type >= e1000_82580) {
319 phy->ops.read_reg = igb_read_phy_reg_82580;
320 phy->ops.write_reg = igb_write_phy_reg_82580;
321 } else {
322 phy->ops.read_reg = igb_read_phy_reg_igp;
323 phy->ops.write_reg = igb_write_phy_reg_igp;
324 }
325
326 /* set lan id */
327 hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
328 E1000_STATUS_FUNC_SHIFT;
329
330 /* Set phy->addr and phy->id. */
331 ret_val = igb_get_phy_id_82575(hw);
332 if (ret_val)
333 return ret_val;
334
335 /* Verify phy id and set remaining function pointers */
336 switch (phy->id) {
337 case I347AT4_E_PHY_ID:
338 case M88E1112_E_PHY_ID:
339 case M88E1111_I_PHY_ID:
340 phy->type = e1000_phy_m88;
341 phy->ops.get_phy_info = igb_get_phy_info_m88;
342
343 if (phy->id == I347AT4_E_PHY_ID ||
344 phy->id == M88E1112_E_PHY_ID)
345 phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
346 else
347 phy->ops.get_cable_length = igb_get_cable_length_m88;
348
349 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
350 break;
351 case IGP03E1000_E_PHY_ID:
352 phy->type = e1000_phy_igp_3;
353 phy->ops.get_phy_info = igb_get_phy_info_igp;
354 phy->ops.get_cable_length = igb_get_cable_length_igp_2;
355 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
356 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
357 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
358 break;
359 case I82580_I_PHY_ID:
360 case I350_I_PHY_ID:
361 phy->type = e1000_phy_82580;
362 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
363 phy->ops.get_cable_length = igb_get_cable_length_82580;
364 phy->ops.get_phy_info = igb_get_phy_info_82580;
365 break;
366 default:
367 return -E1000_ERR_PHY;
368 }
369
370 return 0;
371}
372
373/**
374 * igb_acquire_phy_82575 - Acquire rights to access PHY
375 * @hw: pointer to the HW structure
376 *
377 * Acquire access rights to the correct PHY. This is a
378 * function pointer entry point called by the api module.
379 **/
380static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
381{
382 u16 mask = E1000_SWFW_PHY0_SM;
383
384 if (hw->bus.func == E1000_FUNC_1)
385 mask = E1000_SWFW_PHY1_SM;
386 else if (hw->bus.func == E1000_FUNC_2)
387 mask = E1000_SWFW_PHY2_SM;
388 else if (hw->bus.func == E1000_FUNC_3)
389 mask = E1000_SWFW_PHY3_SM;
390
391 return igb_acquire_swfw_sync_82575(hw, mask);
392}
393
394/**
395 * igb_release_phy_82575 - Release rights to access PHY
396 * @hw: pointer to the HW structure
397 *
398 * A wrapper to release access rights to the correct PHY. This is a
399 * function pointer entry point called by the api module.
400 **/
401static void igb_release_phy_82575(struct e1000_hw *hw)
402{
403 u16 mask = E1000_SWFW_PHY0_SM;
404
405 if (hw->bus.func == E1000_FUNC_1)
406 mask = E1000_SWFW_PHY1_SM;
407 else if (hw->bus.func == E1000_FUNC_2)
408 mask = E1000_SWFW_PHY2_SM;
409 else if (hw->bus.func == E1000_FUNC_3)
410 mask = E1000_SWFW_PHY3_SM;
411
412 igb_release_swfw_sync_82575(hw, mask);
413}
414
415/**
416 * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
417 * @hw: pointer to the HW structure
418 * @offset: register offset to be read
419 * @data: pointer to the read data
420 *
421 * Reads the PHY register at offset using the serial gigabit media independent
422 * interface and stores the retrieved information in data.
423 **/
424static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
425 u16 *data)
426{
427 s32 ret_val = -E1000_ERR_PARAM;
428
429 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
430 hw_dbg("PHY Address %u is out of range\n", offset);
431 goto out;
432 }
433
434 ret_val = hw->phy.ops.acquire(hw);
435 if (ret_val)
436 goto out;
437
438 ret_val = igb_read_phy_reg_i2c(hw, offset, data);
439
440 hw->phy.ops.release(hw);
441
442out:
443 return ret_val;
444}
445
446/**
447 * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
448 * @hw: pointer to the HW structure
449 * @offset: register offset to write to
450 * @data: data to write at register offset
451 *
452 * Writes the data to PHY register at the offset using the serial gigabit
453 * media independent interface.
454 **/
455static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
456 u16 data)
457{
458 s32 ret_val = -E1000_ERR_PARAM;
459
460
461 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
462 hw_dbg("PHY Address %u is out of range\n", offset);
463 goto out;
464 }
465
466 ret_val = hw->phy.ops.acquire(hw);
467 if (ret_val)
468 goto out;
469
470 ret_val = igb_write_phy_reg_i2c(hw, offset, data);
471
472 hw->phy.ops.release(hw);
473
474out:
475 return ret_val;
476}
477
478/**
479 * igb_get_phy_id_82575 - Retrieve PHY addr and id
480 * @hw: pointer to the HW structure
481 *
482 * Retrieves the PHY address and ID for PHYs that do and do not use the
483 * sgmii interface.
484 **/
485static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
486{
487 struct e1000_phy_info *phy = &hw->phy;
488 s32 ret_val = 0;
489 u16 phy_id;
490 u32 ctrl_ext;
491 u32 mdic;
492
493 /*
494 * For SGMII PHYs, we try the list of possible addresses until
495 * we find one that works. For non-SGMII PHYs
496 * (e.g. integrated copper PHYs), an address of 1 should
497 * work. On success, phy->addr and phy->id will be set
498 * correctly.
499 */
500 if (!(igb_sgmii_active_82575(hw))) {
501 phy->addr = 1;
502 ret_val = igb_get_phy_id(hw);
503 goto out;
504 }
505
506 if (igb_sgmii_uses_mdio_82575(hw)) {
507 switch (hw->mac.type) {
508 case e1000_82575:
509 case e1000_82576:
510 mdic = rd32(E1000_MDIC);
511 mdic &= E1000_MDIC_PHY_MASK;
512 phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
513 break;
514 case e1000_82580:
515 case e1000_i350:
516 mdic = rd32(E1000_MDICNFG);
517 mdic &= E1000_MDICNFG_PHY_MASK;
518 phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
519 break;
520 default:
521 ret_val = -E1000_ERR_PHY;
522 goto out;
523 break;
524 }
525 ret_val = igb_get_phy_id(hw);
526 goto out;
527 }
528
529 /* Power on sgmii phy if it is disabled */
530 ctrl_ext = rd32(E1000_CTRL_EXT);
531 wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
532 wrfl();
533 msleep(300);
534
535 /*
536 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
537 * Therefore, we need to test 1-7
538 */
539 for (phy->addr = 1; phy->addr < 8; phy->addr++) {
540 ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
541 if (ret_val == 0) {
542 hw_dbg("Vendor ID 0x%08X read at address %u\n",
543 phy_id, phy->addr);
544 /*
545 * At the time of this writing, the M88 part is
546 * the only supported SGMII PHY product.
547 */
548 if (phy_id == M88_VENDOR)
549 break;
550 } else {
551 hw_dbg("PHY address %u was unreadable\n", phy->addr);
552 }
553 }
554
555 /* A valid PHY type couldn't be found. */
556 if (phy->addr == 8) {
557 phy->addr = 0;
558 ret_val = -E1000_ERR_PHY;
559 goto out;
560 } else {
561 ret_val = igb_get_phy_id(hw);
562 }
563
564 /* restore previous sfp cage power state */
565 wr32(E1000_CTRL_EXT, ctrl_ext);
566
567out:
568 return ret_val;
569}
570
571/**
572 * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
573 * @hw: pointer to the HW structure
574 *
575 * Resets the PHY using the serial gigabit media independent interface.
576 **/
577static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
578{
579 s32 ret_val;
580
581 /*
582 * This isn't a true "hard" reset, but is the only reset
583 * available to us at this time.
584 */
585
586 hw_dbg("Soft resetting SGMII attached PHY...\n");
587
588 /*
589 * SFP documentation requires the following to configure the SFP module
590 * to work on SGMII. No further documentation is given.
591 */
592 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
593 if (ret_val)
594 goto out;
595
596 ret_val = igb_phy_sw_reset(hw);
597
598out:
599 return ret_val;
600}
601
602/**
603 * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
604 * @hw: pointer to the HW structure
605 * @active: true to enable LPLU, false to disable
606 *
607 * Sets the LPLU D0 state according to the active flag. When
608 * activating LPLU this function also disables smart speed
609 * and vice versa. LPLU will not be activated unless the
610 * device's autonegotiation advertisement is one of
611 * 10, 10/100 or 10/100/1000 at all duplexes.
612 * This is a function pointer entry point only called by
613 * PHY setup routines.
614 **/
615static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
616{
617 struct e1000_phy_info *phy = &hw->phy;
618 s32 ret_val;
619 u16 data;
620
621 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
622 if (ret_val)
623 goto out;
624
625 if (active) {
626 data |= IGP02E1000_PM_D0_LPLU;
627 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
628 data);
629 if (ret_val)
630 goto out;
631
632 /* When LPLU is enabled, we should disable SmartSpeed */
633 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
634 &data);
635 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
636 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
637 data);
638 if (ret_val)
639 goto out;
640 } else {
641 data &= ~IGP02E1000_PM_D0_LPLU;
642 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
643 data);
644 /*
645 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
646 * during Dx states where the power conservation is most
647 * important. During driver activity we should enable
648 * SmartSpeed, so performance is maintained.
649 */
650 if (phy->smart_speed == e1000_smart_speed_on) {
651 ret_val = phy->ops.read_reg(hw,
652 IGP01E1000_PHY_PORT_CONFIG, &data);
653 if (ret_val)
654 goto out;
655
656 data |= IGP01E1000_PSCFR_SMART_SPEED;
657 ret_val = phy->ops.write_reg(hw,
658 IGP01E1000_PHY_PORT_CONFIG, data);
659 if (ret_val)
660 goto out;
661 } else if (phy->smart_speed == e1000_smart_speed_off) {
662 ret_val = phy->ops.read_reg(hw,
663 IGP01E1000_PHY_PORT_CONFIG, &data);
664 if (ret_val)
665 goto out;
666
667 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
668 ret_val = phy->ops.write_reg(hw,
669 IGP01E1000_PHY_PORT_CONFIG, data);
670 if (ret_val)
671 goto out;
672 }
673 }
674
675out:
676 return ret_val;
677}
678
679/**
680 * igb_acquire_nvm_82575 - Request for access to EEPROM
681 * @hw: pointer to the HW structure
682 *
683 * Acquire the necessary semaphores for exclusive access to the EEPROM.
684 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
685 * Return successful if access grant bit set, else clear the request for
686 * EEPROM access and return -E1000_ERR_NVM (-1).
687 **/
688static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
689{
690 s32 ret_val;
691
692 ret_val = igb_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
693 if (ret_val)
694 goto out;
695
696 ret_val = igb_acquire_nvm(hw);
697
698 if (ret_val)
699 igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
700
701out:
702 return ret_val;
703}
704
705/**
706 * igb_release_nvm_82575 - Release exclusive access to EEPROM
707 * @hw: pointer to the HW structure
708 *
709 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
710 * then release the semaphores acquired.
711 **/
712static void igb_release_nvm_82575(struct e1000_hw *hw)
713{
714 igb_release_nvm(hw);
715 igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
716}
717
718/**
719 * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
720 * @hw: pointer to the HW structure
721 * @mask: specifies which semaphore to acquire
722 *
723 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
724 * will also specify which port we're acquiring the lock for.
725 **/
726static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
727{
728 u32 swfw_sync;
729 u32 swmask = mask;
730 u32 fwmask = mask << 16;
731 s32 ret_val = 0;
732 s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
733
734 while (i < timeout) {
735 if (igb_get_hw_semaphore(hw)) {
736 ret_val = -E1000_ERR_SWFW_SYNC;
737 goto out;
738 }
739
740 swfw_sync = rd32(E1000_SW_FW_SYNC);
741 if (!(swfw_sync & (fwmask | swmask)))
742 break;
743
744 /*
745 * Firmware currently using resource (fwmask)
746 * or other software thread using resource (swmask)
747 */
748 igb_put_hw_semaphore(hw);
749 mdelay(5);
750 i++;
751 }
752
753 if (i == timeout) {
754 hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
755 ret_val = -E1000_ERR_SWFW_SYNC;
756 goto out;
757 }
758
759 swfw_sync |= swmask;
760 wr32(E1000_SW_FW_SYNC, swfw_sync);
761
762 igb_put_hw_semaphore(hw);
763
764out:
765 return ret_val;
766}
767
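/*
 * Usage sketch (illustrative only): callers bracket PHY or NVM access with
 * a matching acquire/release pair, e.g.:
 *
 *	if (!igb_acquire_swfw_sync_82575(hw, E1000_SWFW_PHY0_SM)) {
 *		... access PHY registers ...
 *		igb_release_swfw_sync_82575(hw, E1000_SWFW_PHY0_SM);
 *	}
 *
 * igb_acquire_phy_82575() and igb_acquire_nvm_82575() above are exactly
 * such wrappers, selecting the mask for the active function's PHY or for
 * the EEPROM.
 */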
768/**
769 * igb_release_swfw_sync_82575 - Release SW/FW semaphore
770 * @hw: pointer to the HW structure
771 * @mask: specifies which semaphore to acquire
772 *
773 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
774 * will also specify which port we're releasing the lock for.
775 **/
776static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
777{
778 u32 swfw_sync;
779
780 while (igb_get_hw_semaphore(hw) != 0)
781 	; /* Empty */
782
783 swfw_sync = rd32(E1000_SW_FW_SYNC);
784 swfw_sync &= ~mask;
785 wr32(E1000_SW_FW_SYNC, swfw_sync);
786
787 igb_put_hw_semaphore(hw);
788}
789
790/**
791 * igb_get_cfg_done_82575 - Read config done bit
792 * @hw: pointer to the HW structure
793 *
794 * Read the management control register for the config done bit for
795 * completion status. NOTE: silicon which is EEPROM-less will fail trying
796 * to read the config done bit, so the error is *ONLY* logged and 0 is
797 * returned. If we were to return an error, EEPROM-less silicon
798 * would not be able to be reset or to change link.
799 **/
800static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
801{
802 s32 timeout = PHY_CFG_TIMEOUT;
803 s32 ret_val = 0;
804 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
805
806 if (hw->bus.func == E1000_FUNC_1)
807 mask = E1000_NVM_CFG_DONE_PORT_1;
808 else if (hw->bus.func == E1000_FUNC_2)
809 mask = E1000_NVM_CFG_DONE_PORT_2;
810 else if (hw->bus.func == E1000_FUNC_3)
811 mask = E1000_NVM_CFG_DONE_PORT_3;
812
813 while (timeout) {
814 if (rd32(E1000_EEMNGCTL) & mask)
815 break;
816 msleep(1);
817 timeout--;
818 }
819 if (!timeout)
820 hw_dbg("MNG configuration cycle has not completed.\n");
821
822 /* If EEPROM is not marked present, init the PHY manually */
823 if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
824 (hw->phy.type == e1000_phy_igp_3))
825 igb_phy_init_script_igp3(hw);
826
827 return ret_val;
828}
829
830/**
831 * igb_check_for_link_82575 - Check for link
832 * @hw: pointer to the HW structure
833 *
834 * If sgmii is enabled, then use the pcs register to determine link, otherwise
835 * use the generic interface for determining link.
836 **/
837static s32 igb_check_for_link_82575(struct e1000_hw *hw)
838{
839 s32 ret_val;
840 u16 speed, duplex;
841
842 if (hw->phy.media_type != e1000_media_type_copper) {
843 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
844 &duplex);
845 /*
846 * Use this flag to determine if link needs to be checked or
847 * not. If we have link clear the flag so that we do not
848 * continue to check for link.
849 */
850 hw->mac.get_link_status = !hw->mac.serdes_has_link;
851 } else {
852 ret_val = igb_check_for_copper_link(hw);
853 }
854
855 return ret_val;
856}
857
858/**
859 * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
860 * @hw: pointer to the HW structure
861 **/
862void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
863{
864 u32 reg;
865
866
867 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
868 !igb_sgmii_active_82575(hw))
869 return;
870
871 /* Enable PCS to turn on link */
872 reg = rd32(E1000_PCS_CFG0);
873 reg |= E1000_PCS_CFG_PCS_EN;
874 wr32(E1000_PCS_CFG0, reg);
875
876 /* Power up the laser */
877 reg = rd32(E1000_CTRL_EXT);
878 reg &= ~E1000_CTRL_EXT_SDP3_DATA;
879 wr32(E1000_CTRL_EXT, reg);
880
881 /* flush the write to verify completion */
882 wrfl();
883 msleep(1);
884}
885
886/**
887 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
888 * @hw: pointer to the HW structure
889 * @speed: stores the current speed
890 * @duplex: stores the current duplex
891 *
892 * Using the physical coding sub-layer (PCS), retrieve the current speed and
893 * duplex, then store the values in the pointers provided.
894 **/
895static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
896 u16 *duplex)
897{
898 struct e1000_mac_info *mac = &hw->mac;
899 u32 pcs;
900
901 /* Set up defaults for the return values of this function */
902 mac->serdes_has_link = false;
903 *speed = 0;
904 *duplex = 0;
905
906 /*
907 * Read the PCS Status register for link state. In non-copper mode the
908 * STATUS register is not accurate, so the PCS Status register is used
909 * instead.
910 */
911 pcs = rd32(E1000_PCS_LSTAT);
912
913 /*
914 * The link up bit determines when link is up on autoneg. The sync ok
915 * gets set once both sides sync up and agree upon link. Stable link
916 * can be determined by checking for both link up and link sync ok
917 */
918 if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
919 mac->serdes_has_link = true;
920
921 /* Detect and store PCS speed */
922 if (pcs & E1000_PCS_LSTS_SPEED_1000) {
923 *speed = SPEED_1000;
924 } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
925 *speed = SPEED_100;
926 } else {
927 *speed = SPEED_10;
928 }
929
930 /* Detect and store PCS duplex */
931 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
932 *duplex = FULL_DUPLEX;
933 } else {
934 *duplex = HALF_DUPLEX;
935 }
936 }
937
938 return 0;
939}
940
941/**
942 * igb_shutdown_serdes_link_82575 - Remove link during power down
943 * @hw: pointer to the HW structure
944 *
945 * In the case of fiber serdes, shut down optics and PCS on driver unload
946 * when management pass thru is not enabled.
947 **/
948void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
949{
950 u32 reg;
951
952 if (hw->phy.media_type != e1000_media_type_internal_serdes &&
953     !igb_sgmii_active_82575(hw))
954 return;
955
956 if (!igb_enable_mng_pass_thru(hw)) {
957 /* Disable PCS to turn off link */
958 reg = rd32(E1000_PCS_CFG0);
959 reg &= ~E1000_PCS_CFG_PCS_EN;
960 wr32(E1000_PCS_CFG0, reg);
961
962 /* shutdown the laser */
963 reg = rd32(E1000_CTRL_EXT);
964 reg |= E1000_CTRL_EXT_SDP3_DATA;
965 wr32(E1000_CTRL_EXT, reg);
966
967 /* flush the write to verify completion */
968 wrfl();
969 msleep(1);
970 }
971}
972
973/**
974 * igb_reset_hw_82575 - Reset hardware
975 * @hw: pointer to the HW structure
976 *
977 * This resets the hardware into a known state. This is a
978 * function pointer entry point called by the api module.
979 **/
980static s32 igb_reset_hw_82575(struct e1000_hw *hw)
981{
982 u32 ctrl, icr;
983 s32 ret_val;
984
985 /*
986 * Prevent the PCI-E bus from sticking if there is no TLP connection
987 * on the last TLP read/write transaction when MAC is reset.
988 */
989 ret_val = igb_disable_pcie_master(hw);
990 if (ret_val)
991 hw_dbg("PCI-E Master disable polling has failed.\n");
992
993 /* set the completion timeout for interface */
994 ret_val = igb_set_pcie_completion_timeout(hw);
995 if (ret_val) {
996 hw_dbg("PCI-E Set completion timeout has failed.\n");
997 }
998
999 hw_dbg("Masking off all interrupts\n");
1000 wr32(E1000_IMC, 0xffffffff);
1001
1002 wr32(E1000_RCTL, 0);
1003 wr32(E1000_TCTL, E1000_TCTL_PSP);
1004 wrfl();
1005
1006 msleep(10);
1007
1008 ctrl = rd32(E1000_CTRL);
1009
1010 hw_dbg("Issuing a global reset to MAC\n");
1011 wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);
1012
1013 ret_val = igb_get_auto_rd_done(hw);
1014 if (ret_val) {
1015 /*
1016 * When auto config read does not complete, do not
1017 * return with an error. This can happen in situations
1018 * where there is no eeprom and prevents getting link.
1019 */
1020 hw_dbg("Auto Read Done did not complete\n");
1021 }
1022
1023 /* If EEPROM is not present, run manual init scripts */
1024 if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
1025 igb_reset_init_script_82575(hw);
1026
1027 /* Clear any pending interrupt events. */
1028 wr32(E1000_IMC, 0xffffffff);
1029 icr = rd32(E1000_ICR);
1030
1031 /* Install any alternate MAC address into RAR0 */
1032 ret_val = igb_check_alt_mac_addr(hw);
1033
1034 return ret_val;
1035}
1036
1037/**
1038 * igb_init_hw_82575 - Initialize hardware
1039 * @hw: pointer to the HW structure
1040 *
1041 * This inits the hardware readying it for operation.
1042 **/
1043static s32 igb_init_hw_82575(struct e1000_hw *hw)
1044{
1045 struct e1000_mac_info *mac = &hw->mac;
1046 s32 ret_val;
1047 u16 i, rar_count = mac->rar_entry_count;
1048
1049 /* Initialize identification LED */
1050 ret_val = igb_id_led_init(hw);
1051 if (ret_val) {
1052 hw_dbg("Error initializing identification LED\n");
1053 /* This is not fatal and we should not stop init due to this */
1054 }
1055
1056 /* Disabling VLAN filtering */
1057 hw_dbg("Initializing the IEEE VLAN\n");
1058 igb_clear_vfta(hw);
1059
1060 /* Setup the receive address */
1061 igb_init_rx_addrs(hw, rar_count);
1062
1063 /* Zero out the Multicast HASH table */
1064 hw_dbg("Zeroing the MTA\n");
1065 for (i = 0; i < mac->mta_reg_count; i++)
1066 array_wr32(E1000_MTA, i, 0);
1067
1068 /* Zero out the Unicast HASH table */
1069 hw_dbg("Zeroing the UTA\n");
1070 for (i = 0; i < mac->uta_reg_count; i++)
1071 array_wr32(E1000_UTA, i, 0);
1072
1073 /* Setup link and flow control */
1074 ret_val = igb_setup_link(hw);
1075
1076 /*
1077 * Clear all of the statistics registers (clear on read). It is
1078 * important that we do this after we have tried to establish link
1079 * because the symbol error count will increment wildly if there
1080 * is no link.
1081 */
1082 igb_clear_hw_cntrs_82575(hw);
1083
1084 return ret_val;
1085}
1086
1087/**
1088 * igb_setup_copper_link_82575 - Configure copper link settings
1089 * @hw: pointer to the HW structure
1090 *
1091 * Configures the link for auto-neg or forced speed and duplex. Then we check
1092 * for link, once link is established calls to configure collision distance
1093 * and flow control are called.
1094 **/
1095static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1096{
1097 u32 ctrl;
1098 s32 ret_val;
1099
1100 ctrl = rd32(E1000_CTRL);
1101 ctrl |= E1000_CTRL_SLU;
1102 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1103 wr32(E1000_CTRL, ctrl);
1104
1105 ret_val = igb_setup_serdes_link_82575(hw);
1106 if (ret_val)
1107 goto out;
1108
1109 if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
1110 /* allow time for the SFP cage to power up the phy */
1111 msleep(300);
1112
1113 ret_val = hw->phy.ops.reset(hw);
1114 if (ret_val) {
1115 hw_dbg("Error resetting the PHY.\n");
1116 goto out;
1117 }
1118 }
1119 switch (hw->phy.type) {
1120 case e1000_phy_m88:
1121 if (hw->phy.id == I347AT4_E_PHY_ID ||
1122 hw->phy.id == M88E1112_E_PHY_ID)
1123 ret_val = igb_copper_link_setup_m88_gen2(hw);
1124 else
1125 ret_val = igb_copper_link_setup_m88(hw);
1126 break;
1127 case e1000_phy_igp_3:
1128 ret_val = igb_copper_link_setup_igp(hw);
1129 break;
1130 case e1000_phy_82580:
1131 ret_val = igb_copper_link_setup_82580(hw);
1132 break;
1133 default:
1134 ret_val = -E1000_ERR_PHY;
1135 break;
1136 }
1137
1138 if (ret_val)
1139 goto out;
1140
1141 ret_val = igb_setup_copper_link(hw);
1142out:
1143 return ret_val;
1144}
1145
1146/**
1147 * igb_setup_serdes_link_82575 - Setup link for serdes
1148 * @hw: pointer to the HW structure
1149 *
1150 * Configure the physical coding sub-layer (PCS) link. The PCS link is
1151 * used on copper connections where the serialized gigabit media independent
1152 * interface (sgmii), or serdes fiber is being used. Configures the link
1153 * for auto-negotiation or forces speed/duplex.
1154 **/
1155static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1156{
1157 u32 ctrl_ext, ctrl_reg, reg;
1158 bool pcs_autoneg;
1159 s32 ret_val = E1000_SUCCESS;
1160 u16 data;
1161
1162 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1163 !igb_sgmii_active_82575(hw))
1164 return ret_val;
1165
1166
1167 /*
1168 * On the 82575, SerDes loopback mode persists until it is
1169 * explicitly turned off or a power cycle is performed. A read to
1170 * the register does not indicate its status. Therefore, we ensure
1171 * loopback mode is disabled during initialization.
1172 */
1173 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1174
1175 /* power on the sfp cage if present */
1176 ctrl_ext = rd32(E1000_CTRL_EXT);
1177 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1178 wr32(E1000_CTRL_EXT, ctrl_ext);
1179
1180 ctrl_reg = rd32(E1000_CTRL);
1181 ctrl_reg |= E1000_CTRL_SLU;
1182
1183 if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
1184 /* set both sw defined pins */
1185 ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1186
1187 /* Set switch control to serdes energy detect */
1188 reg = rd32(E1000_CONNSW);
1189 reg |= E1000_CONNSW_ENRGSRC;
1190 wr32(E1000_CONNSW, reg);
1191 }
1192
1193 reg = rd32(E1000_PCS_LCTL);
1194
1195 /* default pcs_autoneg to the same setting as mac autoneg */
1196 pcs_autoneg = hw->mac.autoneg;
1197
1198 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
1199 case E1000_CTRL_EXT_LINK_MODE_SGMII:
1200 /* sgmii mode lets the phy handle forcing speed/duplex */
1201 pcs_autoneg = true;
1202 /* autoneg time out should be disabled for SGMII mode */
1203 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1204 break;
1205 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1206 /* disable PCS autoneg and support parallel detect only */
1207 pcs_autoneg = false; /* fall through */
1208 default:
1209 if (hw->mac.type == e1000_82575 ||
1210 hw->mac.type == e1000_82576) {
1211 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
1212 if (ret_val) {
1213 hw_dbg("NVM Read Error\n");
1214 return ret_val;
1215 }
1216
1217 if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
1218 pcs_autoneg = false;
1219 }
1220
1221 /*
1222 * non-SGMII modes only support a speed of 1000/Full for the
1223 * link so it is best to just force the MAC and let the pcs
1224 * link either autoneg or be forced to 1000/Full
1225 */
1226 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1227 E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1228
1229 /* set speed of 1000/Full if speed/duplex is forced */
1230 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
1231 break;
1232 }
1233
1234 wr32(E1000_CTRL, ctrl_reg);
1235
1236 /*
1237 * New SerDes mode allows for forcing speed or autonegotiating speed
1238 * at 1gb. Autoneg is the default for most drivers and is the mode
1239 * most compatible with older link partners and switches. However,
1240 * both modes are supported by the hardware and by some drivers/tools.
1241 */
1242 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1243 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1244
1245 /*
1246 * We force flow control to prevent the CTRL register values from being
1247 * overwritten by the autonegotiated flow control values
1248 */
1249 reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1250
1251 if (pcs_autoneg) {
1252 /* Set PCS register for autoneg */
1253 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1254 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1255 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1256 } else {
1257 /* Set PCS register for forced link */
1258 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
1259
1260 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1261 }
1262
1263 wr32(E1000_PCS_LCTL, reg);
1264
1265 if (!igb_sgmii_active_82575(hw))
1266 igb_force_mac_fc(hw);
1267
1268 return ret_val;
1269}
1270
1271/**
1272 * igb_sgmii_active_82575 - Return sgmii state
1273 * @hw: pointer to the HW structure
1274 *
1275 * 82575 silicon has a serialized gigabit media independent interface (sgmii)
1276 * which can be enabled for use in embedded applications. Simply
1277 * return the current state of the sgmii interface.
1278 **/
1279static bool igb_sgmii_active_82575(struct e1000_hw *hw)
1280{
1281 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1282 return dev_spec->sgmii_active;
1283}
1284
1285/**
1286 * igb_reset_init_script_82575 - Inits HW defaults after reset
1287 * @hw: pointer to the HW structure
1288 *
1289 * Inits recommended HW defaults after a reset when there is no EEPROM
1290 * detected. This is only for the 82575.
1291 **/
1292static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
1293{
1294 if (hw->mac.type == e1000_82575) {
1295 hw_dbg("Running reset init script for 82575\n");
1296 /* SerDes configuration via SERDESCTRL */
1297 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
1298 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
1299 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
1300 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
1301
1302 /* CCM configuration via CCMCTL register */
1303 igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
1304 igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
1305
1306 /* PCIe lanes configuration */
1307 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
1308 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
1309 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
1310 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
1311
1312 /* PCIe PLL Configuration */
1313 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
1314 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
1315 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
1316 }
1317
1318 return 0;
1319}
1320
1321/**
1322 * igb_read_mac_addr_82575 - Read device MAC address
1323 * @hw: pointer to the HW structure
1324 **/
1325static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1326{
1327 s32 ret_val = 0;
1328
1329 /*
1330 * If there's an alternate MAC address place it in RAR0
1331 * so that it will override the Si installed default perm
1332 * address.
1333 */
1334 ret_val = igb_check_alt_mac_addr(hw);
1335 if (ret_val)
1336 goto out;
1337
1338 ret_val = igb_read_mac_addr(hw);
1339
1340out:
1341 return ret_val;
1342}
1343
1344/**
1345 * igb_power_down_phy_copper_82575 - Remove link during PHY power down
1346 * @hw: pointer to the HW structure
1347 *
1348 * In the case of a PHY power down to save power, or to turn off link during
1349 * a driver unload when wake on LAN is not enabled, remove the link.
1350 **/
1351void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
1352{
1353 /* If the management interface is not enabled, then power down */
1354 if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
1355 igb_power_down_phy_copper(hw);
1356}
1357
1358/**
1359 * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
1360 * @hw: pointer to the HW structure
1361 *
1362 * Clears the hardware counters by reading the counter registers.
1363 **/
1364static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
1365{
1366 igb_clear_hw_cntrs_base(hw);
1367
1368 rd32(E1000_PRC64);
1369 rd32(E1000_PRC127);
1370 rd32(E1000_PRC255);
1371 rd32(E1000_PRC511);
1372 rd32(E1000_PRC1023);
1373 rd32(E1000_PRC1522);
1374 rd32(E1000_PTC64);
1375 rd32(E1000_PTC127);
1376 rd32(E1000_PTC255);
1377 rd32(E1000_PTC511);
1378 rd32(E1000_PTC1023);
1379 rd32(E1000_PTC1522);
1380
1381 rd32(E1000_ALGNERRC);
1382 rd32(E1000_RXERRC);
1383 rd32(E1000_TNCRS);
1384 rd32(E1000_CEXTERR);
1385 rd32(E1000_TSCTC);
1386 rd32(E1000_TSCTFC);
1387
1388 rd32(E1000_MGTPRC);
1389 rd32(E1000_MGTPDC);
1390 rd32(E1000_MGTPTC);
1391
1392 rd32(E1000_IAC);
1393 rd32(E1000_ICRXOC);
1394
1395 rd32(E1000_ICRXPTC);
1396 rd32(E1000_ICRXATC);
1397 rd32(E1000_ICTXPTC);
1398 rd32(E1000_ICTXATC);
1399 rd32(E1000_ICTXQEC);
1400 rd32(E1000_ICTXQMTC);
1401 rd32(E1000_ICRXDMTC);
1402
1403 rd32(E1000_CBTMPC);
1404 rd32(E1000_HTDPMC);
1405 rd32(E1000_CBRMPC);
1406 rd32(E1000_RPTHC);
1407 rd32(E1000_HGPTC);
1408 rd32(E1000_HTCBDPC);
1409 rd32(E1000_HGORCL);
1410 rd32(E1000_HGORCH);
1411 rd32(E1000_HGOTCL);
1412 rd32(E1000_HGOTCH);
1413 rd32(E1000_LENERRS);
1414
1415 /* This register should not be read in copper configurations */
1416 if (hw->phy.media_type == e1000_media_type_internal_serdes ||
1417 igb_sgmii_active_82575(hw))
1418 rd32(E1000_SCVPC);
1419}
1420
1421/**
1422 * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1423 * @hw: pointer to the HW structure
1424 *
1425 * After rx enable, if manageability is enabled then there is likely some
1426 * bad data at the start of the fifo and possibly in the DMA fifo. This
1427 * function clears the fifos and flushes any packets that came in as rx was
1428 * being enabled.
1429 **/
1430void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
1431{
1432 u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
1433 int i, ms_wait;
1434
1435 if (hw->mac.type != e1000_82575 ||
1436 !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
1437 return;
1438
1439 /* Disable all RX queues */
1440 for (i = 0; i < 4; i++) {
1441 rxdctl[i] = rd32(E1000_RXDCTL(i));
1442 wr32(E1000_RXDCTL(i),
1443 rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
1444 }
1445 /* Poll all queues to verify they have shut down */
1446 for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1447 msleep(1);
1448 rx_enabled = 0;
1449 for (i = 0; i < 4; i++)
1450 rx_enabled |= rd32(E1000_RXDCTL(i));
1451 if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
1452 break;
1453 }
1454
1455 if (ms_wait == 10)
1456 hw_dbg("Queue disable timed out after 10ms\n");
1457
1458 /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
1459 * incoming packets are rejected. Then set RCTL.EN and wait 2ms so that
1460 * any packet that arrived while RCTL.EN was being set is flushed
1461 */
1462 rfctl = rd32(E1000_RFCTL);
1463 wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
1464
1465 rlpml = rd32(E1000_RLPML);
1466 wr32(E1000_RLPML, 0);
1467
1468 rctl = rd32(E1000_RCTL);
1469 temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
1470 temp_rctl |= E1000_RCTL_LPE;
1471
1472 wr32(E1000_RCTL, temp_rctl);
1473 wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
1474 wrfl();
1475 msleep(2);
1476
1477 /* Enable RX queues that were previously enabled and restore our
1478 * previous state
1479 */
1480 for (i = 0; i < 4; i++)
1481 wr32(E1000_RXDCTL(i), rxdctl[i]);
1482 wr32(E1000_RCTL, rctl);
1483 wrfl();
1484
1485 wr32(E1000_RLPML, rlpml);
1486 wr32(E1000_RFCTL, rfctl);
1487
1488 /* Flush receive errors generated by workaround */
1489 rd32(E1000_ROC);
1490 rd32(E1000_RNBC);
1491 rd32(E1000_MPC);
1492}
1493
1494/**
1495 * igb_set_pcie_completion_timeout - set pci-e completion timeout
1496 * @hw: pointer to the HW structure
1497 *
1498 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
1499 * however the hardware default for these parts is 500us to 1ms which is less
1500 * than the 10ms recommended by the pci-e spec. To address this we need to
1501 * increase the value to either 10ms to 200ms for capability version 1 config,
1502 * or 16ms to 55ms for version 2.
1503 **/
1504static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
1505{
1506 u32 gcr = rd32(E1000_GCR);
1507 s32 ret_val = 0;
1508 u16 pcie_devctl2;
1509
1510 /* only take action if timeout value is defaulted to 0 */
1511 if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
1512 goto out;
1513
1514 /*
1515 * if the capabilities version is type 1 we can write the
1516 * timeout of 10ms to 200ms through the GCR register
1517 */
1518 if (!(gcr & E1000_GCR_CAP_VER2)) {
1519 gcr |= E1000_GCR_CMPL_TMOUT_10ms;
1520 goto out;
1521 }
1522
1523 /*
1524 * for version 2 capabilities we need to write the config space
1525 * directly in order to set the completion timeout value for
1526 * 16ms to 55ms
1527 */
1528 ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1529 &pcie_devctl2);
1530 if (ret_val)
1531 goto out;
1532
1533 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
1534
1535 ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1536 &pcie_devctl2);
1537out:
1538 /* disable completion timeout resend */
1539 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
1540
1541 wr32(E1000_GCR, gcr);
1542 return ret_val;
1543}
1544
1545/**
1546 * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
1547 * @hw: pointer to the hardware struct
1548 * @enable: state to enter, either enabled or disabled
1549 * @pf: Physical Function pool - do not set anti-spoofing for the PF
1550 *
1551 * enables/disables L2 switch anti-spoofing functionality.
1552 **/
1553void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
1554{
1555 u32 dtxswc;
1556
1557 switch (hw->mac.type) {
1558 case e1000_82576:
1559 case e1000_i350:
1560 dtxswc = rd32(E1000_DTXSWC);
1561 if (enable) {
1562 dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
1563 E1000_DTXSWC_VLAN_SPOOF_MASK);
1564 /* The PF can spoof - it has to in order to
1565 * support emulation mode NICs */
1566 dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
1567 } else {
1568 dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
1569 E1000_DTXSWC_VLAN_SPOOF_MASK);
1570 }
1571 wr32(E1000_DTXSWC, dtxswc);
1572 break;
1573 default:
1574 break;
1575 }
1576}
1577
1578/**
1579 * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
1580 * @hw: pointer to the hardware struct
1581 * @enable: state to enter, either enabled or disabled
1582 *
1583 * enables/disables L2 switch loopback functionality.
1584 **/
1585void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1586{
1587 u32 dtxswc = rd32(E1000_DTXSWC);
1588
1589 if (enable)
1590 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1591 else
1592 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1593
1594 wr32(E1000_DTXSWC, dtxswc);
1595}
1596
1597/**
1598 * igb_vmdq_set_replication_pf - enable or disable vmdq replication
1599 * @hw: pointer to the hardware struct
1600 * @enable: state to enter, either enabled or disabled
1601 *
1602 * enables/disables replication of packets across multiple pools.
1603 **/
1604void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1605{
1606 u32 vt_ctl = rd32(E1000_VT_CTL);
1607
1608 if (enable)
1609 vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
1610 else
1611 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
1612
1613 wr32(E1000_VT_CTL, vt_ctl);
1614}
1615
1616/**
1617 * igb_read_phy_reg_82580 - Read 82580 MDI control register
1618 * @hw: pointer to the HW structure
1619 * @offset: register offset to be read
1620 * @data: pointer to the read data
1621 *
1622 * Reads the MDI control register in the PHY at offset and stores the
1623 * information read to data.
1624 **/
1625static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1626{
1627 s32 ret_val;
1628
1629
1630 ret_val = hw->phy.ops.acquire(hw);
1631 if (ret_val)
1632 goto out;
1633
1634 ret_val = igb_read_phy_reg_mdic(hw, offset, data);
1635
1636 hw->phy.ops.release(hw);
1637
1638out:
1639 return ret_val;
1640}
1641
1642/**
1643 * igb_write_phy_reg_82580 - Write 82580 MDI control register
1644 * @hw: pointer to the HW structure
1645 * @offset: register offset to write to
1646 * @data: data to write to register at offset
1647 *
1648 * Writes data to MDI control register in the PHY at offset.
1649 **/
1650static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
1651{
1652 s32 ret_val;
1653
1654
1655 ret_val = hw->phy.ops.acquire(hw);
1656 if (ret_val)
1657 goto out;
1658
1659 ret_val = igb_write_phy_reg_mdic(hw, offset, data);
1660
1661 hw->phy.ops.release(hw);
1662
1663out:
1664 return ret_val;
1665}
1666
1667/**
1668 * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
1669 * @hw: pointer to the HW structure
1670 *
1671 * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
1672 * the values found in the EEPROM. This addresses an issue in which these
1673 * bits are not restored from EEPROM after reset.
1674 **/
1675static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
1676{
1677 s32 ret_val = 0;
1678 u32 mdicnfg;
1679 u16 nvm_data = 0;
1680
1681 if (hw->mac.type != e1000_82580)
1682 goto out;
1683 if (!igb_sgmii_active_82575(hw))
1684 goto out;
1685
1686 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
1687 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
1688 &nvm_data);
1689 if (ret_val) {
1690 hw_dbg("NVM Read Error\n");
1691 goto out;
1692 }
1693
1694 mdicnfg = rd32(E1000_MDICNFG);
1695 if (nvm_data & NVM_WORD24_EXT_MDIO)
1696 mdicnfg |= E1000_MDICNFG_EXT_MDIO;
1697 if (nvm_data & NVM_WORD24_COM_MDIO)
1698 mdicnfg |= E1000_MDICNFG_COM_MDIO;
1699 wr32(E1000_MDICNFG, mdicnfg);
1700out:
1701 return ret_val;
1702}
1703
1704/**
1705 * igb_reset_hw_82580 - Reset hardware
1706 * @hw: pointer to the HW structure
1707 *
1708 * This resets function or entire device (all ports, etc.)
1709 * to a known state.
1710 **/
1711static s32 igb_reset_hw_82580(struct e1000_hw *hw)
1712{
1713 s32 ret_val = 0;
1714 /* BH SW mailbox bit in SW_FW_SYNC */
1715 u16 swmbsw_mask = E1000_SW_SYNCH_MB;
1716 u32 ctrl, icr;
1717 bool global_device_reset = hw->dev_spec._82575.global_device_reset;
1718
1719
1720 hw->dev_spec._82575.global_device_reset = false;
1721
1722 /* Get current control state. */
1723 ctrl = rd32(E1000_CTRL);
1724
1725 /*
1726 * Prevent the PCI-E bus from sticking if there is no TLP connection
1727 * on the last TLP read/write transaction when MAC is reset.
1728 */
1729 ret_val = igb_disable_pcie_master(hw);
1730 if (ret_val)
1731 hw_dbg("PCI-E Master disable polling has failed.\n");
1732
1733 hw_dbg("Masking off all interrupts\n");
1734 wr32(E1000_IMC, 0xffffffff);
1735 wr32(E1000_RCTL, 0);
1736 wr32(E1000_TCTL, E1000_TCTL_PSP);
1737 wrfl();
1738
1739 msleep(10);
1740
1741 /* Determine whether or not a global dev reset is requested */
1742 if (global_device_reset &&
1743 igb_acquire_swfw_sync_82575(hw, swmbsw_mask))
1744 global_device_reset = false;
1745
1746 if (global_device_reset &&
1747 !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
1748 ctrl |= E1000_CTRL_DEV_RST;
1749 else
1750 ctrl |= E1000_CTRL_RST;
1751
1752 wr32(E1000_CTRL, ctrl);
1753 wrfl();
1754
1755 /* Add delay to ensure DEV_RST has time to complete */
1756 if (global_device_reset)
1757 msleep(5);
1758
1759 ret_val = igb_get_auto_rd_done(hw);
1760 if (ret_val) {
1761 /*
1762 * When auto config read does not complete, do not
1763 * return with an error. This can happen in situations
1764 * where there is no eeprom and prevents getting link.
1765 */
1766 hw_dbg("Auto Read Done did not complete\n");
1767 }
1768
1769 /* If EEPROM is not present, run manual init scripts */
1770 if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
1771 igb_reset_init_script_82575(hw);
1772
1773 /* clear global device reset status bit */
1774 wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
1775
1776 /* Clear any pending interrupt events. */
1777 wr32(E1000_IMC, 0xffffffff);
1778 icr = rd32(E1000_ICR);
1779
1780 ret_val = igb_reset_mdicnfg_82580(hw);
1781 if (ret_val)
1782 hw_dbg("Could not reset MDICNFG based on EEPROM\n");
1783
1784 /* Install any alternate MAC address into RAR0 */
1785 ret_val = igb_check_alt_mac_addr(hw);
1786
1787 /* Release semaphore */
1788 if (global_device_reset)
1789 igb_release_swfw_sync_82575(hw, swmbsw_mask);
1790
1791 return ret_val;
1792}
1793
1794/**
1795 * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
1796 * @data: data received by reading RXPBS register
1797 *
1798 * The 82580 uses a table based approach for packet buffer allocation sizes.
1799 * This function converts the retrieved value into the correct table value:
1800 *      0x0  0x1  0x2  0x3  0x4  0x5  0x6  0x7
1801 * 0x0   36   72  144    1    2    4    8   16
1802 * 0x8   35   70  140  rsv  rsv  rsv  rsv  rsv
1803 **/
1804u16 igb_rxpbs_adjust_82580(u32 data)
1805{
1806 u16 ret_val = 0;
1807
1808 if (data < E1000_82580_RXPBS_TABLE_SIZE)
1809 ret_val = e1000_82580_rxpbs_table[data];
1810
1811 return ret_val;
1812}
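For concreteness, the mapping in the doc comment above transcribes to the array sketched below (the driver's actual e1000_82580_rxpbs_table lives with the other file-scope tables; treat the sketch, and the 11-entry bound implied by E1000_82580_RXPBS_TABLE_SIZE, as an illustration). A call such as igb_rxpbs_adjust_82580(0x2) then yields 144, and any reserved or out-of-range encoding falls through to the default of 0.

/*
 * Illustrative transcription of the table in the comment above; the
 * reserved encodings past 0xA are simply absent from the array.
 */
static const u16 example_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16,	/* encodings 0x0 - 0x7 */
	35, 70, 140			/* encodings 0x8 - 0xA */
};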
1813
1814/**
1815 * igb_validate_nvm_checksum_with_offset - Validate EEPROM
1816 * checksum
1817 * @hw: pointer to the HW structure
1818 * @offset: offset in words of the checksum protected region
1819 *
1820 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
1821 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
1822 **/
1823s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
1824{
1825 s32 ret_val = 0;
1826 u16 checksum = 0;
1827 u16 i, nvm_data;
1828
1829 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
1830 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
1831 if (ret_val) {
1832 hw_dbg("NVM Read Error\n");
1833 goto out;
1834 }
1835 checksum += nvm_data;
1836 }
1837
1838 if (checksum != (u16) NVM_SUM) {
1839 hw_dbg("NVM Checksum Invalid\n");
1840 ret_val = -E1000_ERR_NVM;
1841 goto out;
1842 }
1843
1844out:
1845 return ret_val;
1846}
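The rule being enforced is plain 16-bit modular arithmetic: the sum of the words in the protected region, checksum word included, must come out to NVM_SUM (0xBABA). A minimal host-side sketch of the same check, assuming the region (NVM_CHECKSUM_REG + 1 words) has already been read into a buffer:

/*
 * Sketch only: equivalent arithmetic on a pre-read buffer. The u16
 * accumulator wraps mod 2^16, exactly like the loop above.
 */
static bool example_nvm_region_sum_ok(const u16 *buf, u16 nwords)
{
	u16 sum = 0;
	u16 i;

	for (i = 0; i < nwords; i++)
		sum += buf[i];

	return sum == NVM_SUM;	/* 0xBABA */
}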
1847
1848/**
1849 * igb_update_nvm_checksum_with_offset - Update EEPROM
1850 * checksum
1851 * @hw: pointer to the HW structure
1852 * @offset: offset in words of the checksum protected region
1853 *
1854 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
1855 * up to the checksum. Then calculates the EEPROM checksum and writes the
1856 * value to the EEPROM.
1857 **/
1858s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
1859{
1860 s32 ret_val;
1861 u16 checksum = 0;
1862 u16 i, nvm_data;
1863
1864 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
1865 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
1866 if (ret_val) {
1867 hw_dbg("NVM Read Error while updating checksum.\n");
1868 goto out;
1869 }
1870 checksum += nvm_data;
1871 }
1872 checksum = (u16) NVM_SUM - checksum;
1873 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
1874 &checksum);
1875 if (ret_val)
1876 hw_dbg("NVM Write Error while updating checksum.\n");
1877
1878out:
1879 return ret_val;
1880}
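The stored word is just the balancing term: checksum = NVM_SUM - (sum of the first NVM_CHECKSUM_REG words), mod 2^16. For example, if those words sum to 0x1234, the routine writes 0xA886 at NVM_CHECKSUM_REG + offset, after which igb_validate_nvm_checksum_with_offset() sums the full region to exactly 0xBABA.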
1881
1882/**
1883 * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
1884 * @hw: pointer to the HW structure
1885 *
1886 * Calculates the EEPROM section checksum by reading/adding each word of
1887 * the EEPROM and then verifies that the sum of the EEPROM is
1888 * equal to 0xBABA.
1889 **/
1890static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
1891{
1892 s32 ret_val = 0;
1893 u16 eeprom_regions_count = 1;
1894 u16 j, nvm_data;
1895 u16 nvm_offset;
1896
1897 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
1898 if (ret_val) {
1899 hw_dbg("NVM Read Error\n");
1900 goto out;
1901 }
1902
1903 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
1904 /* if the checksum compatibility bit is set, validate checksums
1905 * for all 4 ports. */
1906 eeprom_regions_count = 4;
1907 }
1908
1909 for (j = 0; j < eeprom_regions_count; j++) {
1910 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
1911 ret_val = igb_validate_nvm_checksum_with_offset(hw,
1912 nvm_offset);
1913 if (ret_val != 0)
1914 goto out;
1915 }
1916
1917out:
1918 return ret_val;
1919}
1920
1921/**
1922 * igb_update_nvm_checksum_82580 - Update EEPROM checksum
1923 * @hw: pointer to the HW structure
1924 *
1925 * Updates the EEPROM section checksums for all 4 ports by reading/adding
1926 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
1927 * checksum and writes the value to the EEPROM.
1928 **/
1929static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
1930{
1931 s32 ret_val;
1932 u16 j, nvm_data;
1933 u16 nvm_offset;
1934
1935 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
1936 if (ret_val) {
1937 hw_dbg("NVM Read Error while updating checksum"
1938 " compatibility bit.\n");
1939 goto out;
1940 }
1941
1942 if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
1943 /* set compatibility bit to validate checksums appropriately */
1944 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
1945 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
1946 &nvm_data);
1947 if (ret_val) {
1948 hw_dbg("NVM Write Error while updating checksum"
1949 " compatibility bit.\n");
1950 goto out;
1951 }
1952 }
1953
1954 for (j = 0; j < 4; j++) {
1955 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
1956 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
1957 if (ret_val)
1958 goto out;
1959 }
1960
1961out:
1962 return ret_val;
1963}
1964
1965/**
1966 * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
1967 * @hw: pointer to the HW structure
1968 *
1969 * Calculates the EEPROM section checksum by reading/adding each word of
1970 * the EEPROM and then verifies that the sum of the EEPROM is
1971 * equal to 0xBABA.
1972 **/
1973static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
1974{
1975 s32 ret_val = 0;
1976 u16 j;
1977 u16 nvm_offset;
1978
1979 for (j = 0; j < 4; j++) {
1980 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
1981 ret_val = igb_validate_nvm_checksum_with_offset(hw,
1982 nvm_offset);
1983 if (ret_val != 0)
1984 goto out;
1985 }
1986
1987out:
1988 return ret_val;
1989}
1990
1991/**
1992 * igb_update_nvm_checksum_i350 - Update EEPROM checksum
1993 * @hw: pointer to the HW structure
1994 *
1995 * Updates the EEPROM section checksums for all 4 ports by reading/adding
1996 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
1997 * checksum and writes the value to the EEPROM.
1998 **/
1999static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
2000{
2001 s32 ret_val = 0;
2002 u16 j;
2003 u16 nvm_offset;
2004
2005 for (j = 0; j < 4; j++) {
2006 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2007 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2008 if (ret_val != 0)
2009 goto out;
2010 }
2011
2012out:
2013 return ret_val;
2014}
2015
2016/**
2017 * igb_set_eee_i350 - Enable/disable EEE support
2018 * @hw: pointer to the HW structure
2019 *
2020 * Enable/disable EEE based on the setting in the dev_spec structure.
2021 *
2022 **/
2023s32 igb_set_eee_i350(struct e1000_hw *hw)
2024{
2025 s32 ret_val = 0;
2026 u32 ipcnfg, eeer, ctrl_ext;
2027
2028 ctrl_ext = rd32(E1000_CTRL_EXT);
2029 if ((hw->mac.type != e1000_i350) ||
2030 (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
2031 goto out;
2032 ipcnfg = rd32(E1000_IPCNFG);
2033 eeer = rd32(E1000_EEER);
2034
2035 /* enable or disable per user setting */
2036 if (!(hw->dev_spec._82575.eee_disable)) {
2037 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
2038 E1000_IPCNFG_EEE_100M_AN);
2039 eeer |= (E1000_EEER_TX_LPI_EN |
2040 E1000_EEER_RX_LPI_EN |
2041 E1000_EEER_LPI_FC);
2042
2043 } else {
2044 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2045 E1000_IPCNFG_EEE_100M_AN);
2046 eeer &= ~(E1000_EEER_TX_LPI_EN |
2047 E1000_EEER_RX_LPI_EN |
2048 E1000_EEER_LPI_FC);
2049 }
2050 wr32(E1000_IPCNFG, ipcnfg);
2051 wr32(E1000_EEER, eeer);
2052out:
2053
2054 return ret_val;
2055}
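A caller-side sketch (the helper name is hypothetical; in the driver the eee_disable flag is set from the ethtool and probe paths): choose the policy in dev_spec, then let igb_set_eee_i350() program IPCNFG/EEER. Note the early exit above: only i350 parts in GMII (copper) link mode are touched; everything else returns 0 without any register writes.

/* Hypothetical helper, for illustration only */
static s32 example_disable_eee(struct e1000_hw *hw)
{
	hw->dev_spec._82575.eee_disable = true;	/* request EEE off */
	return igb_set_eee_i350(hw);		/* clears IPCNFG/EEER bits */
}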
2056
2057static struct e1000_mac_operations e1000_mac_ops_82575 = {
2058 .init_hw = igb_init_hw_82575,
2059 .check_for_link = igb_check_for_link_82575,
2060 .rar_set = igb_rar_set,
2061 .read_mac_addr = igb_read_mac_addr_82575,
2062 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
2063};
2064
2065static struct e1000_phy_operations e1000_phy_ops_82575 = {
2066 .acquire = igb_acquire_phy_82575,
2067 .get_cfg_done = igb_get_cfg_done_82575,
2068 .release = igb_release_phy_82575,
2069};
2070
2071static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
2072 .acquire = igb_acquire_nvm_82575,
2073 .read = igb_read_nvm_eerd,
2074 .release = igb_release_nvm_82575,
2075 .write = igb_write_nvm_spi,
2076};
2077
2078const struct e1000_info e1000_82575_info = {
2079 .get_invariants = igb_get_invariants_82575,
2080 .mac_ops = &e1000_mac_ops_82575,
2081 .phy_ops = &e1000_phy_ops_82575,
2082 .nvm_ops = &e1000_nvm_ops_82575,
2083};
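These function-pointer tables are the dispatch seam that lets one driver core serve 82575/82576/82580/i350 silicon; callers never name a MAC-specific routine directly. A sketch of the consumption pattern (the wrapper name is hypothetical; the call form matches the hw->nvm.ops.read() uses earlier in this file):

/* Hypothetical wrapper showing the indirection */
static s32 example_read_nvm_word(struct e1000_hw *hw, u16 offset, u16 *data)
{
	/* resolves to igb_read_nvm_eerd() via e1000_nvm_ops_82575 */
	return hw->nvm.ops.read(hw, offset, 1, data);
}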
2084
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
new file mode 100644
index 00000000000..786e110011a
--- /dev/null
+++ b/drivers/net/igb/e1000_82575.h
@@ -0,0 +1,258 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_82575_H_
29#define _E1000_82575_H_
30
31extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
32extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
33extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
34extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
35
36#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
37 (ID_LED_DEF1_DEF2 << 8) | \
38 (ID_LED_DEF1_DEF2 << 4) | \
39 (ID_LED_OFF1_ON2))
40
41#define E1000_RAR_ENTRIES_82575 16
42#define E1000_RAR_ENTRIES_82576 24
43#define E1000_RAR_ENTRIES_82580 24
44#define E1000_RAR_ENTRIES_I350 32
45
46#define E1000_SW_SYNCH_MB 0x00000100
47#define E1000_STAT_DEV_RST_SET 0x00100000
48#define E1000_CTRL_DEV_RST 0x20000000
49
50/* SRRCTL bit definitions */
51#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
52#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
53#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
54#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
55#define E1000_SRRCTL_DROP_EN 0x80000000
56#define E1000_SRRCTL_TIMESTAMP 0x40000000
57
58#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
59#define E1000_MRQC_ENABLE_VMDQ 0x00000003
60#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
61#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
62#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
63#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
64
65#define E1000_EICR_TX_QUEUE ( \
66 E1000_EICR_TX_QUEUE0 | \
67 E1000_EICR_TX_QUEUE1 | \
68 E1000_EICR_TX_QUEUE2 | \
69 E1000_EICR_TX_QUEUE3)
70
71#define E1000_EICR_RX_QUEUE ( \
72 E1000_EICR_RX_QUEUE0 | \
73 E1000_EICR_RX_QUEUE1 | \
74 E1000_EICR_RX_QUEUE2 | \
75 E1000_EICR_RX_QUEUE3)
76
77/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
78#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
79#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
80
81/* Receive Descriptor - Advanced */
82union e1000_adv_rx_desc {
83 struct {
84 __le64 pkt_addr; /* Packet buffer address */
85 __le64 hdr_addr; /* Header buffer address */
86 } read;
87 struct {
88 struct {
89 struct {
90 __le16 pkt_info; /* RSS type, Packet type */
91 __le16 hdr_info; /* Split Header,
92 * header buffer length */
93 } lo_dword;
94 union {
95 __le32 rss; /* RSS Hash */
96 struct {
97 __le16 ip_id; /* IP id */
98 __le16 csum; /* Packet Checksum */
99 } csum_ip;
100 } hi_dword;
101 } lower;
102 struct {
103 __le32 status_error; /* ext status/error */
104 __le16 length; /* Packet length */
105 __le16 vlan; /* VLAN tag */
106 } upper;
107 } wb; /* writeback */
108};
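The read and writeback halves of the union overlay the same 16 bytes: software posts buffer addresses through .read, and hardware overwrites the descriptor with .wb on completion. A consumption sketch, assuming the usual contract that the Descriptor Done bit (E1000_RXD_STAT_DD, defined in e1000_defines.h) must be checked before any other writeback field is trusted:

/* Sketch: poll a writeback descriptor for completion */
static bool example_rx_desc_done(const union e1000_adv_rx_desc *rx_desc)
{
	u32 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	return !!(staterr & E1000_RXD_STAT_DD);
}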
109
110#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
111#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
112#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
113#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
114
115/* Transmit Descriptor - Advanced */
116union e1000_adv_tx_desc {
117 struct {
118 __le64 buffer_addr; /* Address of descriptor's data buf */
119 __le32 cmd_type_len;
120 __le32 olinfo_status;
121 } read;
122 struct {
123 __le64 rsvd; /* Reserved */
124 __le32 nxtseq_seed;
125 __le32 status;
126 } wb;
127};
128
129/* Adv Transmit Descriptor Config Masks */
130#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
131#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
132#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
133#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
134#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
135#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
136#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
137#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
138
139/* Context descriptors */
140struct e1000_adv_tx_context_desc {
141 __le32 vlan_macip_lens;
142 __le32 seqnum_seed;
143 __le32 type_tucmd_mlhl;
144 __le32 mss_l4len_idx;
145};
146
147#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
148#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
149#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
150#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
151/* IPSec Encrypt Enable for ESP */
152#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
153#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
154/* Adv ctxt IPSec SA IDX mask */
155/* Adv ctxt IPSec ESP len mask */
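Putting the struct and these shift/mask definitions together, a TSO context descriptor for an IPv4/TCP flow would be packed roughly as below. This is a sketch of the field layout the masks imply, not a drop-in for the driver's own transmit path:

/* Sketch: pack an IPv4/TCP TSO context descriptor */
static void example_fill_tso_context(struct e1000_adv_tx_context_desc *ctxt,
				     u32 maclen, u32 iplen, u32 l4len, u32 mss)
{
	ctxt->vlan_macip_lens =
		cpu_to_le32((maclen << E1000_ADVTXD_MACLEN_SHIFT) | iplen);
	ctxt->seqnum_seed = 0;
	ctxt->type_tucmd_mlhl =
		cpu_to_le32(E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT |
			    E1000_ADVTXD_TUCMD_IPV4 |
			    E1000_ADVTXD_TUCMD_L4T_TCP);
	ctxt->mss_l4len_idx =
		cpu_to_le32((mss << E1000_ADVTXD_MSS_SHIFT) |
			    (l4len << E1000_ADVTXD_L4LEN_SHIFT));
}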
156
157/* Additional Transmit Descriptor Control definitions */
158#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
159/* Tx Queue Arbitration Priority 0=low, 1=high */
160
161/* Additional Receive Descriptor Control definitions */
162#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
163
164/* Direct Cache Access (DCA) definitions */
165#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */
166#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
167
168#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
169#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
170#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
171#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
172
173#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
174#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
175#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
176
177/* Additional DCA related definitions, note change in position of CPUID */
178#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
179#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
180#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
181#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
182
183/* ETQF register bit definitions */
184#define E1000_ETQF_FILTER_ENABLE (1 << 26)
185#define E1000_ETQF_1588 (1 << 30)
186
187/* FTQF register bit definitions */
188#define E1000_FTQF_VF_BP 0x00008000
189#define E1000_FTQF_1588_TIME_STAMP 0x08000000
190#define E1000_FTQF_MASK 0xF0000000
191#define E1000_FTQF_MASK_PROTO_BP 0x10000000
192#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
193
194#define E1000_NVM_APME_82575 0x0400
195#define MAX_NUM_VFS 8
196
197#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */
198#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
199#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
200#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
201#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
202
203/* Easy defines for setting default pool, would normally be left a zero */
204#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
205#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
206
207/* Other useful VMD_CTL register defines */
208#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
209#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
210#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
211
212/* Per VM Offload register setup */
213#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
214#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
215#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
216#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
217#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
218#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
219#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
220#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
221#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
222#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
223
224#define E1000_VLVF_ARRAY_SIZE 32
225#define E1000_VLVF_VLANID_MASK 0x00000FFF
226#define E1000_VLVF_POOLSEL_SHIFT 12
227#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
228#define E1000_VLVF_LVLAN 0x00100000
229#define E1000_VLVF_VLANID_ENABLE 0x80000000
230
231#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
232#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
233
234#define E1000_IOVCTL 0x05BBC
235#define E1000_IOVCTL_REUSE_VFQ 0x00000001
236
237#define E1000_RPLOLR_STRVLAN 0x40000000
238#define E1000_RPLOLR_STRCRC 0x80000000
239
240#define E1000_DTXCTL_8023LL 0x0004
241#define E1000_DTXCTL_VLAN_ADDED 0x0008
242#define E1000_DTXCTL_OOS_ENABLE 0x0010
243#define E1000_DTXCTL_MDP_EN 0x0020
244#define E1000_DTXCTL_SPOOF_INT 0x0040
245
246#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
247
248#define ALL_QUEUES 0xFFFF
249
250/* RX packet buffer size defines */
251#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
252void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
253void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
254void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
255u16 igb_rxpbs_adjust_82580(u32 data);
256s32 igb_set_eee_i350(struct e1000_hw *);
257
258#endif
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
new file mode 100644
index 00000000000..7b8ddd830f1
--- /dev/null
+++ b/drivers/net/igb/e1000_defines.h
@@ -0,0 +1,834 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_DEFINES_H_
29#define _E1000_DEFINES_H_
30
31/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
32#define REQ_TX_DESCRIPTOR_MULTIPLE 8
33#define REQ_RX_DESCRIPTOR_MULTIPLE 8
34
35/* Definitions for power management and wakeup registers */
36/* Wake Up Control */
37#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
38
39/* Wake Up Filter Control */
40#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
41#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
42#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
43#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
44#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
45
46/* Extended Device Control */
47#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
48/* Physical Func Reset Done Indication */
49#define E1000_CTRL_EXT_PFRSTD 0x00004000
50#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
51#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
52#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
53#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
54#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
55#define E1000_CTRL_EXT_EIAME 0x01000000
56#define E1000_CTRL_EXT_IRCA 0x00000001
57/* Interrupt delay cancellation */
58/* Driver loaded bit for FW */
59#define E1000_CTRL_EXT_DRV_LOAD 0x10000000
60/* Interrupt acknowledge Auto-mask */
61/* Clear Interrupt timers after IMS clear */
62/* packet buffer parity error detection enabled */
63/* descriptor FIFO parity error detection enable */
64#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
65#define E1000_I2CCMD_REG_ADDR_SHIFT 16
66#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
67#define E1000_I2CCMD_OPCODE_READ 0x08000000
68#define E1000_I2CCMD_OPCODE_WRITE 0x00000000
69#define E1000_I2CCMD_READY 0x20000000
70#define E1000_I2CCMD_ERROR 0x80000000
71#define E1000_MAX_SGMII_PHY_REG_ADDR 255
72#define E1000_I2CCMD_PHY_TIMEOUT 200
73#define E1000_IVAR_VALID 0x80
74#define E1000_GPIE_NSICR 0x00000001
75#define E1000_GPIE_MSIX_MODE 0x00000010
76#define E1000_GPIE_EIAME 0x40000000
77#define E1000_GPIE_PBA 0x80000000
78
79/* Receive Descriptor bit definitions */
80#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
81#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
82#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
83#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
84#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
85#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
86#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */
87
88#define E1000_RXDEXT_STATERR_CE 0x01000000
89#define E1000_RXDEXT_STATERR_SE 0x02000000
90#define E1000_RXDEXT_STATERR_SEQ 0x04000000
91#define E1000_RXDEXT_STATERR_CXE 0x10000000
92#define E1000_RXDEXT_STATERR_TCPE 0x20000000
93#define E1000_RXDEXT_STATERR_IPE 0x40000000
94#define E1000_RXDEXT_STATERR_RXE 0x80000000
95
96/* Same mask, but for extended and packet split descriptors */
97#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
98 E1000_RXDEXT_STATERR_CE | \
99 E1000_RXDEXT_STATERR_SE | \
100 E1000_RXDEXT_STATERR_SEQ | \
101 E1000_RXDEXT_STATERR_CXE | \
102 E1000_RXDEXT_STATERR_RXE)
103
104#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
105#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
106#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
107#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
108#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
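Taken together with E1000_MRQC_ENABLE_RSS_4Q from e1000_82575.h, a plausible RSS enable looks like the sketch below (the driver's actual field selection lives in igb_main.c; wr32() is the register-write accessor used throughout this diff):

/* Sketch: hash TCP/IP flows over 4 RSS queues */
static void example_enable_rss(struct e1000_hw *hw)
{
	u32 mrqc = E1000_MRQC_ENABLE_RSS_4Q |
		   E1000_MRQC_RSS_FIELD_IPV4 |
		   E1000_MRQC_RSS_FIELD_IPV4_TCP |
		   E1000_MRQC_RSS_FIELD_IPV6 |
		   E1000_MRQC_RSS_FIELD_IPV6_TCP;

	wr32(E1000_MRQC, mrqc);
}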
109
110
111/* Management Control */
112#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
113#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
114#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */
115/* Enable Neighbor Discovery Filtering */
116#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
117#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
118/* Enable MAC address filtering */
119#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
120
121/* Receive Control */
122#define E1000_RCTL_EN 0x00000002 /* enable */
123#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
124#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
125#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enable */
126#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
127#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
128#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
129#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
130#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
131#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
132#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
133#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
134#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
135#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
136#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
137
138/*
139 * Use byte values for the following shift parameters
140 * Usage:
141 * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
142 * E1000_PSRCTL_BSIZE0_MASK) |
143 * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
144 * E1000_PSRCTL_BSIZE1_MASK) |
145 * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
146 * E1000_PSRCTL_BSIZE2_MASK) |
147 * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |;
148 * E1000_PSRCTL_BSIZE3_MASK))
149 * where value0 = [128..16256], default=256
150 * value1 = [1024..64512], default=4096
151 * value2 = [0..64512], default=4096
152 * value3 = [0..64512], default=0
153 */
154
155#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
156#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
157#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
158#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
159
160#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
161#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
162#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
163#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
164
165/* SWFW_SYNC Definitions */
166#define E1000_SWFW_EEP_SM 0x1
167#define E1000_SWFW_PHY0_SM 0x2
168#define E1000_SWFW_PHY1_SM 0x4
169#define E1000_SWFW_PHY2_SM 0x20
170#define E1000_SWFW_PHY3_SM 0x40
171
172/* FACTPS Definitions */
173/* Device Control */
174#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
175#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
176#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
177#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
178#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
179#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
180#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
181#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
182#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
183#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
184#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
185/* Defined polarity of Dock/Undock indication in SDP[0] */
186/* Reset both PHY ports, through PHYRST_N pin */
187/* enable link status from external LINK_0 and LINK_1 pins */
188#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
189#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
190#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
191#define E1000_CTRL_RST 0x04000000 /* Global reset */
192#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
193#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
194#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
195#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
196/* Initiate an interrupt to manageability engine */
197#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
198
199/* Bit definitions for the Management Data IO (MDIO) and Management Data
200 * Clock (MDC) pins in the Device Control Register.
201 */
202
203#define E1000_CONNSW_ENRGSRC 0x4
204#define E1000_PCS_CFG_PCS_EN 8
205#define E1000_PCS_LCTL_FLV_LINK_UP 1
206#define E1000_PCS_LCTL_FSV_100 2
207#define E1000_PCS_LCTL_FSV_1000 4
208#define E1000_PCS_LCTL_FDV_FULL 8
209#define E1000_PCS_LCTL_FSD 0x10
210#define E1000_PCS_LCTL_FORCE_LINK 0x20
211#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
212#define E1000_PCS_LCTL_AN_ENABLE 0x10000
213#define E1000_PCS_LCTL_AN_RESTART 0x20000
214#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
215#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
216
217#define E1000_PCS_LSTS_LINK_OK 1
218#define E1000_PCS_LSTS_SPEED_100 2
219#define E1000_PCS_LSTS_SPEED_1000 4
220#define E1000_PCS_LSTS_DUPLEX_FULL 8
221#define E1000_PCS_LSTS_SYNK_OK 0x10
222
223/* Device Status */
224#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
225#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
226#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
227#define E1000_STATUS_FUNC_SHIFT 2
228#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
229#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
230#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
231#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
232/* Change in Dock/Undock state. Clear on write '0'. */
233/* Status of Master requests. */
234#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000
235/* BMC external code execution disabled */
236
237/* Constants used to interpret the masked PCI-X bus speed. */
238
239#define SPEED_10 10
240#define SPEED_100 100
241#define SPEED_1000 1000
242#define HALF_DUPLEX 1
243#define FULL_DUPLEX 2
244
245
246#define ADVERTISE_10_HALF 0x0001
247#define ADVERTISE_10_FULL 0x0002
248#define ADVERTISE_100_HALF 0x0004
249#define ADVERTISE_100_FULL 0x0008
250#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
251#define ADVERTISE_1000_FULL 0x0020
252
253/* 1000/H is not supported, nor spec-compliant. */
254#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
255 ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
256 ADVERTISE_1000_FULL)
257#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
258 ADVERTISE_100_HALF | ADVERTISE_100_FULL)
259#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
260#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
261#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \
262 ADVERTISE_1000_FULL)
263#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
264
265#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
266
267/* LED Control */
268#define E1000_LEDCTL_LED0_MODE_SHIFT 0
269#define E1000_LEDCTL_LED0_BLINK 0x00000080
270
271#define E1000_LEDCTL_MODE_LED_ON 0xE
272#define E1000_LEDCTL_MODE_LED_OFF 0xF
273
274/* Transmit Descriptor bit definitions */
275#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
276#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
277#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
278#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
279#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
280#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
281#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
282/* Extended desc bits for Linksec and timesync */
283
284/* Transmit Control */
285#define E1000_TCTL_EN 0x00000002 /* enable tx */
286#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
287#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
288#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
289#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
290
291/* DMA Coalescing register fields */
292#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
293 * Watchdog Timer */
294#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
295 * Threshold */
296#define E1000_DMACR_DMACTHR_SHIFT 16
297#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
298 * transactions */
299#define E1000_DMACR_DMAC_LX_SHIFT 28
300#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
301
302#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
303 * Threshold */
304
305#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
306
307#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
308 * Threshold */
309#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
310 * current window */
311
312#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
313 * Current Cnt */
314
315#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
316 * High val */
317#define E1000_FCRTC_RTH_COAL_SHIFT 4
318#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
319
320/* SerDes Control */
321#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
322
323/* Receive Checksum Control */
324#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
325#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
326#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
327#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
328
329/* Header split receive */
330#define E1000_RFCTL_LEF 0x00040000
331
332/* Collision related configuration parameters */
333#define E1000_COLLISION_THRESHOLD 15
334#define E1000_CT_SHIFT 4
335#define E1000_COLLISION_DISTANCE 63
336#define E1000_COLD_SHIFT 12
337
338/* Ethertype field values */
339#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
340
341#define MAX_JUMBO_FRAME_SIZE 0x3F00
342
343/* PBA constants */
344#define E1000_PBA_34K 0x0022
345#define E1000_PBA_64K 0x0040 /* 64KB */
346
347/* SW Semaphore Register */
348#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
349#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
350
351/* Interrupt Cause Read */
352#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
353#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
354#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
355#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
356#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
357#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
358#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
359/* If this bit asserted, the driver should claim the interrupt */
360#define E1000_ICR_INT_ASSERTED 0x80000000
361/* LAN connected device generates an interrupt */
362#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
363
364/* Extended Interrupt Cause Read */
365#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
366#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
367#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
368#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
369#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
370#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
371#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
372#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
373#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
374/* TCP Timer */
375
376/*
377 * This defines the bits that are set in the Interrupt Mask
378 * Set/Read Register. Each bit is documented below:
379 * o RXT0 = Receiver Timer Interrupt (ring 0)
380 * o TXDW = Transmit Descriptor Written Back
381 * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
382 * o RXSEQ = Receive Sequence Error
383 * o LSC = Link Status Change
384 */
385#define IMS_ENABLE_MASK ( \
386 E1000_IMS_RXT0 | \
387 E1000_IMS_TXDW | \
388 E1000_IMS_RXDMT0 | \
389 E1000_IMS_RXSEQ | \
390 E1000_IMS_LSC | \
391 E1000_IMS_DOUTSYNC)
392
393/* Interrupt Mask Set */
394#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
395#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
396#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
397#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
398#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
399#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
400#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
401#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
402
403/* Extended Interrupt Mask Set */
404#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
405
406/* Interrupt Cause Set */
407#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
408#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
409#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
410
411/* Extended Interrupt Cause Set */
412
413/* Transmit Descriptor Control */
414/* Enable the counting of descriptors still to be processed. */
415
416/* Flow Control Constants */
417#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
418#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
419#define FLOW_CONTROL_TYPE 0x8808
420
421/* 802.1q VLAN Packet Size */
422#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
423#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
424
425/* Receive Address */
426/*
427 * Number of high/low register pairs in the RAR. The RAR (Receive Address
428 * Registers) holds the directed and multicast addresses that we monitor.
429 * Technically, we have 16 spots. However, we reserve one of these spots
430 * (RAR[15]) for our directed address used by controllers with
431 * manageability enabled, allowing us room for 15 multicast addresses.
432 */
433#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
434#define E1000_RAL_MAC_ADDR_LEN 4
435#define E1000_RAH_MAC_ADDR_LEN 2
436#define E1000_RAH_POOL_MASK 0x03FC0000
437#define E1000_RAH_POOL_1 0x00040000
438
439/* Error Codes */
440#define E1000_SUCCESS 0
441#define E1000_ERR_NVM 1
442#define E1000_ERR_PHY 2
443#define E1000_ERR_CONFIG 3
444#define E1000_ERR_PARAM 4
445#define E1000_ERR_MAC_INIT 5
446#define E1000_ERR_RESET 9
447#define E1000_ERR_MASTER_REQUESTS_PENDING 10
448#define E1000_BLK_PHY_RESET 12
449#define E1000_ERR_SWFW_SYNC 13
450#define E1000_NOT_IMPLEMENTED 14
451#define E1000_ERR_MBX 15
452#define E1000_ERR_INVALID_ARGUMENT 16
453#define E1000_ERR_NO_SPACE 17
454#define E1000_ERR_NVM_PBA_SECTION 18
455
456/* Loop limit on how long we wait for auto-negotiation to complete */
457#define COPPER_LINK_UP_LIMIT 10
458#define PHY_AUTO_NEG_LIMIT 45
459#define PHY_FORCE_LIMIT 20
460/* Number of 100-microsecond intervals we wait for PCI Express master disable */
461#define MASTER_DISABLE_TIMEOUT 800
462/* Number of milliseconds we wait for PHY configuration done after MAC reset */
463#define PHY_CFG_TIMEOUT 100
464/* Number of 2-millisecond intervals we wait to acquire MDIO ownership. */
465/* Number of milliseconds for NVM auto read done after MAC reset. */
466#define AUTO_READ_DONE_TIMEOUT 10
467
468/* Flow Control */
469#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
470
471#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */
472#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestamping */
473
474#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */
475#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */
476#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
477#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
478#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
479#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
480#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
481#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestamping */
482
483#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
484#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
485#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
486#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
487#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
488#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
489
490#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
491#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
492#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
493#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
494#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
495#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
496#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
497#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
498#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
499#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
500#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
501
502#define E1000_TIMINCA_16NS_SHIFT 24
503
504#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
505#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
506#define E1000_MDICNFG_PHY_MASK 0x03E00000
507#define E1000_MDICNFG_PHY_SHIFT 21
508
509/* PCI Express Control */
510#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
511#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
512#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
513#define E1000_GCR_CAP_VER2 0x00040000
514
515/* mPHY Address Control and Data Registers */
516#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */
517#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
518#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */
519
520/* mPHY PCS CLK Register */
521#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */
522/* mPHY Near End Digital Loopback Override Bit */
523#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
524
525/* PHY Control Register */
526#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
527#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
528#define MII_CR_POWER_DOWN 0x0800 /* Power down */
529#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
530#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
531#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
532#define MII_CR_SPEED_1000 0x0040
533#define MII_CR_SPEED_100 0x2000
534#define MII_CR_SPEED_10 0x0000
535
536/* PHY Status Register */
537#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
538#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
539
540/* Autoneg Advertisement Register */
541#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
542#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
543#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
544#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
545#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
546#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
547
548/* Link Partner Ability Register (Base Page) */
549#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
550#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
551
552/* Autoneg Expansion Register */
553
554/* 1000BASE-T Control Register */
555#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
556#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
557#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
558 /* 0=Configure PHY as Slave */
559#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
560 /* 0=Automatic Master/Slave config */
561
562/* 1000BASE-T Status Register */
563#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
564#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
565
566
567/* PHY 1000 MII Register/Bit Definitions */
568/* PHY Registers defined by IEEE */
569#define PHY_CONTROL 0x00 /* Control Register */
570#define PHY_STATUS 0x01 /* Status Register */
571#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
572#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
573#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
574#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
575#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
576#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
577
578/* NVM Control */
579#define E1000_EECD_SK 0x00000001 /* NVM Clock */
580#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
581#define E1000_EECD_DI 0x00000004 /* NVM Data In */
582#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
583#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
584#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
585#define E1000_EECD_PRES 0x00000100 /* NVM Present */
586/* NVM Addressing bits based on type 0=small, 1=large */
587#define E1000_EECD_ADDR_BITS 0x00000400
588#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
589#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
590#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
591#define E1000_EECD_SIZE_EX_SHIFT 11
592
593/* Offset to data in NVM read/write registers */
594#define E1000_NVM_RW_REG_DATA 16
595#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
596#define E1000_NVM_RW_REG_START 1 /* Start operation */
597#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
598#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
599
600/* NVM Word Offsets */
601#define NVM_COMPAT 0x0003
602#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */
603#define NVM_INIT_CONTROL2_REG 0x000F
604#define NVM_INIT_CONTROL3_PORT_B 0x0014
605#define NVM_INIT_CONTROL3_PORT_A 0x0024
606#define NVM_ALT_MAC_ADDR_PTR 0x0037
607#define NVM_CHECKSUM_REG 0x003F
608#define NVM_COMPATIBILITY_REG_3 0x0003
609#define NVM_COMPATIBILITY_BIT_MASK 0x8000
610
611#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
612#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
613#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */
614#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */
615
616#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0)
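Worked values for the macro above: NVM_82580_LAN_FUNC_OFFSET(0) = 0x000, (1) = 0x080, (2) = 0x0C0, (3) = 0x100 — exactly the per-port word offsets that the 82580/i350 checksum routines in e1000_82575.c iterate over.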
617
618/* Mask bits for fields in Word 0x24 of the NVM */
619#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */
620#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */
621
622/* Mask bits for fields in Word 0x0f of the NVM */
623#define NVM_WORD0F_PAUSE_MASK 0x3000
624#define NVM_WORD0F_ASM_DIR 0x2000
625
626/* Mask bits for fields in Word 0x1a of the NVM */
627
628/* length of string needed to store part num */
629#define E1000_PBANUM_LENGTH 11
630
631/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
632#define NVM_SUM 0xBABA
633
634#define NVM_PBA_OFFSET_0 8
635#define NVM_PBA_OFFSET_1 9
636#define NVM_PBA_PTR_GUARD 0xFAFA
637#define NVM_WORD_SIZE_BASE_SHIFT 6
638
639/* NVM Commands - Microwire */
640
641/* NVM Commands - SPI */
642#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
643#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
644#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
645#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
646#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
647#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
648
649/* SPI NVM Status Register */
650#define NVM_STATUS_RDY_SPI 0x01
651
652/* Word definitions for ID LED Settings */
653#define ID_LED_RESERVED_0000 0x0000
654#define ID_LED_RESERVED_FFFF 0xFFFF
655#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
656 (ID_LED_OFF1_OFF2 << 8) | \
657 (ID_LED_DEF1_DEF2 << 4) | \
658 (ID_LED_DEF1_DEF2))
659#define ID_LED_DEF1_DEF2 0x1
660#define ID_LED_DEF1_ON2 0x2
661#define ID_LED_DEF1_OFF2 0x3
662#define ID_LED_ON1_DEF2 0x4
663#define ID_LED_ON1_ON2 0x5
664#define ID_LED_ON1_OFF2 0x6
665#define ID_LED_OFF1_DEF2 0x7
666#define ID_LED_OFF1_ON2 0x8
667#define ID_LED_OFF1_OFF2 0x9
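Substituting the nibble codes, ID_LED_DEFAULT above evaluates to 0x8911 (LED3 = OFF1_ON2, LED2 = OFF1_OFF2, LED1 = LED0 = DEF1_DEF2); likewise ID_LED_DEFAULT_82575_SERDES in e1000_82575.h comes out to 0x1118.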
668
669#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
670#define IGP_ACTIVITY_LED_ENABLE 0x0300
671#define IGP_LED3_MODE 0x07000000
672
673/* PCI/PCI-X/PCI-EX Config space */
674#define PCIE_DEVICE_CONTROL2 0x28
675#define PCIE_DEVICE_CONTROL2_16ms 0x0005
676
677#define PHY_REVISION_MASK 0xFFFFFFF0
678#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
679#define MAX_PHY_MULTI_PAGE_REG 0xF
680
681/* Bit definitions for valid PHY IDs. */
682/*
683 * I = Integrated
684 * E = External
685 */
686#define M88E1111_I_PHY_ID 0x01410CC0
687#define M88E1112_E_PHY_ID 0x01410C90
688#define I347AT4_E_PHY_ID 0x01410DC0
689#define IGP03E1000_E_PHY_ID 0x02A80390
690#define I82580_I_PHY_ID 0x015403A0
691#define I350_I_PHY_ID 0x015403B0
692#define M88_VENDOR 0x0141
693
694/* M88E1000 Specific Registers */
695#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
696#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
697#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
698
699#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
700#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
701
702/* M88E1000 PHY Specific Control Register */
703#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
704/* 1=CLK125 low, 0=CLK125 toggling */
705#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
706 /* Manual MDI configuration */
707#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
708/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
709#define M88E1000_PSCR_AUTO_X_1000T 0x0040
710/* Auto crossover enabled all speeds */
711#define M88E1000_PSCR_AUTO_X_MODE 0x0060
712/*
713 * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold),
714 * 0=Normal 10BASE-T Rx Threshold
715 */
716/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
717#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
718
719/* M88E1000 PHY Specific Status Register */
720#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
721#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
722#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
723/*
724 * 0 = <50M
725 * 1 = 50-80M
726 * 2 = 80-110M
727 * 3 = 110-140M
728 * 4 = >140M
729 */
730#define M88E1000_PSSR_CABLE_LENGTH 0x0380
731#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
732#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mb/s */
733
734#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
735
736/* M88E1000 Extended PHY Specific Control Register */
737/*
738 * 1 = Lost lock detect enabled.
739 * Will assert lost lock and bring
740 * link down if idle not seen
741 * within 1ms in 1000BASE-T
742 */
743/*
744 * Number of times we will attempt to autonegotiate before downshifting if we
745 * are the master
746 */
747#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
748#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
749/*
750 * Number of times we will attempt to autonegotiate before downshifting if we
751 * are the slave
752 */
753#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
754#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
755#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
756
757/* Intel i347-AT4 Registers */
758
759#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */
760#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */
761#define I347AT4_PAGE_SELECT 0x16
762
763/* i347-AT4 Extended PHY Specific Control Register */
764
765/*
766 * Number of times we will attempt to autonegotiate before downshifting if we
767 * are the master
768 */
769#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
770#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000
771#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000
772#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000
773#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000
774#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000
775#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000
776#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000
777#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000
778#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000
779
780/* i347-AT4 PHY Cable Diagnostics Control */
781#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
782
783/* Marvell 1112 only registers */
784#define M88E1112_VCT_DSP_DISTANCE 0x001A
785
786/* M88EC018 Rev 2 specific DownShift settings */
787#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
788#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
789
790/* MDI Control */
791#define E1000_MDIC_DATA_MASK 0x0000FFFF
792#define E1000_MDIC_REG_MASK 0x001F0000
793#define E1000_MDIC_REG_SHIFT 16
794#define E1000_MDIC_PHY_MASK 0x03E00000
795#define E1000_MDIC_PHY_SHIFT 21
796#define E1000_MDIC_OP_WRITE 0x04000000
797#define E1000_MDIC_OP_READ 0x08000000
798#define E1000_MDIC_READY 0x10000000
799#define E1000_MDIC_INT_EN 0x20000000
800#define E1000_MDIC_ERROR 0x40000000
801#define E1000_MDIC_DEST 0x80000000
802
803/* Thermal Sensor */
804#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */
805#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */
806
807/* Energy Efficient Ethernet */
808#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */
809#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */
810#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */
811#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */
812#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
813
814/* SerDes Control */
815#define E1000_GEN_CTL_READY 0x80000000
816#define E1000_GEN_CTL_ADDRESS_SHIFT 8
817#define E1000_GEN_POLL_TIMEOUT 640
818
819#define E1000_VFTA_ENTRY_SHIFT 5
820#define E1000_VFTA_ENTRY_MASK 0x7F
821#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
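These three masks encode the 4096-bit VLAN Filter Table as E1000_VLAN_FILTER_TBL_SIZE (128) 32-bit registers; a VLAN ID therefore selects register vid >> 5 and bit vid & 0x1F, as sketched below (helper name hypothetical; this mirrors the usual VFTA indexing):

/* Sketch: locate the VFTA register and bit for a VLAN ID */
static void example_vfta_position(u16 vid, u32 *reg_index, u32 *bit_mask)
{
	*reg_index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
	*bit_mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
}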
822
823/* DMA Coalescing register fields */
824#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
825 on DMA coal */
826
827/* Tx Rate-Scheduler Config fields */
828#define E1000_RTTBCNRC_RS_ENA 0x80000000
829#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF
830#define E1000_RTTBCNRC_RF_INT_SHIFT 14
831#define E1000_RTTBCNRC_RF_INT_MASK \
832 (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
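RF_INT and RF_DEC form a fixed-point rate factor with 14 fractional bits. A sketch of how a per-queue limit could be encoded (assumption: this mirrors the VF rate-limit arithmetic in igb_main.c; link_speed and tx_rate in Mbps). For example, a 1000 Mbps link capped at 300 Mbps gives integer part 3 and fraction (100 << 14) / 300 = 5461.

/* Sketch: encode link_speed/tx_rate as a 14-bit fixed-point factor */
static u32 example_rttbcnrc_value(u32 link_speed, u32 tx_rate)
{
	u32 rf_int = link_speed / tx_rate;
	u32 rf_dec = link_speed - rf_int * tx_rate;

	rf_dec = (rf_dec << E1000_RTTBCNRC_RF_INT_SHIFT) / tx_rate;

	return E1000_RTTBCNRC_RS_ENA |
	       ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
		E1000_RTTBCNRC_RF_INT_MASK) |
	       (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
}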
833
834#endif
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
new file mode 100644
index 00000000000..4519a136717
--- /dev/null
+++ b/drivers/net/igb/e1000_hw.h
@@ -0,0 +1,529 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_HW_H_
29#define _E1000_HW_H_
30
31#include <linux/types.h>
32#include <linux/delay.h>
33#include <linux/io.h>
34#include <linux/netdevice.h>
35
36#include "e1000_regs.h"
37#include "e1000_defines.h"
38
39struct e1000_hw;
40
41#define E1000_DEV_ID_82576 0x10C9
42#define E1000_DEV_ID_82576_FIBER 0x10E6
43#define E1000_DEV_ID_82576_SERDES 0x10E7
44#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
45#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
46#define E1000_DEV_ID_82576_NS 0x150A
47#define E1000_DEV_ID_82576_NS_SERDES 0x1518
48#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
49#define E1000_DEV_ID_82575EB_COPPER 0x10A7
50#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
51#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
52#define E1000_DEV_ID_82580_COPPER 0x150E
53#define E1000_DEV_ID_82580_FIBER 0x150F
54#define E1000_DEV_ID_82580_SERDES 0x1510
55#define E1000_DEV_ID_82580_SGMII 0x1511
56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
57#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
58#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
59#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
60#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
61#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
62#define E1000_DEV_ID_I350_COPPER 0x1521
63#define E1000_DEV_ID_I350_FIBER 0x1522
64#define E1000_DEV_ID_I350_SERDES 0x1523
65#define E1000_DEV_ID_I350_SGMII 0x1524
66
67#define E1000_REVISION_2 2
68#define E1000_REVISION_4 4
69
70#define E1000_FUNC_0 0
71#define E1000_FUNC_1 1
72#define E1000_FUNC_2 2
73#define E1000_FUNC_3 3
74
75#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
76#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
77#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6
78#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9
79
80enum e1000_mac_type {
81 e1000_undefined = 0,
82 e1000_82575,
83 e1000_82576,
84 e1000_82580,
85 e1000_i350,
86 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
87};
88
89enum e1000_media_type {
90 e1000_media_type_unknown = 0,
91 e1000_media_type_copper = 1,
92 e1000_media_type_internal_serdes = 2,
93 e1000_num_media_types
94};
95
96enum e1000_nvm_type {
97 e1000_nvm_unknown = 0,
98 e1000_nvm_none,
99 e1000_nvm_eeprom_spi,
100 e1000_nvm_flash_hw,
101 e1000_nvm_flash_sw
102};
103
104enum e1000_nvm_override {
105 e1000_nvm_override_none = 0,
106 e1000_nvm_override_spi_small,
107 e1000_nvm_override_spi_large,
108};
109
110enum e1000_phy_type {
111 e1000_phy_unknown = 0,
112 e1000_phy_none,
113 e1000_phy_m88,
114 e1000_phy_igp,
115 e1000_phy_igp_2,
116 e1000_phy_gg82563,
117 e1000_phy_igp_3,
118 e1000_phy_ife,
119 e1000_phy_82580,
120};
121
122enum e1000_bus_type {
123 e1000_bus_type_unknown = 0,
124 e1000_bus_type_pci,
125 e1000_bus_type_pcix,
126 e1000_bus_type_pci_express,
127 e1000_bus_type_reserved
128};
129
130enum e1000_bus_speed {
131 e1000_bus_speed_unknown = 0,
132 e1000_bus_speed_33,
133 e1000_bus_speed_66,
134 e1000_bus_speed_100,
135 e1000_bus_speed_120,
136 e1000_bus_speed_133,
137 e1000_bus_speed_2500,
138 e1000_bus_speed_5000,
139 e1000_bus_speed_reserved
140};
141
142enum e1000_bus_width {
143 e1000_bus_width_unknown = 0,
144 e1000_bus_width_pcie_x1,
145 e1000_bus_width_pcie_x2,
146 e1000_bus_width_pcie_x4 = 4,
147 e1000_bus_width_pcie_x8 = 8,
148 e1000_bus_width_32,
149 e1000_bus_width_64,
150 e1000_bus_width_reserved
151};
152
153enum e1000_1000t_rx_status {
154 e1000_1000t_rx_status_not_ok = 0,
155 e1000_1000t_rx_status_ok,
156 e1000_1000t_rx_status_undefined = 0xFF
157};
158
159enum e1000_rev_polarity {
160 e1000_rev_polarity_normal = 0,
161 e1000_rev_polarity_reversed,
162 e1000_rev_polarity_undefined = 0xFF
163};
164
165enum e1000_fc_mode {
166 e1000_fc_none = 0,
167 e1000_fc_rx_pause,
168 e1000_fc_tx_pause,
169 e1000_fc_full,
170 e1000_fc_default = 0xFF
171};
172
173/* Statistics counters collected by the MAC */
174struct e1000_hw_stats {
175 u64 crcerrs;
176 u64 algnerrc;
177 u64 symerrs;
178 u64 rxerrc;
179 u64 mpc;
180 u64 scc;
181 u64 ecol;
182 u64 mcc;
183 u64 latecol;
184 u64 colc;
185 u64 dc;
186 u64 tncrs;
187 u64 sec;
188 u64 cexterr;
189 u64 rlec;
190 u64 xonrxc;
191 u64 xontxc;
192 u64 xoffrxc;
193 u64 xofftxc;
194 u64 fcruc;
195 u64 prc64;
196 u64 prc127;
197 u64 prc255;
198 u64 prc511;
199 u64 prc1023;
200 u64 prc1522;
201 u64 gprc;
202 u64 bprc;
203 u64 mprc;
204 u64 gptc;
205 u64 gorc;
206 u64 gotc;
207 u64 rnbc;
208 u64 ruc;
209 u64 rfc;
210 u64 roc;
211 u64 rjc;
212 u64 mgprc;
213 u64 mgpdc;
214 u64 mgptc;
215 u64 tor;
216 u64 tot;
217 u64 tpr;
218 u64 tpt;
219 u64 ptc64;
220 u64 ptc127;
221 u64 ptc255;
222 u64 ptc511;
223 u64 ptc1023;
224 u64 ptc1522;
225 u64 mptc;
226 u64 bptc;
227 u64 tsctc;
228 u64 tsctfc;
229 u64 iac;
230 u64 icrxptc;
231 u64 icrxatc;
232 u64 ictxptc;
233 u64 ictxatc;
234 u64 ictxqec;
235 u64 ictxqmtc;
236 u64 icrxdmtc;
237 u64 icrxoc;
238 u64 cbtmpc;
239 u64 htdpmc;
240 u64 cbrdpc;
241 u64 cbrmpc;
242 u64 rpthc;
243 u64 hgptc;
244 u64 htcbdpc;
245 u64 hgorc;
246 u64 hgotc;
247 u64 lenerrs;
248 u64 scvpc;
249 u64 hrmpc;
250 u64 doosync;
251 u64 o2bgptc;
252 u64 o2bspc;
253 u64 b2ospc;
254 u64 b2ogprc;
255};
256
257struct e1000_phy_stats {
258 u32 idle_errors;
259 u32 receive_errors;
260};
261
262struct e1000_host_mng_dhcp_cookie {
263 u32 signature;
264 u8 status;
265 u8 reserved0;
266 u16 vlan_id;
267 u32 reserved1;
268 u16 reserved2;
269 u8 reserved3;
270 u8 checksum;
271};
272
273/* Host Interface "Rev 1" */
274struct e1000_host_command_header {
275 u8 command_id;
276 u8 command_length;
277 u8 command_options;
278 u8 checksum;
279};
280
281#define E1000_HI_MAX_DATA_LENGTH 252
282struct e1000_host_command_info {
283 struct e1000_host_command_header command_header;
284 u8 command_data[E1000_HI_MAX_DATA_LENGTH];
285};
286
287/* Host Interface "Rev 2" */
288struct e1000_host_mng_command_header {
289 u8 command_id;
290 u8 checksum;
291 u16 reserved1;
292 u16 reserved2;
293 u16 command_length;
294};
295
296#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
297struct e1000_host_mng_command_info {
298 struct e1000_host_mng_command_header command_header;
299 u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
300};
301
302#include "e1000_mac.h"
303#include "e1000_phy.h"
304#include "e1000_nvm.h"
305#include "e1000_mbx.h"
306
307struct e1000_mac_operations {
308 s32 (*check_for_link)(struct e1000_hw *);
309 s32 (*reset_hw)(struct e1000_hw *);
310 s32 (*init_hw)(struct e1000_hw *);
311 bool (*check_mng_mode)(struct e1000_hw *);
312 s32 (*setup_physical_interface)(struct e1000_hw *);
313 void (*rar_set)(struct e1000_hw *, u8 *, u32);
314 s32 (*read_mac_addr)(struct e1000_hw *);
315 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
316};
317
318struct e1000_phy_operations {
319 s32 (*acquire)(struct e1000_hw *);
320 s32 (*check_polarity)(struct e1000_hw *);
321 s32 (*check_reset_block)(struct e1000_hw *);
322 s32 (*force_speed_duplex)(struct e1000_hw *);
323 s32 (*get_cfg_done)(struct e1000_hw *hw);
324 s32 (*get_cable_length)(struct e1000_hw *);
325 s32 (*get_phy_info)(struct e1000_hw *);
326 s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
327 void (*release)(struct e1000_hw *);
328 s32 (*reset)(struct e1000_hw *);
329 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
330 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
331 s32 (*write_reg)(struct e1000_hw *, u32, u16);
332};
333
334struct e1000_nvm_operations {
335 s32 (*acquire)(struct e1000_hw *);
336 s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
337 void (*release)(struct e1000_hw *);
338 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
339 s32 (*update)(struct e1000_hw *);
340 s32 (*validate)(struct e1000_hw *);
341};
342
343struct e1000_info {
344 s32 (*get_invariants)(struct e1000_hw *);
345 struct e1000_mac_operations *mac_ops;
346 struct e1000_phy_operations *phy_ops;
347 struct e1000_nvm_operations *nvm_ops;
348};
349
350extern const struct e1000_info e1000_82575_info;
351
352struct e1000_mac_info {
353 struct e1000_mac_operations ops;
354
355 u8 addr[6];
356 u8 perm_addr[6];
357
358 enum e1000_mac_type type;
359
360 u32 ledctl_default;
361 u32 ledctl_mode1;
362 u32 ledctl_mode2;
363 u32 mc_filter_type;
364 u32 txcw;
365
366 u16 mta_reg_count;
367 u16 uta_reg_count;
368
369 /* Maximum size of the MTA register table in all supported adapters */
370 #define MAX_MTA_REG 128
371 u32 mta_shadow[MAX_MTA_REG];
372 u16 rar_entry_count;
373
374 u8 forced_speed_duplex;
375
376 bool adaptive_ifs;
377 bool arc_subsystem_valid;
378 bool asf_firmware_present;
379 bool autoneg;
380 bool autoneg_failed;
381 bool disable_hw_init_bits;
382 bool get_link_status;
383 bool ifs_params_forced;
384 bool in_ifs_mode;
385 bool report_tx_early;
386 bool serdes_has_link;
387 bool tx_pkt_filtering;
388};
389
390struct e1000_phy_info {
391 struct e1000_phy_operations ops;
392
393 enum e1000_phy_type type;
394
395 enum e1000_1000t_rx_status local_rx;
396 enum e1000_1000t_rx_status remote_rx;
397 enum e1000_ms_type ms_type;
398 enum e1000_ms_type original_ms_type;
399 enum e1000_rev_polarity cable_polarity;
400 enum e1000_smart_speed smart_speed;
401
402 u32 addr;
403 u32 id;
404 u32 reset_delay_us; /* in usec */
405 u32 revision;
406
407 enum e1000_media_type media_type;
408
409 u16 autoneg_advertised;
410 u16 autoneg_mask;
411 u16 cable_length;
412 u16 max_cable_length;
413 u16 min_cable_length;
414
415 u8 mdix;
416
417 bool disable_polarity_correction;
418 bool is_mdix;
419 bool polarity_correction;
420 bool reset_disable;
421 bool speed_downgraded;
422 bool autoneg_wait_to_complete;
423};
424
425struct e1000_nvm_info {
426 struct e1000_nvm_operations ops;
427 enum e1000_nvm_type type;
428 enum e1000_nvm_override override;
429
430 u32 flash_bank_size;
431 u32 flash_base_addr;
432
433 u16 word_size;
434 u16 delay_usec;
435 u16 address_bits;
436 u16 opcode_bits;
437 u16 page_size;
438};
439
440struct e1000_bus_info {
441 enum e1000_bus_type type;
442 enum e1000_bus_speed speed;
443 enum e1000_bus_width width;
444
445 u32 snoop;
446
447 u16 func;
448 u16 pci_cmd_word;
449};
450
451struct e1000_fc_info {
452 u32 high_water; /* Flow control high-water mark */
453 u32 low_water; /* Flow control low-water mark */
454 u16 pause_time; /* Flow control pause timer */
455 bool send_xon; /* Flow control send XON */
456 bool strict_ieee; /* Strict IEEE mode */
457 enum e1000_fc_mode current_mode; /* Type of flow control */
458 enum e1000_fc_mode requested_mode;
459};
460
461struct e1000_mbx_operations {
462 s32 (*init_params)(struct e1000_hw *hw);
463 s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
464 s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
465 s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
466 s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
467 s32 (*check_for_msg)(struct e1000_hw *, u16);
468 s32 (*check_for_ack)(struct e1000_hw *, u16);
469 s32 (*check_for_rst)(struct e1000_hw *, u16);
470};
471
472struct e1000_mbx_stats {
473 u32 msgs_tx;
474 u32 msgs_rx;
475
476 u32 acks;
477 u32 reqs;
478 u32 rsts;
479};
480
481struct e1000_mbx_info {
482 struct e1000_mbx_operations ops;
483 struct e1000_mbx_stats stats;
484 u32 timeout;
485 u32 usec_delay;
486 u16 size;
487};
488
489struct e1000_dev_spec_82575 {
490 bool sgmii_active;
491 bool global_device_reset;
492 bool eee_disable;
493};
494
495struct e1000_hw {
496 void *back;
497
498 u8 __iomem *hw_addr;
499 u8 __iomem *flash_address;
500 unsigned long io_base;
501
502 struct e1000_mac_info mac;
503 struct e1000_fc_info fc;
504 struct e1000_phy_info phy;
505 struct e1000_nvm_info nvm;
506 struct e1000_bus_info bus;
507 struct e1000_mbx_info mbx;
508 struct e1000_host_mng_dhcp_cookie mng_cookie;
509
510 union {
511 struct e1000_dev_spec_82575 _82575;
512 } dev_spec;
513
514 u16 device_id;
515 u16 subsystem_vendor_id;
516 u16 subsystem_device_id;
517 u16 vendor_id;
518
519 u8 revision_id;
520};
521
522extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
523#define hw_dbg(format, arg...) \
524 netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
525
526/* These functions must be implemented by drivers */
527s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
528s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
529#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
new file mode 100644
index 00000000000..2b5ef761d2a
--- /dev/null
+++ b/drivers/net/igb/e1000_mac.c
@@ -0,0 +1,1421 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/if_ether.h>
29#include <linux/delay.h>
30#include <linux/pci.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33
34#include "e1000_mac.h"
35
36#include "igb.h"
37
38static s32 igb_set_default_fc(struct e1000_hw *hw);
39static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
40
41/**
42 * igb_get_bus_info_pcie - Get PCIe bus information
43 * @hw: pointer to the HW structure
44 *
45 * Determines and stores the system bus information for a particular
46 * network interface. The following bus information is determined and stored:
47 * bus speed, bus width, type (PCIe), and PCIe function.
48 **/
49s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
50{
51 struct e1000_bus_info *bus = &hw->bus;
52 s32 ret_val;
53 u32 reg;
54 u16 pcie_link_status;
55
56 bus->type = e1000_bus_type_pci_express;
57
58 ret_val = igb_read_pcie_cap_reg(hw,
59 PCI_EXP_LNKSTA,
60 &pcie_link_status);
61 if (ret_val) {
62 bus->width = e1000_bus_width_unknown;
63 bus->speed = e1000_bus_speed_unknown;
64 } else {
65 switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
66 case PCI_EXP_LNKSTA_CLS_2_5GB:
67 bus->speed = e1000_bus_speed_2500;
68 break;
69 case PCI_EXP_LNKSTA_CLS_5_0GB:
70 bus->speed = e1000_bus_speed_5000;
71 break;
72 default:
73 bus->speed = e1000_bus_speed_unknown;
74 break;
75 }
76
77 bus->width = (enum e1000_bus_width)((pcie_link_status &
78 PCI_EXP_LNKSTA_NLW) >>
79 PCI_EXP_LNKSTA_NLW_SHIFT);
80 }
81
82 reg = rd32(E1000_STATUS);
83 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
84
85 return 0;
86}
87
88/**
89 * igb_clear_vfta - Clear VLAN filter table
90 * @hw: pointer to the HW structure
91 *
92 * Clears the register array which contains the VLAN filter table by
93 * setting all the values to 0.
94 **/
95void igb_clear_vfta(struct e1000_hw *hw)
96{
97 u32 offset;
98
99 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
100 array_wr32(E1000_VFTA, offset, 0);
101 wrfl();
102 }
103}
104
105/**
106 * igb_write_vfta - Write value to VLAN filter table
107 * @hw: pointer to the HW structure
108 * @offset: register offset in VLAN filter table
109 * @value: register value written to VLAN filter table
110 *
111 * Writes value at the given offset in the register array which stores
112 * the VLAN filter table.
113 **/
114static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
115{
116 array_wr32(E1000_VFTA, offset, value);
117 wrfl();
118}
119
120/**
121 * igb_init_rx_addrs - Initialize receive addresses
122 * @hw: pointer to the HW structure
123 * @rar_count: number of receive address registers
124 *
125 * Sets up the receive address registers by setting the base receive address
126 * register to the device's MAC address and clearing all the other receive
127 * address registers to 0.
128 **/
129void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
130{
131 u32 i;
132 u8 mac_addr[ETH_ALEN] = {0};
133
134 /* Setup the receive address */
135 hw_dbg("Programming MAC Address into RAR[0]\n");
136
137 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
138
139 /* Zero out the other (rar_entry_count - 1) receive addresses */
140 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
141 for (i = 1; i < rar_count; i++)
142 hw->mac.ops.rar_set(hw, mac_addr, i);
143}
144
145/**
146 * igb_vfta_set - enable or disable vlan in VLAN filter table
147 * @hw: pointer to the HW structure
148 * @vid: VLAN id to add or remove
149 * @add: if true add filter, if false remove
150 *
151 * Sets or clears a bit in the VLAN filter table array based on VLAN id
152 * and if we are adding or removing the filter
153 **/
154s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
155{
156 u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
157 u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
158 u32 vfta = array_rd32(E1000_VFTA, index);
159 s32 ret_val = 0;
160
161 /* bit was set/cleared before we started */
162 if ((!!(vfta & mask)) == add) {
163 ret_val = -E1000_ERR_CONFIG;
164 } else {
165 if (add)
166 vfta |= mask;
167 else
168 vfta &= ~mask;
169 }
170
171 igb_write_vfta(hw, index, vfta);
172
173 return ret_val;
174}
175
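/*
 * Illustrative example (not part of the driver source): for VLAN id 1234,
 * igb_vfta_set() computes index = (1234 >> 5) & 0x7F = 38 and
 * mask = 1 << (1234 & 0x1F) = 1 << 18, i.e. it toggles bit 18 of VFTA[38].
 */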
176/**
177 * igb_check_alt_mac_addr - Check for alternate MAC addr
178 * @hw: pointer to the HW structure
179 *
180 * Checks the nvm for an alternate MAC address. An alternate MAC address
181 * can be setup by pre-boot software and must be treated like a permanent
182 * address and must override the actual permanent MAC address. If an
183 * alternate MAC address is found, it is saved in the hw struct and
184 * programmed into RAR0 and the function returns success; otherwise the
185 * function returns an error.
186 **/
187s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
188{
189 u32 i;
190 s32 ret_val = 0;
191 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
192 u8 alt_mac_addr[ETH_ALEN];
193
194 ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
195 &nvm_alt_mac_addr_offset);
196 if (ret_val) {
197 hw_dbg("NVM Read Error\n");
198 goto out;
199 }
200
201 if (nvm_alt_mac_addr_offset == 0xFFFF) {
202 /* There is no Alternate MAC Address */
203 goto out;
204 }
205
206 if (hw->bus.func == E1000_FUNC_1)
207 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
208 for (i = 0; i < ETH_ALEN; i += 2) {
209 offset = nvm_alt_mac_addr_offset + (i >> 1);
210 ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
211 if (ret_val) {
212 hw_dbg("NVM Read Error\n");
213 goto out;
214 }
215
216 alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
217 alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
218 }
219
220 /* if multicast bit is set, the alternate address will not be used */
221 if (is_multicast_ether_addr(alt_mac_addr)) {
222 hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
223 goto out;
224 }
225
226 /*
227 * We have a valid alternate MAC address, and we want to treat it the
228 * same as the normal permanent MAC address stored by the HW into the
229 * RAR. Do this by mapping this address into RAR0.
230 */
231 hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
232
233out:
234 return ret_val;
235}
236
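/*
 * Illustrative example (not part of the driver source): the alternate MAC
 * address area holds three NVM words (six bytes, little endian within each
 * word) per LAN function. As the code above shows, function 1 starts
 * E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 (3) words past the pointer word, so
 * byte i of its address lives in word nvm_alt_mac_addr_offset + 3 + (i >> 1).
 */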
237/**
238 * igb_rar_set - Set receive address register
239 * @hw: pointer to the HW structure
240 * @addr: pointer to the receive address
241 * @index: receive address array register
242 *
243 * Sets the receive address array register at index to the address passed
244 * in by addr.
245 **/
246void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
247{
248 u32 rar_low, rar_high;
249
250 /*
251 * HW expects these in little endian so we reverse the byte order
252 * from network order (big endian) to little endian
253 */
254 rar_low = ((u32) addr[0] |
255 ((u32) addr[1] << 8) |
256 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
257
258 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
259
260 /* If MAC address zero, no need to set the AV bit */
261 if (rar_low || rar_high)
262 rar_high |= E1000_RAH_AV;
263
264 /*
265 * Some bridges will combine consecutive 32-bit writes into
266 * a single burst write, which will malfunction on some parts.
267 * The flushes avoid this.
268 */
269 wr32(E1000_RAL(index), rar_low);
270 wrfl();
271 wr32(E1000_RAH(index), rar_high);
272 wrfl();
273}
274
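/*
 * Illustrative example (not part of the driver source): for the (made-up)
 * address 00:1B:21:44:55:66, igb_rar_set() writes rar_low = 0x44211B00 and
 * rar_high = 0x6655 | E1000_RAH_AV, matching the little-endian layout
 * described above.
 */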
275/**
276 * igb_mta_set - Set multicast filter table address
277 * @hw: pointer to the HW structure
278 * @hash_value: determines the MTA register and bit to set
279 *
280 * The multicast table address is a register array of 32-bit registers.
281 * The hash_value is used to determine what register the bit is in, the
282 * current value is read, the new bit is OR'd in and the new value is
283 * written back into the register.
284 **/
285void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
286{
287 u32 hash_bit, hash_reg, mta;
288
289 /*
290 * The MTA is a register array of 32-bit registers. It is
291 * treated like an array of (32*mta_reg_count) bits. We want to
292 * set bit BitArray[hash_value]. So we figure out what register
293 * the bit is in, read it, OR in the new bit, then write
294 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
295 * mask to bits 31:5 of the hash value which gives us the
296 * register we're modifying. The hash bit within that register
297 * is determined by the lower 5 bits of the hash value.
298 */
299 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
300 hash_bit = hash_value & 0x1F;
301
302 mta = array_rd32(E1000_MTA, hash_reg);
303
304 mta |= (1 << hash_bit);
305
306 array_wr32(E1000_MTA, hash_reg, mta);
307 wrfl();
308}
309
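/*
 * Illustrative example (not part of the driver source): with a
 * 128-register MTA, hash_value 0x563 selects
 * hash_reg = (0x563 >> 5) & 0x7F = 43 and hash_bit = 0x563 & 0x1F = 3,
 * so bit 3 of MTA[43] is set.
 */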
310/**
311 * igb_hash_mc_addr - Generate a multicast hash value
312 * @hw: pointer to the HW structure
313 * @mc_addr: pointer to a multicast address
314 *
315 * Generates a multicast address hash value which is used to determine
316 * the multicast filter table array address and new table value. See
317 * igb_mta_set()
318 **/
319static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
320{
321 u32 hash_value, hash_mask;
322 u8 bit_shift = 0;
323
324 /* Register count multiplied by bits per register */
325 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
326
327 /*
328 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
329 * where 0xFF would still fall within the hash mask.
330 */
331 while (hash_mask >> bit_shift != 0xFF)
332 bit_shift++;
333
334 /*
335 * The portion of the address that is used for the hash table
336 * is determined by the mc_filter_type setting.
337 * The algorithm is such that there is a total of 8 bits of shifting.
338 * The bit_shift for a mc_filter_type of 0 represents the number of
339 * left-shifts where the MSB of mc_addr[5] would still fall within
340 * the hash_mask. Case 0 does this exactly. Since there are a total
341 * of 8 bits of shifting, then mc_addr[4] will shift right the
342 * remaining number of bits. Thus 8 - bit_shift. The rest of the
343 * cases are a variation of this algorithm...essentially raising the
344 * number of bits to shift mc_addr[5] left, while still keeping the
345 * 8-bit shifting total.
346 *
347 * For example, given the following Destination MAC Address and an
348 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
349 * we can see that the bit_shift for case 0 is 4. These are the hash
350 * values resulting from each mc_filter_type...
351 * [0] [1] [2] [3] [4] [5]
352 * 01 AA 00 12 34 56
353 * LSB MSB
354 *
355 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
356 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
357 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
358 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
359 */
360 switch (hw->mac.mc_filter_type) {
361 default:
362 case 0:
363 break;
364 case 1:
365 bit_shift += 1;
366 break;
367 case 2:
368 bit_shift += 2;
369 break;
370 case 3:
371 bit_shift += 4;
372 break;
373 }
374
375 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
376 (((u16) mc_addr[5]) << bit_shift)));
377
378 return hash_value;
379}
380
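/*
 * Illustrative check (not part of the driver source): reproducing the
 * case-0 example from the comment above with a 128-register MTA
 * (hash_mask 0xFFF, bit_shift 4) for mc_addr[4] = 0x34, mc_addr[5] = 0x56:
 *
 *	hash = 0xFFF & ((0x34 >> (8 - 4)) | ((u16)0x56 << 4))
 *	     = 0xFFF & (0x003 | 0x560) = 0x563
 */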
381/**
382 * igb_update_mc_addr_list - Update Multicast addresses
383 * @hw: pointer to the HW structure
384 * @mc_addr_list: array of multicast addresses to program
385 * @mc_addr_count: number of multicast addresses to program
386 *
387 * Updates entire Multicast Table Array.
388 * The caller must have a packed mc_addr_list of multicast addresses.
389 **/
390void igb_update_mc_addr_list(struct e1000_hw *hw,
391 u8 *mc_addr_list, u32 mc_addr_count)
392{
393 u32 hash_value, hash_bit, hash_reg;
394 int i;
395
396 /* clear mta_shadow */
397 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
398
399 /* update mta_shadow from mc_addr_list */
400 for (i = 0; (u32) i < mc_addr_count; i++) {
401 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
402
403 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
404 hash_bit = hash_value & 0x1F;
405
406 hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
407 mc_addr_list += (ETH_ALEN);
408 }
409
410 /* replace the entire MTA table */
411 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
412 array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
413 wrfl();
414}
415
416/**
417 * igb_clear_hw_cntrs_base - Clear base hardware counters
418 * @hw: pointer to the HW structure
419 *
420 * Clears the base hardware counters by reading the counter registers.
421 **/
422void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
423{
424 rd32(E1000_CRCERRS);
425 rd32(E1000_SYMERRS);
426 rd32(E1000_MPC);
427 rd32(E1000_SCC);
428 rd32(E1000_ECOL);
429 rd32(E1000_MCC);
430 rd32(E1000_LATECOL);
431 rd32(E1000_COLC);
432 rd32(E1000_DC);
433 rd32(E1000_SEC);
434 rd32(E1000_RLEC);
435 rd32(E1000_XONRXC);
436 rd32(E1000_XONTXC);
437 rd32(E1000_XOFFRXC);
438 rd32(E1000_XOFFTXC);
439 rd32(E1000_FCRUC);
440 rd32(E1000_GPRC);
441 rd32(E1000_BPRC);
442 rd32(E1000_MPRC);
443 rd32(E1000_GPTC);
444 rd32(E1000_GORCL);
445 rd32(E1000_GORCH);
446 rd32(E1000_GOTCL);
447 rd32(E1000_GOTCH);
448 rd32(E1000_RNBC);
449 rd32(E1000_RUC);
450 rd32(E1000_RFC);
451 rd32(E1000_ROC);
452 rd32(E1000_RJC);
453 rd32(E1000_TORL);
454 rd32(E1000_TORH);
455 rd32(E1000_TOTL);
456 rd32(E1000_TOTH);
457 rd32(E1000_TPR);
458 rd32(E1000_TPT);
459 rd32(E1000_MPTC);
460 rd32(E1000_BPTC);
461}
462
463/**
464 * igb_check_for_copper_link - Check for link (Copper)
465 * @hw: pointer to the HW structure
466 *
467 * Checks to see if the link status of the hardware has changed. If a
468 * change in link status has been detected, then we read the PHY registers
469 * to get the current speed/duplex if link exists.
470 **/
471s32 igb_check_for_copper_link(struct e1000_hw *hw)
472{
473 struct e1000_mac_info *mac = &hw->mac;
474 s32 ret_val;
475 bool link;
476
477 /*
478 * We only want to go out to the PHY registers to see if Auto-Neg
479 * has completed and/or if our link status has changed. The
480 * get_link_status flag is set upon receiving a Link Status
481 * Change or Rx Sequence Error interrupt.
482 */
483 if (!mac->get_link_status) {
484 ret_val = 0;
485 goto out;
486 }
487
488 /*
489 * First we want to see if the MII Status Register reports
490 * link. If so, then we want to get the current speed/duplex
491 * of the PHY.
492 */
493 ret_val = igb_phy_has_link(hw, 1, 0, &link);
494 if (ret_val)
495 goto out;
496
497 if (!link)
498 goto out; /* No link detected */
499
500 mac->get_link_status = false;
501
502 /*
503 * Check if there was DownShift, must be checked
504 * immediately after link-up
505 */
506 igb_check_downshift(hw);
507
508 /*
509 * If we are forcing speed/duplex, then we simply return since
510 * we have already determined whether we have link or not.
511 */
512 if (!mac->autoneg) {
513 ret_val = -E1000_ERR_CONFIG;
514 goto out;
515 }
516
517 /*
518 * Auto-Neg is enabled. Auto Speed Detection takes care
519 * of MAC speed/duplex configuration. So we only need to
520 * configure Collision Distance in the MAC.
521 */
522 igb_config_collision_dist(hw);
523
524 /*
525 * Configure Flow Control now that Auto-Neg has completed.
526 * First, we need to restore the desired flow control
527 * settings because we may have had to re-autoneg with a
528 * different link partner.
529 */
530 ret_val = igb_config_fc_after_link_up(hw);
531 if (ret_val)
532 hw_dbg("Error configuring flow control\n");
533
534out:
535 return ret_val;
536}
537
538/**
539 * igb_setup_link - Setup flow control and link settings
540 * @hw: pointer to the HW structure
541 *
542 * Determines which flow control settings to use, then configures flow
543 * control. Calls the appropriate media-specific link configuration
544 * function. Assuming the adapter has a valid link partner, a valid link
545 * should be established. Assumes the hardware has previously been reset
546 * and the transmitter and receiver are not enabled.
547 **/
548s32 igb_setup_link(struct e1000_hw *hw)
549{
550 s32 ret_val = 0;
551
552 /*
553 * In the case of the phy reset being blocked, we already have a link.
554 * We do not need to set it up again.
555 */
556 if (igb_check_reset_block(hw))
557 goto out;
558
559 /*
560 * If requested flow control is set to default, set flow control
561 * based on the EEPROM flow control settings.
562 */
563 if (hw->fc.requested_mode == e1000_fc_default) {
564 ret_val = igb_set_default_fc(hw);
565 if (ret_val)
566 goto out;
567 }
568
569 /*
570 * We want to save off the original Flow Control configuration just
571 * in case we get disconnected and then reconnected into a different
572 * hub or switch with different Flow Control capabilities.
573 */
574 hw->fc.current_mode = hw->fc.requested_mode;
575
576 hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
577
578 /* Call the necessary media_type subroutine to configure the link. */
579 ret_val = hw->mac.ops.setup_physical_interface(hw);
580 if (ret_val)
581 goto out;
582
583 /*
584 * Initialize the flow control address, type, and PAUSE timer
585 * registers to their default values. This is done even if flow
586 * control is disabled, because it does not hurt anything to
587 * initialize these registers.
588 */
589 hw_dbg("Initializing the Flow Control address, type and timer regs\n");
590 wr32(E1000_FCT, FLOW_CONTROL_TYPE);
591 wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
592 wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
593
594 wr32(E1000_FCTTV, hw->fc.pause_time);
595
596 ret_val = igb_set_fc_watermarks(hw);
597
598out:
599 return ret_val;
600}
601
602/**
603 * igb_config_collision_dist - Configure collision distance
604 * @hw: pointer to the HW structure
605 *
606 * Configures the collision distance to the default value and is used
607 * during link setup. Currently no func pointer exists and all
608 * implementations are handled in the generic version of this function.
609 **/
610void igb_config_collision_dist(struct e1000_hw *hw)
611{
612 u32 tctl;
613
614 tctl = rd32(E1000_TCTL);
615
616 tctl &= ~E1000_TCTL_COLD;
617 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
618
619 wr32(E1000_TCTL, tctl);
620 wrfl();
621}
622
623/**
624 * igb_set_fc_watermarks - Set flow control high/low watermarks
625 * @hw: pointer to the HW structure
626 *
627 * Sets the flow control high/low threshold (watermark) registers. If
628 * flow control XON frame transmission is enabled, then set XON frame
629 * transmission as well.
630 **/
631static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
632{
633 s32 ret_val = 0;
634 u32 fcrtl = 0, fcrth = 0;
635
636 /*
637 * Set the flow control receive threshold registers. Normally,
638 * these registers will be set to a default threshold that may be
639 * adjusted later by the driver's runtime code. However, if the
640 * ability to transmit pause frames is not enabled, then these
641 * registers will be set to 0.
642 */
643 if (hw->fc.current_mode & e1000_fc_tx_pause) {
644 /*
645 * We need to set up the Receive Threshold high and low water
646 * marks as well as (optionally) enabling the transmission of
647 * XON frames.
648 */
649 fcrtl = hw->fc.low_water;
650 if (hw->fc.send_xon)
651 fcrtl |= E1000_FCRTL_XONE;
652
653 fcrth = hw->fc.high_water;
654 }
655 wr32(E1000_FCRTL, fcrtl);
656 wr32(E1000_FCRTH, fcrth);
657
658 return ret_val;
659}
660
661/**
662 * igb_set_default_fc - Set flow control default values
663 * @hw: pointer to the HW structure
664 *
665 * Read the EEPROM for the default values for flow control and store the
666 * values.
667 **/
668static s32 igb_set_default_fc(struct e1000_hw *hw)
669{
670 s32 ret_val = 0;
671 u16 nvm_data;
672
673 /*
674 * Read and store word 0x0F of the EEPROM. This word contains bits
675 * that determine the hardware's default PAUSE (flow control) mode,
676 * a bit that determines whether the HW defaults to enabling or
677 * disabling auto-negotiation, and the direction of the
678 * SW defined pins. If there is no SW over-ride of the flow
679 * control setting, then the variable hw->fc will
680 * be initialized based on a value in the EEPROM.
681 */
682 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
683
684 if (ret_val) {
685 hw_dbg("NVM Read Error\n");
686 goto out;
687 }
688
689 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
690 hw->fc.requested_mode = e1000_fc_none;
691 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
692 NVM_WORD0F_ASM_DIR)
693 hw->fc.requested_mode = e1000_fc_tx_pause;
694 else
695 hw->fc.requested_mode = e1000_fc_full;
696
697out:
698 return ret_val;
699}
700
701/**
702 * igb_force_mac_fc - Force the MAC's flow control settings
703 * @hw: pointer to the HW structure
704 *
705 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
706 * device control register to reflect the adapter settings. TFCE and RFCE
707 * need to be explicitly set by software when a copper PHY is used because
708 * autonegotiation is managed by the PHY rather than the MAC. Software must
709 * also configure these bits when link is forced on a fiber connection.
710 **/
711s32 igb_force_mac_fc(struct e1000_hw *hw)
712{
713 u32 ctrl;
714 s32 ret_val = 0;
715
716 ctrl = rd32(E1000_CTRL);
717
718 /*
719 * Because we didn't get link via the internal auto-negotiation
720 * mechanism (we either forced link or we got link via PHY
721 * auto-neg), we have to manually enable/disable transmit and
722 * receive flow control.
723 *
724 * The "Case" statement below enables/disable flow control
725 * according to the "hw->fc.current_mode" parameter.
726 *
727 * The possible values of the "fc" parameter are:
728 * 0: Flow control is completely disabled
729 * 1: Rx flow control is enabled (we can receive pause
730 * frames but not send pause frames).
731 * 2: Tx flow control is enabled (we can send pause frames
732 * but we do not receive pause frames).
733 * 3: Both Rx and Tx flow control (symmetric) is enabled.
734 * other: No other values should be possible at this point.
735 */
736 hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
737
738 switch (hw->fc.current_mode) {
739 case e1000_fc_none:
740 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
741 break;
742 case e1000_fc_rx_pause:
743 ctrl &= (~E1000_CTRL_TFCE);
744 ctrl |= E1000_CTRL_RFCE;
745 break;
746 case e1000_fc_tx_pause:
747 ctrl &= (~E1000_CTRL_RFCE);
748 ctrl |= E1000_CTRL_TFCE;
749 break;
750 case e1000_fc_full:
751 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
752 break;
753 default:
754 hw_dbg("Flow control param set incorrectly\n");
755 ret_val = -E1000_ERR_CONFIG;
756 goto out;
757 }
758
759 wr32(E1000_CTRL, ctrl);
760
761out:
762 return ret_val;
763}
764
765/**
766 * igb_config_fc_after_link_up - Configures flow control after link
767 * @hw: pointer to the HW structure
768 *
769 * Checks the status of auto-negotiation after link up to ensure that the
770 * speed and duplex were not forced. If the link needed to be forced, then
771 * flow control needs to be forced also. If auto-negotiation is enabled
772 * and did not fail, then we configure flow control based on our link
773 * partner.
774 **/
775s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
776{
777 struct e1000_mac_info *mac = &hw->mac;
778 s32 ret_val = 0;
779 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
780 u16 speed, duplex;
781
782 /*
783 * Check for the case where we have fiber media and auto-neg failed
784 * so we had to force link. In this case, we need to force the
785 * configuration of the MAC to match the "fc" parameter.
786 */
787 if (mac->autoneg_failed) {
788 if (hw->phy.media_type == e1000_media_type_internal_serdes)
789 ret_val = igb_force_mac_fc(hw);
790 } else {
791 if (hw->phy.media_type == e1000_media_type_copper)
792 ret_val = igb_force_mac_fc(hw);
793 }
794
795 if (ret_val) {
796 hw_dbg("Error forcing flow control settings\n");
797 goto out;
798 }
799
800 /*
801 * Check for the case where we have copper media and auto-neg is
802 * enabled. In this case, we need to check and see if Auto-Neg
803 * has completed, and if so, how the PHY and link partner has
804 * flow control configured.
805 */
806 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
807 /*
808 * Read the MII Status Register and check to see if AutoNeg
809 * has completed. We read this twice because this reg has
810 * some "sticky" (latched) bits.
811 */
812 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
813 &mii_status_reg);
814 if (ret_val)
815 goto out;
816 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
817 &mii_status_reg);
818 if (ret_val)
819 goto out;
820
821 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
822 hw_dbg("Copper PHY and Auto Neg "
823 "has not completed.\n");
824 goto out;
825 }
826
827 /*
828 * The AutoNeg process has completed, so we now need to
829 * read both the Auto Negotiation Advertisement
830 * Register (Address 4) and the Auto-Negotiation Base
831 * Page Ability Register (Address 5) to determine how
832 * flow control was negotiated.
833 */
834 ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
835 &mii_nway_adv_reg);
836 if (ret_val)
837 goto out;
838 ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
839 &mii_nway_lp_ability_reg);
840 if (ret_val)
841 goto out;
842
843 /*
844 * Two bits in the Auto Negotiation Advertisement Register
845 * (Address 4) and two bits in the Auto Negotiation Base
846 * Page Ability Register (Address 5) determine flow control
847 * for both the PHY and the link partner. The following
848 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
849 * 1999, describes these PAUSE resolution bits and how flow
850 * control is determined based upon these settings.
851 * NOTE: DC = Don't Care
852 *
853 * LOCAL DEVICE | LINK PARTNER
854 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
855 *-------|---------|-------|---------|--------------------
856 * 0 | 0 | DC | DC | e1000_fc_none
857 * 0 | 1 | 0 | DC | e1000_fc_none
858 * 0 | 1 | 1 | 0 | e1000_fc_none
859 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
860 * 1 | 0 | 0 | DC | e1000_fc_none
861 * 1 | DC | 1 | DC | e1000_fc_full
862 * 1 | 1 | 0 | 0 | e1000_fc_none
863 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
864 *
865 * Are both PAUSE bits set to 1? If so, this implies
866 * Symmetric Flow Control is enabled at both ends. The
867 * ASM_DIR bits are irrelevant per the spec.
868 *
869 * For Symmetric Flow Control:
870 *
871 * LOCAL DEVICE | LINK PARTNER
872 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
873 *-------|---------|-------|---------|--------------------
874 * 1 | DC | 1 | DC | E1000_fc_full
875 *
876 */
877 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
878 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
879 /*
880 * Now we need to check if the user selected RX ONLY
881 * of pause frames. In this case, we had to advertise
882 * FULL flow control because we could not advertise RX
883 * ONLY. Hence, we must now check to see if we need to
884 * turn OFF the TRANSMISSION of PAUSE frames.
885 */
886 if (hw->fc.requested_mode == e1000_fc_full) {
887 hw->fc.current_mode = e1000_fc_full;
888 hw_dbg("Flow Control = FULL.\r\n");
889 } else {
890 hw->fc.current_mode = e1000_fc_rx_pause;
891 hw_dbg("Flow Control = "
892 "RX PAUSE frames only.\r\n");
893 }
894 }
895 /*
896 * For receiving PAUSE frames ONLY.
897 *
898 * LOCAL DEVICE | LINK PARTNER
899 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
900 *-------|---------|-------|---------|--------------------
901 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
902 */
903 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
904 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
905 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
906 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
907 hw->fc.current_mode = e1000_fc_tx_pause;
908 hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
909 }
910 /*
911 * For transmitting PAUSE frames ONLY.
912 *
913 * LOCAL DEVICE | LINK PARTNER
914 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
915 *-------|---------|-------|---------|--------------------
916 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
917 */
918 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
919 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
920 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
921 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
922 hw->fc.current_mode = e1000_fc_rx_pause;
923 hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
924 }
925 /*
926 * Per the IEEE spec, at this point flow control should be
927 * disabled. However, we want to consider that we could
928 * be connected to a legacy switch that doesn't advertise
929 * desired flow control, but can be forced on the link
930 * partner. So if we advertised no flow control, that is
931 * what we will resolve to. If we advertised some kind of
932 * receive capability (Rx Pause Only or Full Flow Control)
933 * and the link partner advertised none, we will configure
934 * ourselves to enable Rx Flow Control only. We can do
935 * this safely for two reasons: If the link partner really
936 * didn't want flow control enabled, and we enable Rx, no
937 * harm done since we won't be receiving any PAUSE frames
938 * anyway. If the intent on the link partner was to have
939 * flow control enabled, then by us enabling RX only, we
940 * can at least receive pause frames and process them.
941 * This is a good idea because in most cases, since we are
942 * predominantly a server NIC, more often than not we will
943 * be asked to delay transmission of packets rather than to ask
944 * our link partner to pause transmission of frames.
945 */
946 else if ((hw->fc.requested_mode == e1000_fc_none ||
947 hw->fc.requested_mode == e1000_fc_tx_pause) ||
948 hw->fc.strict_ieee) {
949 hw->fc.current_mode = e1000_fc_none;
950 hw_dbg("Flow Control = NONE.\r\n");
951 } else {
952 hw->fc.current_mode = e1000_fc_rx_pause;
953 hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
954 }
955
956 /*
957 * Now we need to do one last check... If we auto-
958 * negotiated to HALF DUPLEX, flow control should not be
959 * enabled per IEEE 802.3 spec.
960 */
961 ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
962 if (ret_val) {
963 hw_dbg("Error getting link speed and duplex\n");
964 goto out;
965 }
966
967 if (duplex == HALF_DUPLEX)
968 hw->fc.current_mode = e1000_fc_none;
969
970 /*
971 * Now we call a subroutine to actually force the MAC
972 * controller to use the correct flow control settings.
973 */
974 ret_val = igb_force_mac_fc(hw);
975 if (ret_val) {
976 hw_dbg("Error forcing flow control settings\n");
977 goto out;
978 }
979 }
980
981out:
982 return ret_val;
983}
984
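/*
 * Illustrative example (not part of the driver source): if we advertise
 * PAUSE = 1, ASM_DIR = 1 and the link partner advertises PAUSE = 0,
 * ASM_DIR = 1, the resolution table above yields e1000_fc_rx_pause: the
 * partner cannot receive PAUSE frames, so we must not send any, but we
 * may honor the PAUSE frames it sends.
 */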
985/**
986 * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
987 * @hw: pointer to the HW structure
988 * @speed: stores the current speed
989 * @duplex: stores the current duplex
990 *
991 * Read the status register for the current speed/duplex and store the current
992 * speed and duplex for copper connections.
993 **/
994s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
995 u16 *duplex)
996{
997 u32 status;
998
999 status = rd32(E1000_STATUS);
1000 if (status & E1000_STATUS_SPEED_1000) {
1001 *speed = SPEED_1000;
1002		hw_dbg("1000 Mbps, ");
1003 } else if (status & E1000_STATUS_SPEED_100) {
1004 *speed = SPEED_100;
1005		hw_dbg("100 Mbps, ");
1006 } else {
1007 *speed = SPEED_10;
1008		hw_dbg("10 Mbps, ");
1009 }
1010
1011 if (status & E1000_STATUS_FD) {
1012 *duplex = FULL_DUPLEX;
1013 hw_dbg("Full Duplex\n");
1014 } else {
1015 *duplex = HALF_DUPLEX;
1016 hw_dbg("Half Duplex\n");
1017 }
1018
1019 return 0;
1020}
1021
1022/**
1023 * igb_get_hw_semaphore - Acquire hardware semaphore
1024 * @hw: pointer to the HW structure
1025 *
1026 * Acquire the HW semaphore to access the PHY or NVM
1027 **/
1028s32 igb_get_hw_semaphore(struct e1000_hw *hw)
1029{
1030 u32 swsm;
1031 s32 ret_val = 0;
1032 s32 timeout = hw->nvm.word_size + 1;
1033 s32 i = 0;
1034
1035 /* Get the SW semaphore */
1036 while (i < timeout) {
1037 swsm = rd32(E1000_SWSM);
1038 if (!(swsm & E1000_SWSM_SMBI))
1039 break;
1040
1041 udelay(50);
1042 i++;
1043 }
1044
1045 if (i == timeout) {
1046 hw_dbg("Driver can't access device - SMBI bit is set.\n");
1047 ret_val = -E1000_ERR_NVM;
1048 goto out;
1049 }
1050
1051 /* Get the FW semaphore. */
1052 for (i = 0; i < timeout; i++) {
1053 swsm = rd32(E1000_SWSM);
1054 wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
1055
1056 /* Semaphore acquired if bit latched */
1057 if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
1058 break;
1059
1060 udelay(50);
1061 }
1062
1063 if (i == timeout) {
1064 /* Release semaphores */
1065 igb_put_hw_semaphore(hw);
1066 hw_dbg("Driver can't access the NVM\n");
1067 ret_val = -E1000_ERR_NVM;
1068 goto out;
1069 }
1070
1071out:
1072 return ret_val;
1073}
1074
1075/**
1076 * igb_put_hw_semaphore - Release hardware semaphore
1077 * @hw: pointer to the HW structure
1078 *
1079 * Release hardware semaphore used to access the PHY or NVM
1080 **/
1081void igb_put_hw_semaphore(struct e1000_hw *hw)
1082{
1083 u32 swsm;
1084
1085 swsm = rd32(E1000_SWSM);
1086
1087 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1088
1089 wr32(E1000_SWSM, swsm);
1090}
1091
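/*
 * Illustrative usage sketch (not part of the driver source): callers
 * bracket NVM or PHY access with the semaphore pair above. The function
 * name is hypothetical and error handling is minimal.
 */
static s32 example_nvm_access(struct e1000_hw *hw)
{
	s32 ret_val = igb_get_hw_semaphore(hw);

	if (ret_val)
		return ret_val;	/* could not acquire SMBI/SWESMBI */

	/* ... access NVM or PHY registers here ... */

	igb_put_hw_semaphore(hw);
	return 0;
}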
1092/**
1093 * igb_get_auto_rd_done - Check for auto read completion
1094 * @hw: pointer to the HW structure
1095 *
1096 * Check EEPROM for Auto Read done bit.
1097 **/
1098s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1099{
1100 s32 i = 0;
1101 s32 ret_val = 0;
1102
1103
1104 while (i < AUTO_READ_DONE_TIMEOUT) {
1105 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1106 break;
1107 msleep(1);
1108 i++;
1109 }
1110
1111 if (i == AUTO_READ_DONE_TIMEOUT) {
1112 hw_dbg("Auto read by HW from NVM has not completed.\n");
1113 ret_val = -E1000_ERR_RESET;
1114 goto out;
1115 }
1116
1117out:
1118 return ret_val;
1119}
1120
1121/**
1122 * igb_valid_led_default - Verify a valid default LED config
1123 * @hw: pointer to the HW structure
1124 * @data: pointer to storage for the default LED configuration word
1125 *
1126 * Read the EEPROM for the current default LED configuration. If the
1127 * LED configuration is not valid, set to a valid LED configuration.
1128 **/
1129static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1130{
1131 s32 ret_val;
1132
1133 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1134 if (ret_val) {
1135 hw_dbg("NVM Read Error\n");
1136 goto out;
1137 }
1138
1139 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1140 switch(hw->phy.media_type) {
1141 case e1000_media_type_internal_serdes:
1142 *data = ID_LED_DEFAULT_82575_SERDES;
1143 break;
1144 case e1000_media_type_copper:
1145 default:
1146 *data = ID_LED_DEFAULT;
1147 break;
1148 }
1149 }
1150out:
1151 return ret_val;
1152}
1153
1154/**
1155 * igb_id_led_init - Initialize ID LED settings from the NVM
1156 * @hw: pointer to the HW structure
1157 *
1158 **/
1159s32 igb_id_led_init(struct e1000_hw *hw)
1160{
1161 struct e1000_mac_info *mac = &hw->mac;
1162 s32 ret_val;
1163 const u32 ledctl_mask = 0x000000FF;
1164 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1165 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1166 u16 data, i, temp;
1167 const u16 led_mask = 0x0F;
1168
1169 ret_val = igb_valid_led_default(hw, &data);
1170 if (ret_val)
1171 goto out;
1172
1173 mac->ledctl_default = rd32(E1000_LEDCTL);
1174 mac->ledctl_mode1 = mac->ledctl_default;
1175 mac->ledctl_mode2 = mac->ledctl_default;
1176
1177 for (i = 0; i < 4; i++) {
1178 temp = (data >> (i << 2)) & led_mask;
1179 switch (temp) {
1180 case ID_LED_ON1_DEF2:
1181 case ID_LED_ON1_ON2:
1182 case ID_LED_ON1_OFF2:
1183 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1184 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1185 break;
1186 case ID_LED_OFF1_DEF2:
1187 case ID_LED_OFF1_ON2:
1188 case ID_LED_OFF1_OFF2:
1189 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1190 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1191 break;
1192 default:
1193 /* Do nothing */
1194 break;
1195 }
1196 switch (temp) {
1197 case ID_LED_DEF1_ON2:
1198 case ID_LED_ON1_ON2:
1199 case ID_LED_OFF1_ON2:
1200 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1201 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1202 break;
1203 case ID_LED_DEF1_OFF2:
1204 case ID_LED_ON1_OFF2:
1205 case ID_LED_OFF1_OFF2:
1206 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1207 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1208 break;
1209 default:
1210 /* Do nothing */
1211 break;
1212 }
1213 }
1214
1215out:
1216 return ret_val;
1217}
1218
1219/**
1220 * igb_cleanup_led - Set LED config to default operation
1221 * @hw: pointer to the HW structure
1222 *
1223 * Remove the current LED configuration and set the LED configuration
1224 * to the default value, saved from the EEPROM.
1225 **/
1226s32 igb_cleanup_led(struct e1000_hw *hw)
1227{
1228 wr32(E1000_LEDCTL, hw->mac.ledctl_default);
1229 return 0;
1230}
1231
1232/**
1233 * igb_blink_led - Blink LED
1234 * @hw: pointer to the HW structure
1235 *
1236 * Blink the LEDs which are set to be on.
1237 **/
1238s32 igb_blink_led(struct e1000_hw *hw)
1239{
1240 u32 ledctl_blink = 0;
1241 u32 i;
1242
1243 /*
1244 * set the blink bit for each LED that's "on" (0x0E)
1245 * in ledctl_mode2
1246 */
1247 ledctl_blink = hw->mac.ledctl_mode2;
1248 for (i = 0; i < 4; i++)
1249 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1250 E1000_LEDCTL_MODE_LED_ON)
1251 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
1252 (i * 8));
1253
1254 wr32(E1000_LEDCTL, ledctl_blink);
1255
1256 return 0;
1257}
1258
1259/**
1260 * igb_led_off - Turn LED off
1261 * @hw: pointer to the HW structure
1262 *
1263 * Turn LED off.
1264 **/
1265s32 igb_led_off(struct e1000_hw *hw)
1266{
1267 switch (hw->phy.media_type) {
1268 case e1000_media_type_copper:
1269 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1270 break;
1271 default:
1272 break;
1273 }
1274
1275 return 0;
1276}
1277
1278/**
1279 * igb_disable_pcie_master - Disables PCI-express master access
1280 * @hw: pointer to the HW structure
1281 *
1282 * Returns 0 if successful, else returns -10
1283 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not
1284 * caused the master requests to be disabled.
1285 *
1286 * Disables PCI-Express master access and verifies there are no pending
1287 * requests.
1288 **/
1289s32 igb_disable_pcie_master(struct e1000_hw *hw)
1290{
1291 u32 ctrl;
1292 s32 timeout = MASTER_DISABLE_TIMEOUT;
1293 s32 ret_val = 0;
1294
1295 if (hw->bus.type != e1000_bus_type_pci_express)
1296 goto out;
1297
1298 ctrl = rd32(E1000_CTRL);
1299 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1300 wr32(E1000_CTRL, ctrl);
1301
1302 while (timeout) {
1303 if (!(rd32(E1000_STATUS) &
1304 E1000_STATUS_GIO_MASTER_ENABLE))
1305 break;
1306 udelay(100);
1307 timeout--;
1308 }
1309
1310 if (!timeout) {
1311 hw_dbg("Master requests are pending.\n");
1312 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1313 goto out;
1314 }
1315
1316out:
1317 return ret_val;
1318}
1319
1320/**
1321 * igb_validate_mdi_setting - Verify MDI/MDIx settings
1322 * @hw: pointer to the HW structure
1323 *
1324 * Verify that when not using auto-negotiation, MDI/MDIx is correctly
1325 * set, which is forced to MDI mode only.
1326 **/
1327s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1328{
1329 s32 ret_val = 0;
1330
1331 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1332 hw_dbg("Invalid MDI setting detected\n");
1333 hw->phy.mdix = 1;
1334 ret_val = -E1000_ERR_CONFIG;
1335 goto out;
1336 }
1337
1338out:
1339 return ret_val;
1340}
1341
1342/**
1343 * igb_write_8bit_ctrl_reg - Write an 8-bit CTRL register
1344 * @hw: pointer to the HW structure
1345 * @reg: 32bit register offset such as E1000_SCTL
1346 * @offset: register offset to write to
1347 * @data: data to write at register offset
1348 *
1349 * Writes an address/data control type register. There are several of these
1350 * and they all have the format address << 8 | data and bit 31 is polled for
1351 * completion.
1352 **/
1353s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
1354 u32 offset, u8 data)
1355{
1356 u32 i, regvalue = 0;
1357 s32 ret_val = 0;
1358
1359 /* Set up the address and data */
1360 regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
1361 wr32(reg, regvalue);
1362
1363	/* Poll the ready bit to see if the write completed */
1364 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
1365 udelay(5);
1366 regvalue = rd32(reg);
1367 if (regvalue & E1000_GEN_CTL_READY)
1368 break;
1369 }
1370 if (!(regvalue & E1000_GEN_CTL_READY)) {
1371 hw_dbg("Reg %08x did not indicate ready\n", reg);
1372 ret_val = -E1000_ERR_PHY;
1373 goto out;
1374 }
1375
1376out:
1377 return ret_val;
1378}
1379
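/*
 * Illustrative example (not part of the driver source): a call such as
 * igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x12, 0x34) composes
 * regvalue = (0x12 << E1000_GEN_CTL_ADDRESS_SHIFT) | 0x34 = 0x1234 and
 * then polls bit 31 (E1000_GEN_CTL_READY) for completion.
 */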
1380/**
1381 * igb_enable_mng_pass_thru - Enable processing of ARPs
1382 * @hw: pointer to the HW structure
1383 *
1384 * Verifies whether the hardware needs to leave the interface enabled so
1385 * that frames can be directed to and from the management interface.
1386 **/
1387bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1388{
1389 u32 manc;
1390 u32 fwsm, factps;
1391 bool ret_val = false;
1392
1393 if (!hw->mac.asf_firmware_present)
1394 goto out;
1395
1396 manc = rd32(E1000_MANC);
1397
1398 if (!(manc & E1000_MANC_RCV_TCO_EN))
1399 goto out;
1400
1401 if (hw->mac.arc_subsystem_valid) {
1402 fwsm = rd32(E1000_FWSM);
1403 factps = rd32(E1000_FACTPS);
1404
1405 if (!(factps & E1000_FACTPS_MNGCG) &&
1406 ((fwsm & E1000_FWSM_MODE_MASK) ==
1407 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
1408 ret_val = true;
1409 goto out;
1410 }
1411 } else {
1412 if ((manc & E1000_MANC_SMBUS_EN) &&
1413 !(manc & E1000_MANC_ASF_EN)) {
1414 ret_val = true;
1415 goto out;
1416 }
1417 }
1418
1419out:
1420 return ret_val;
1421}
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
new file mode 100644
index 00000000000..4927f61fbbc
--- /dev/null
+++ b/drivers/net/igb/e1000_mac.h
@@ -0,0 +1,90 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_MAC_H_
29#define _E1000_MAC_H_
30
31#include "e1000_hw.h"
32
33#include "e1000_phy.h"
34#include "e1000_nvm.h"
35#include "e1000_defines.h"
36
37/*
38 * Functions that should not be called directly from drivers but can be used
39 * by other files in this 'shared code'
40 */
41s32 igb_blink_led(struct e1000_hw *hw);
42s32 igb_check_for_copper_link(struct e1000_hw *hw);
43s32 igb_cleanup_led(struct e1000_hw *hw);
44s32 igb_config_fc_after_link_up(struct e1000_hw *hw);
45s32 igb_disable_pcie_master(struct e1000_hw *hw);
46s32 igb_force_mac_fc(struct e1000_hw *hw);
47s32 igb_get_auto_rd_done(struct e1000_hw *hw);
48s32 igb_get_bus_info_pcie(struct e1000_hw *hw);
49s32 igb_get_hw_semaphore(struct e1000_hw *hw);
50s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
51 u16 *duplex);
52s32 igb_id_led_init(struct e1000_hw *hw);
53s32 igb_led_off(struct e1000_hw *hw);
54void igb_update_mc_addr_list(struct e1000_hw *hw,
55 u8 *mc_addr_list, u32 mc_addr_count);
56s32 igb_setup_link(struct e1000_hw *hw);
57s32 igb_validate_mdi_setting(struct e1000_hw *hw);
58s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
59 u32 offset, u8 data);
60
61void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
62void igb_clear_vfta(struct e1000_hw *hw);
63s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add);
64void igb_config_collision_dist(struct e1000_hw *hw);
65void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
66void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
67void igb_put_hw_semaphore(struct e1000_hw *hw);
68void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
69s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
70
71bool igb_enable_mng_pass_thru(struct e1000_hw *hw);
72
73enum e1000_mng_mode {
74 e1000_mng_mode_none = 0,
75 e1000_mng_mode_asf,
76 e1000_mng_mode_pt,
77 e1000_mng_mode_ipmi,
78 e1000_mng_mode_host_if_only
79};
80
81#define E1000_FACTPS_MNGCG 0x20000000
82
83#define E1000_FWSM_MODE_MASK 0xE
84#define E1000_FWSM_MODE_SHIFT 1
85
86#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
87
88extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
89
90#endif
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
new file mode 100644
index 00000000000..74f2f11ac29
--- /dev/null
+++ b/drivers/net/igb/e1000_mbx.c
@@ -0,0 +1,446 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include "e1000_mbx.h"
29
30/**
31 * igb_read_mbx - Reads a message from the mailbox
32 * @hw: pointer to the HW structure
33 * @msg: The message buffer
34 * @size: Length of buffer
35 * @mbx_id: id of mailbox to read
36 *
37 * returns SUCCESS if it successfully read a message from the buffer
38 **/
39s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
40{
41 struct e1000_mbx_info *mbx = &hw->mbx;
42 s32 ret_val = -E1000_ERR_MBX;
43
44 /* limit read to size of mailbox */
45 if (size > mbx->size)
46 size = mbx->size;
47
48 if (mbx->ops.read)
49 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
50
51 return ret_val;
52}
53
54/**
55 * igb_write_mbx - Write a message to the mailbox
56 * @hw: pointer to the HW structure
57 * @msg: The message buffer
58 * @size: Length of buffer
59 * @mbx_id: id of mailbox to write
60 *
61 * returns SUCCESS if it successfully copied message into the buffer
62 **/
63s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
64{
65 struct e1000_mbx_info *mbx = &hw->mbx;
66 s32 ret_val = 0;
67
68 if (size > mbx->size)
69 ret_val = -E1000_ERR_MBX;
70
71 else if (mbx->ops.write)
72 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
73
74 return ret_val;
75}
76
77/**
78 * igb_check_for_msg - checks to see if someone sent us mail
79 * @hw: pointer to the HW structure
80 * @mbx_id: id of mailbox to check
81 *
82 * returns SUCCESS if the Status bit was found or else ERR_MBX
83 **/
84s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
85{
86 struct e1000_mbx_info *mbx = &hw->mbx;
87 s32 ret_val = -E1000_ERR_MBX;
88
89 if (mbx->ops.check_for_msg)
90 ret_val = mbx->ops.check_for_msg(hw, mbx_id);
91
92 return ret_val;
93}
94
95/**
96 * igb_check_for_ack - checks to see if someone sent us an ACK
97 * @hw: pointer to the HW structure
98 * @mbx_id: id of mailbox to check
99 *
100 * returns SUCCESS if the Status bit was found or else ERR_MBX
101 **/
102s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
103{
104 struct e1000_mbx_info *mbx = &hw->mbx;
105 s32 ret_val = -E1000_ERR_MBX;
106
107 if (mbx->ops.check_for_ack)
108 ret_val = mbx->ops.check_for_ack(hw, mbx_id);
109
110 return ret_val;
111}
112
113/**
114 * igb_check_for_rst - checks to see if other side has reset
115 * @hw: pointer to the HW structure
116 * @mbx_id: id of mailbox to check
117 *
118 * returns SUCCESS if the Status bit was found or else ERR_MBX
119 **/
120s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
121{
122 struct e1000_mbx_info *mbx = &hw->mbx;
123 s32 ret_val = -E1000_ERR_MBX;
124
125 if (mbx->ops.check_for_rst)
126 ret_val = mbx->ops.check_for_rst(hw, mbx_id);
127
128 return ret_val;
129}
130
131/**
132 * igb_poll_for_msg - Wait for message notification
133 * @hw: pointer to the HW structure
134 * @mbx_id: id of mailbox to write
135 *
136 * returns SUCCESS if it successfully received a message notification
137 **/
138static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
139{
140 struct e1000_mbx_info *mbx = &hw->mbx;
141 int countdown = mbx->timeout;
142
143 if (!countdown || !mbx->ops.check_for_msg)
144 goto out;
145
146 while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
147 countdown--;
148 if (!countdown)
149 break;
150 udelay(mbx->usec_delay);
151 }
152
153 /* if we failed, all future posted messages fail until reset */
154 if (!countdown)
155 mbx->timeout = 0;
156out:
157 return countdown ? 0 : -E1000_ERR_MBX;
158}
159
160/**
161 * igb_poll_for_ack - Wait for message acknowledgement
162 * @hw: pointer to the HW structure
163 * @mbx_id: id of mailbox to write
164 *
165 * returns SUCCESS if it successfully received a message acknowledgement
166 **/
167static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
168{
169 struct e1000_mbx_info *mbx = &hw->mbx;
170 int countdown = mbx->timeout;
171
172 if (!countdown || !mbx->ops.check_for_ack)
173 goto out;
174
175 while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
176 countdown--;
177 if (!countdown)
178 break;
179 udelay(mbx->usec_delay);
180 }
181
182 /* if we failed, all future posted messages fail until reset */
183 if (!countdown)
184 mbx->timeout = 0;
185out:
186 return countdown ? 0 : -E1000_ERR_MBX;
187}
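Both pollers above use the same countdown-with-delay pattern, including the detail that an exhausted countdown zeroes mbx->timeout so that later posted operations fail fast until a reset. Below is a minimal user-space sketch of that pattern; check() stands in for the ops.check_for_* callback, and the counts and delay are arbitrary illustration values, not driver API.

/* Minimal user-space model of the mailbox countdown poll above.
 * check() models mbx->ops.check_for_msg: it returns nonzero while
 * no "mail" has arrived (hypothetical stand-in, not driver code). */
#include <stdio.h>
#include <unistd.h>

static int countdown = 5;              /* models mbx->timeout */
static const unsigned usec_delay = 10; /* models mbx->usec_delay */

static int check(void)                 /* nonzero until the event fires */
{
	static int polls_left = 3;
	return polls_left-- > 0;
}

static int poll_for_event(void)
{
	while (countdown && check()) {
		countdown--;
		if (!countdown)
			break;
		usleep(usec_delay);
	}
	if (!countdown)         /* poison: future posted ops fail too */
		return -1;
	return 0;
}

int main(void)
{
	printf("poll result: %d\n", poll_for_event()); /* prints 0 */
	return 0;
}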
188
189/**
190 * igb_read_posted_mbx - Wait for message notification and receive message
191 * @hw: pointer to the HW structure
192 * @msg: The message buffer
193 * @size: Length of buffer
194 * @mbx_id: id of mailbox to write
195 *
196 * returns SUCCESS if it successfully received a message notification and
197 * copied it into the receive buffer.
198 **/
199static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
200{
201 struct e1000_mbx_info *mbx = &hw->mbx;
202 s32 ret_val = -E1000_ERR_MBX;
203
204 if (!mbx->ops.read)
205 goto out;
206
207 ret_val = igb_poll_for_msg(hw, mbx_id);
208
209 if (!ret_val)
210 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
211out:
212 return ret_val;
213}
214
215/**
216 * igb_write_posted_mbx - Write a message to the mailbox, wait for ack
217 * @hw: pointer to the HW structure
218 * @msg: The message buffer
219 * @size: Length of buffer
220 * @mbx_id: id of mailbox to write
221 *
222 * returns SUCCESS if it successfully copied message into the buffer and
223 * received an ack to that message within delay * timeout period
224 **/
225static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
226{
227 struct e1000_mbx_info *mbx = &hw->mbx;
228 s32 ret_val = -E1000_ERR_MBX;
229
230 /* exit if either we can't write or there isn't a defined timeout */
231 if (!mbx->ops.write || !mbx->timeout)
232 goto out;
233
234 /* send msg */
235 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
236
237 /* if msg sent wait until we receive an ack */
238 if (!ret_val)
239 ret_val = igb_poll_for_ack(hw, mbx_id);
240out:
241 return ret_val;
242}
243
244static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
245{
246 u32 mbvficr = rd32(E1000_MBVFICR);
247 s32 ret_val = -E1000_ERR_MBX;
248
249 if (mbvficr & mask) {
250 ret_val = 0;
251 wr32(E1000_MBVFICR, mask);
252 }
253
254 return ret_val;
255}
256
257/**
258 * igb_check_for_msg_pf - checks to see if the VF has sent mail
259 * @hw: pointer to the HW structure
260 * @vf_number: the VF index
261 *
262 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
263 **/
264static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
265{
266 s32 ret_val = -E1000_ERR_MBX;
267
268 if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
269 ret_val = 0;
270 hw->mbx.stats.reqs++;
271 }
272
273 return ret_val;
274}
275
276/**
277 * igb_check_for_ack_pf - checks to see if the VF has ACKed
278 * @hw: pointer to the HW structure
279 * @vf_number: the VF index
280 *
281 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
282 **/
283static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
284{
285 s32 ret_val = -E1000_ERR_MBX;
286
287 if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
288 ret_val = 0;
289 hw->mbx.stats.acks++;
290 }
291
292 return ret_val;
293}
294
295/**
296 * igb_check_for_rst_pf - checks to see if the VF has reset
297 * @hw: pointer to the HW structure
298 * @vf_number: the VF index
299 *
300 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
301 **/
302static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
303{
304 u32 vflre = rd32(E1000_VFLRE);
305 s32 ret_val = -E1000_ERR_MBX;
306
307 if (vflre & (1 << vf_number)) {
308 ret_val = 0;
309 wr32(E1000_VFLRE, (1 << vf_number));
310 hw->mbx.stats.rsts++;
311 }
312
313 return ret_val;
314}
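igb_check_for_bit_pf() and igb_check_for_rst_pf() both rely on write-1-to-clear (W1C) register semantics: writing the observed mask back acknowledges exactly the bits that were seen set. A self-contained model of that handshake, with the device register replaced by a plain variable (illustrative only):

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_reg = 0x5; /* pretend VFs 0 and 2 raised events */

static uint32_t reg_read(void) { return fake_reg; }
static void reg_write_w1c(uint32_t mask) { fake_reg &= ~mask; }

static int check_for_bit(uint32_t mask)
{
	uint32_t v = reg_read();

	if (v & mask) {
		reg_write_w1c(mask); /* ack only the bits we saw */
		return 0;            /* event present */
	}
	return -1;                   /* no event */
}

int main(void)
{
	printf("vf0: %d\n", check_for_bit(1u << 0)); /* 0: event, now cleared */
	printf("vf0: %d\n", check_for_bit(1u << 0)); /* -1: already acked */
	printf("vf2: %d\n", check_for_bit(1u << 2)); /* 0 */
	return 0;
}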
315
316/**
317 * igb_obtain_mbx_lock_pf - obtain mailbox lock
318 * @hw: pointer to the HW structure
319 * @vf_number: the VF index
320 *
321 * return SUCCESS if we obtained the mailbox lock
322 **/
323static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
324{
325 s32 ret_val = -E1000_ERR_MBX;
326 u32 p2v_mailbox;
327
328
329 /* Take ownership of the buffer */
330 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
331
332 /* reserve mailbox for vf use */
333 p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
334 if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
335 ret_val = 0;
336
337 return ret_val;
338}
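The lock above is acquired by writing the PFU bit and then reading it back; ownership is confirmed only if the bit stuck. A sketch of that claim-by-readback protocol, under the simplifying (assumed) model that hardware grants PFU only while the VF-owned bit is clear:

#include <stdio.h>
#include <stdint.h>

#define PFU 0x8u /* PF owns buffer, as E1000_P2VMAILBOX_PFU */
#define VFU 0x4u /* VF owns buffer, as E1000_P2VMAILBOX_VFU */

static uint32_t mailbox; /* models E1000_P2VMAILBOX(vf) */

static void try_set_pfu(void)
{
	if (!(mailbox & VFU))  /* assumed grant rule: VFU must be clear */
		mailbox |= PFU;
}

static int obtain_lock(void)
{
	try_set_pfu();                   /* request ownership */
	return (mailbox & PFU) ? 0 : -1; /* read back to confirm */
}

int main(void)
{
	printf("free: %d\n", obtain_lock()); /* 0: we own the buffer */
	mailbox = VFU;
	printf("busy: %d\n", obtain_lock()); /* -1: VF holds it */
	return 0;
}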
339
340/**
341 * igb_write_mbx_pf - Places a message in the mailbox
342 * @hw: pointer to the HW structure
343 * @msg: The message buffer
344 * @size: Length of buffer
345 * @vf_number: the VF index
346 *
347 * returns SUCCESS if it successfully copied message into the buffer
348 **/
349static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
350 u16 vf_number)
351{
352 s32 ret_val;
353 u16 i;
354
355 /* lock the mailbox to prevent pf/vf race condition */
356 ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
357 if (ret_val)
358 goto out_no_write;
359
360 /* flush msg and acks as we are overwriting the message buffer */
361 igb_check_for_msg_pf(hw, vf_number);
362 igb_check_for_ack_pf(hw, vf_number);
363
364 /* copy the caller specified message to the mailbox memory buffer */
365 for (i = 0; i < size; i++)
366 array_wr32(E1000_VMBMEM(vf_number), i, msg[i]);
367
368 /* Interrupt VF to tell it a message has been sent and release buffer*/
369 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
370
371 /* update stats */
372 hw->mbx.stats.msgs_tx++;
373
374out_no_write:
375 return ret_val;
376
377}
378
379/**
380 * igb_read_mbx_pf - Read a message from the mailbox
381 * @hw: pointer to the HW structure
382 * @msg: The message buffer
383 * @size: Length of buffer
384 * @vf_number: the VF index
385 *
386 * This function copies a message from the mailbox buffer to the caller's
387 * memory buffer. The presumption is that the caller knows that there was
388 * a message due to a VF request so no polling for a message is needed.
389 **/
390static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
391 u16 vf_number)
392{
393 s32 ret_val;
394 u16 i;
395
396 /* lock the mailbox to prevent pf/vf race condition */
397 ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
398 if (ret_val)
399 goto out_no_read;
400
401 /* copy the message to the mailbox memory buffer */
402 for (i = 0; i < size; i++)
403 msg[i] = array_rd32(E1000_VMBMEM(vf_number), i);
404
405 /* Acknowledge the message and release buffer */
406 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
407
408 /* update stats */
409 hw->mbx.stats.msgs_rx++;
410
411out_no_read:
412 return ret_val;
413}
414
415/**
416 * igb_init_mbx_params_pf - set initial values for pf mailbox
417 * @hw: pointer to the HW structure
418 *
419 * Initializes the hw->mbx struct to correct values for pf mailbox
420 **/
421s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
422{
423 struct e1000_mbx_info *mbx = &hw->mbx;
424
425 mbx->timeout = 0;
426 mbx->usec_delay = 0;
427
428 mbx->size = E1000_VFMAILBOX_SIZE;
429
430 mbx->ops.read = igb_read_mbx_pf;
431 mbx->ops.write = igb_write_mbx_pf;
432 mbx->ops.read_posted = igb_read_posted_mbx;
433 mbx->ops.write_posted = igb_write_posted_mbx;
434 mbx->ops.check_for_msg = igb_check_for_msg_pf;
435 mbx->ops.check_for_ack = igb_check_for_ack_pf;
436 mbx->ops.check_for_rst = igb_check_for_rst_pf;
437
438 mbx->stats.msgs_tx = 0;
439 mbx->stats.msgs_rx = 0;
440 mbx->stats.reqs = 0;
441 mbx->stats.acks = 0;
442 mbx->stats.rsts = 0;
443
444 return 0;
445}
446
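All of the generic entry points in this file (igb_read_mbx(), igb_write_mbx(), the check_for_* wrappers) dispatch through the function-pointer table that igb_init_mbx_params_pf() fills in, so an alternative backend could reuse the same callers. A stripped-down sketch of that ops-table pattern; the backend functions and sizes here are made up for illustration:

#include <stdio.h>

struct mbx_ops {
	int (*read)(unsigned *msg, unsigned size);
	int (*write)(const unsigned *msg, unsigned size);
};

struct mbx_info {
	struct mbx_ops ops;
	unsigned size;
};

static int pf_read(unsigned *msg, unsigned size)
{
	msg[0] = 0x1234;                 /* pretend copy from VMBMEM */
	return 0;
}

static int pf_write(const unsigned *msg, unsigned size)
{
	printf("tx word 0: 0x%x\n", msg[0]);
	return 0;
}

static int generic_read(struct mbx_info *mbx, unsigned *msg, unsigned size)
{
	if (size > mbx->size)
		size = mbx->size;        /* clamp, as igb_read_mbx() does */
	return mbx->ops.read ? mbx->ops.read(msg, size) : -1;
}

int main(void)
{
	struct mbx_info mbx = { { pf_read, pf_write }, 16 };
	unsigned msg[1] = { 0xbeef };

	mbx.ops.write(msg, 1);
	generic_read(&mbx, msg, 1);
	printf("rx word 0: 0x%x\n", msg[0]);
	return 0;
}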
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h
new file mode 100644
index 00000000000..eddb0f83dce
--- /dev/null
+++ b/drivers/net/igb/e1000_mbx.h
@@ -0,0 +1,77 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_MBX_H_
29#define _E1000_MBX_H_
30
31#include "e1000_hw.h"
32
33#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
34#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
35#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
36#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
37#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
38
39#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
40#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
41#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
42#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
43
44#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
45
46/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the
47 * PF. The reverse is true if it is E1000_PF_*.
48 * Message ACKs are the value or'd with 0xF0000000
49 */
50#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
51 * this are the ACK */
52#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
53 * this are the NACK */
54#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
55 clear to send requests */
56#define E1000_VT_MSGINFO_SHIFT 16
57/* bits 23:16 are used for extra info for certain messages */
58#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
59
60#define E1000_VF_RESET 0x01 /* VF requests reset */
61#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
62#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
63#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
64#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
65#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
66#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
67
68#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
69
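Putting the defines above together: a mailbox word carries the command in its low byte, per-command extra info in bits 23:16, and the ACK/NACK/CTS flags at the top. A small sketch that packs and unpacks such a word, with the constants copied from this header; the multicast count of 5 is arbitrary:

#include <stdio.h>
#include <stdint.h>

#define VT_MSGTYPE_ACK   0x80000000u
#define VT_MSGTYPE_NACK  0x40000000u
#define VT_MSGINFO_SHIFT 16
#define VT_MSGINFO_MASK  (0xFFu << VT_MSGINFO_SHIFT)
#define VF_SET_MULTICAST 0x03u

int main(void)
{
	/* VF requests 5 multicast addresses */
	uint32_t msg = VF_SET_MULTICAST | (5u << VT_MSGINFO_SHIFT);
	/* PF acknowledges by or'ing the ACK flag onto the same word */
	uint32_t ack = msg | VT_MSGTYPE_ACK;

	printf("cmd=0x%02x count=%u acked=%d nacked=%d\n",
	       (unsigned)(msg & 0xFF),
	       (unsigned)((ack & VT_MSGINFO_MASK) >> VT_MSGINFO_SHIFT),
	       !!(ack & VT_MSGTYPE_ACK), !!(ack & VT_MSGTYPE_NACK));
	return 0;
}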
70s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);
71s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16);
72s32 igb_check_for_msg(struct e1000_hw *, u16);
73s32 igb_check_for_ack(struct e1000_hw *, u16);
74s32 igb_check_for_rst(struct e1000_hw *, u16);
75s32 igb_init_mbx_params_pf(struct e1000_hw *);
76
77#endif /* _E1000_MBX_H_ */
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
new file mode 100644
index 00000000000..40407124e72
--- /dev/null
+++ b/drivers/net/igb/e1000_nvm.c
@@ -0,0 +1,713 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/if_ether.h>
29#include <linux/delay.h>
30
31#include "e1000_mac.h"
32#include "e1000_nvm.h"
33
34/**
35 * igb_raise_eec_clk - Raise EEPROM clock
36 * @hw: pointer to the HW structure
37 * @eecd: pointer to the EEPROM
38 *
39 * Enable/Raise the EEPROM clock bit.
40 **/
41static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
42{
43 *eecd = *eecd | E1000_EECD_SK;
44 wr32(E1000_EECD, *eecd);
45 wrfl();
46 udelay(hw->nvm.delay_usec);
47}
48
49/**
50 * igb_lower_eec_clk - Lower EEPROM clock
51 * @hw: pointer to the HW structure
52 * @eecd: pointer to the EEPROM
53 *
54 * Clear/Lower the EEPROM clock bit.
55 **/
56static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
57{
58 *eecd = *eecd & ~E1000_EECD_SK;
59 wr32(E1000_EECD, *eecd);
60 wrfl();
61 udelay(hw->nvm.delay_usec);
62}
63
64/**
65 * igb_shift_out_eec_bits - Shift data bits out to the EEPROM
66 * @hw: pointer to the HW structure
67 * @data: data to send to the EEPROM
68 * @count: number of bits to shift out
69 *
70 * We need to shift 'count' bits out to the EEPROM. So, the value in the
71 * "data" parameter will be shifted out to the EEPROM one bit at a time.
72 * In order to do this, "data" must be broken down into bits.
73 **/
74static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
75{
76 struct e1000_nvm_info *nvm = &hw->nvm;
77 u32 eecd = rd32(E1000_EECD);
78 u32 mask;
79
80 mask = 0x01 << (count - 1);
81 if (nvm->type == e1000_nvm_eeprom_spi)
82 eecd |= E1000_EECD_DO;
83
84 do {
85 eecd &= ~E1000_EECD_DI;
86
87 if (data & mask)
88 eecd |= E1000_EECD_DI;
89
90 wr32(E1000_EECD, eecd);
91 wrfl();
92
93 udelay(nvm->delay_usec);
94
95 igb_raise_eec_clk(hw, &eecd);
96 igb_lower_eec_clk(hw, &eecd);
97
98 mask >>= 1;
99 } while (mask);
100
101 eecd &= ~E1000_EECD_DI;
102 wr32(E1000_EECD, eecd);
103}
104
105/**
106 * igb_shift_in_eec_bits - Shift data bits in from the EEPROM
107 * @hw: pointer to the HW structure
108 * @count: number of bits to shift in
109 *
110 * In order to read a register from the EEPROM, we need to shift 'count' bits
111 * in from the EEPROM. Bits are "shifted in" by raising the clock input to
112 * the EEPROM (setting the SK bit), and then reading the value of the data out
113 * "DO" bit. During this "shifting in" process the data in "DI" bit should
114 * always be clear.
115 **/
116static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
117{
118 u32 eecd;
119 u32 i;
120 u16 data;
121
122 eecd = rd32(E1000_EECD);
123
124 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
125 data = 0;
126
127 for (i = 0; i < count; i++) {
128 data <<= 1;
129 igb_raise_eec_clk(hw, &eecd);
130
131 eecd = rd32(E1000_EECD);
132
133 eecd &= ~E1000_EECD_DI;
134 if (eecd & E1000_EECD_DO)
135 data |= 1;
136
137 igb_lower_eec_clk(hw, &eecd);
138 }
139
140 return data;
141}
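The two shift routines above bit-bang an SPI-style interface: shift-out walks a mask down from the most significant bit, driving DI and pulsing SK once per bit, while shift-in raises SK and samples DO. A host-side model of that MSB-first exchange, with the EEPROM replaced by a simple loopback buffer (illustrative, not driver code):

#include <stdio.h>
#include <stdint.h>

static uint32_t wire;      /* pretend serial line, MSB first */
static int wire_bits;

static void shift_out(uint16_t data, int count)
{
	for (uint16_t mask = 1u << (count - 1); mask; mask >>= 1) {
		wire = (wire << 1) | !!(data & mask); /* drive DI, clock SK */
		wire_bits++;
	}
}

static uint16_t shift_in(int count)
{
	uint16_t data = 0;

	for (int i = 0; i < count; i++) {
		data <<= 1;                        /* raise SK ... */
		data |= (wire >> --wire_bits) & 1; /* ... sample DO */
	}
	return data;
}

int main(void)
{
	shift_out(0xBEEF, 16);
	printf("read back: 0x%04X\n", shift_in(16)); /* 0xBEEF */
	return 0;
}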
142
143/**
144 * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion
145 * @hw: pointer to the HW structure
146 * @ee_reg: EEPROM flag for polling
147 *
148 * Polls the EEPROM status bit for either read or write completion based
149 * upon the value of 'ee_reg'.
150 **/
151static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
152{
153 u32 attempts = 100000;
154 u32 i, reg = 0;
155 s32 ret_val = -E1000_ERR_NVM;
156
157 for (i = 0; i < attempts; i++) {
158 if (ee_reg == E1000_NVM_POLL_READ)
159 reg = rd32(E1000_EERD);
160 else
161 reg = rd32(E1000_EEWR);
162
163 if (reg & E1000_NVM_RW_REG_DONE) {
164 ret_val = 0;
165 break;
166 }
167
168 udelay(5);
169 }
170
171 return ret_val;
172}
173
174/**
175 * igb_acquire_nvm - Generic request for access to EEPROM
176 * @hw: pointer to the HW structure
177 *
178 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
179 * Return successful if access grant bit set, else clear the request for
180 * EEPROM access and return -E1000_ERR_NVM (-1).
181 **/
182s32 igb_acquire_nvm(struct e1000_hw *hw)
183{
184 u32 eecd = rd32(E1000_EECD);
185 s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
186 s32 ret_val = 0;
187
188
189 wr32(E1000_EECD, eecd | E1000_EECD_REQ);
190 eecd = rd32(E1000_EECD);
191
192 while (timeout) {
193 if (eecd & E1000_EECD_GNT)
194 break;
195 udelay(5);
196 eecd = rd32(E1000_EECD);
197 timeout--;
198 }
199
200 if (!timeout) {
201 eecd &= ~E1000_EECD_REQ;
202 wr32(E1000_EECD, eecd);
203 hw_dbg("Could not acquire NVM grant\n");
204 ret_val = -E1000_ERR_NVM;
205 }
206
207 return ret_val;
208}
209
210/**
211 * igb_standby_nvm - Return EEPROM to standby state
212 * @hw: pointer to the HW structure
213 *
214 * Return the EEPROM to a standby state.
215 **/
216static void igb_standby_nvm(struct e1000_hw *hw)
217{
218 struct e1000_nvm_info *nvm = &hw->nvm;
219 u32 eecd = rd32(E1000_EECD);
220
221 if (nvm->type == e1000_nvm_eeprom_spi) {
222 /* Toggle CS to flush commands */
223 eecd |= E1000_EECD_CS;
224 wr32(E1000_EECD, eecd);
225 wrfl();
226 udelay(nvm->delay_usec);
227 eecd &= ~E1000_EECD_CS;
228 wr32(E1000_EECD, eecd);
229 wrfl();
230 udelay(nvm->delay_usec);
231 }
232}
233
234/**
235 * e1000_stop_nvm - Terminate EEPROM command
236 * @hw: pointer to the HW structure
237 *
238 * Terminates the current command by inverting the EEPROM's chip select pin.
239 **/
240static void e1000_stop_nvm(struct e1000_hw *hw)
241{
242 u32 eecd;
243
244 eecd = rd32(E1000_EECD);
245 if (hw->nvm.type == e1000_nvm_eeprom_spi) {
246 /* Pull CS high */
247 eecd |= E1000_EECD_CS;
248 igb_lower_eec_clk(hw, &eecd);
249 }
250}
251
252/**
253 * igb_release_nvm - Release exclusive access to EEPROM
254 * @hw: pointer to the HW structure
255 *
256 * Stop any current commands to the EEPROM and clear the EEPROM request bit.
257 **/
258void igb_release_nvm(struct e1000_hw *hw)
259{
260 u32 eecd;
261
262 e1000_stop_nvm(hw);
263
264 eecd = rd32(E1000_EECD);
265 eecd &= ~E1000_EECD_REQ;
266 wr32(E1000_EECD, eecd);
267}
268
269/**
270 * igb_ready_nvm_eeprom - Prepares EEPROM for read/write
271 * @hw: pointer to the HW structure
272 *
273 * Sets up the EEPROM for reading and writing.
274 **/
275static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
276{
277 struct e1000_nvm_info *nvm = &hw->nvm;
278 u32 eecd = rd32(E1000_EECD);
279 s32 ret_val = 0;
280 u16 timeout = 0;
281 u8 spi_stat_reg;
282
283
284 if (nvm->type == e1000_nvm_eeprom_spi) {
285 /* Clear SK and CS */
286 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
287 wr32(E1000_EECD, eecd);
288 wrfl();
289 udelay(1);
290 timeout = NVM_MAX_RETRY_SPI;
291
292 /*
293 * Read "Status Register" repeatedly until the LSB is cleared.
294 * The EEPROM will signal that the command has been completed
295 * by clearing bit 0 of the internal status register. If it's
296 * not cleared within 'timeout', then error out.
297 */
298 while (timeout) {
299 igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
300 hw->nvm.opcode_bits);
301 spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
302 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
303 break;
304
305 udelay(5);
306 igb_standby_nvm(hw);
307 timeout--;
308 }
309
310 if (!timeout) {
311 hw_dbg("SPI NVM Status error\n");
312 ret_val = -E1000_ERR_NVM;
313 goto out;
314 }
315 }
316
317out:
318 return ret_val;
319}
320
321/**
322 * igb_read_nvm_spi - Read the EEPROM using SPI
323 * @hw: pointer to the HW structure
324 * @offset: offset of word in the EEPROM to read
325 * @words: number of words to read
326 * @data: word read from the EEPROM
327 *
328 * Reads a 16 bit word from the EEPROM.
329 **/
330s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
331{
332 struct e1000_nvm_info *nvm = &hw->nvm;
333 u32 i = 0;
334 s32 ret_val;
335 u16 word_in;
336 u8 read_opcode = NVM_READ_OPCODE_SPI;
337
338 /*
339 * A check for invalid values: offset too large, too many words,
340 * and not enough words.
341 */
342 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
343 (words == 0)) {
344 hw_dbg("nvm parameter(s) out of bounds\n");
345 ret_val = -E1000_ERR_NVM;
346 goto out;
347 }
348
349 ret_val = nvm->ops.acquire(hw);
350 if (ret_val)
351 goto out;
352
353 ret_val = igb_ready_nvm_eeprom(hw);
354 if (ret_val)
355 goto release;
356
357 igb_standby_nvm(hw);
358
359 if ((nvm->address_bits == 8) && (offset >= 128))
360 read_opcode |= NVM_A8_OPCODE_SPI;
361
362 /* Send the READ command (opcode + addr) */
363 igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
364 igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
365
366 /*
367 * Read the data. SPI NVMs increment the address with each byte
368 * read and will roll over if reading beyond the end. This allows
369 * us to read the whole NVM from any offset
370 */
371 for (i = 0; i < words; i++) {
372 word_in = igb_shift_in_eec_bits(hw, 16);
373 data[i] = (word_in >> 8) | (word_in << 8);
374 }
375
376release:
377 nvm->ops.release(hw);
378
379out:
380 return ret_val;
381}
382
383/**
384 * igb_read_nvm_eerd - Reads EEPROM using EERD register
385 * @hw: pointer to the HW structure
386 * @offset: offset of word in the EEPROM to read
387 * @words: number of words to read
388 * @data: word read from the EEPROM
389 *
390 * Reads a 16 bit word from the EEPROM using the EERD register.
391 **/
392s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
393{
394 struct e1000_nvm_info *nvm = &hw->nvm;
395 u32 i, eerd = 0;
396 s32 ret_val = 0;
397
398 /*
399 * A check for invalid values: offset too large, too many words,
400 * and not enough words.
401 */
402 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
403 (words == 0)) {
404 hw_dbg("nvm parameter(s) out of bounds\n");
405 ret_val = -E1000_ERR_NVM;
406 goto out;
407 }
408
409 for (i = 0; i < words; i++) {
410 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
411 E1000_NVM_RW_REG_START;
412
413 wr32(E1000_EERD, eerd);
414 ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
415 if (ret_val)
416 break;
417
418 data[i] = (rd32(E1000_EERD) >>
419 E1000_NVM_RW_REG_DATA);
420 }
421
422out:
423 return ret_val;
424}
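Each EERD transaction above packs the word address and the START bit into a single register write, polls for DONE, and takes the data from the upper half of the register. The sketch below restates that framing; the shift and flag values are believed to match the driver's e1000_defines.h but are restated from memory here, and the instantly-completing "device" is a fake:

#include <stdio.h>
#include <stdint.h>

#define NVM_RW_ADDR_SHIFT 2
#define NVM_RW_REG_START  1u
#define NVM_RW_REG_DONE   2u
#define NVM_RW_REG_DATA   16

static uint32_t eerd_model(uint32_t cmd)
{
	static const uint16_t fake_nvm[4] = { 0x8086, 0xBABA, 0x1111, 0x2222 };
	uint32_t addr = cmd >> NVM_RW_ADDR_SHIFT;

	/* "hardware": complete instantly, return DONE plus the data */
	return NVM_RW_REG_DONE |
	       ((uint32_t)fake_nvm[addr & 3] << NVM_RW_REG_DATA);
}

int main(void)
{
	for (unsigned offset = 0; offset < 2; offset++) {
		uint32_t cmd = (offset << NVM_RW_ADDR_SHIFT) | NVM_RW_REG_START;
		uint32_t reg = eerd_model(cmd); /* driver polls DONE here */

		if (reg & NVM_RW_REG_DONE)
			printf("word %u = 0x%04X\n", offset,
			       (unsigned)(reg >> NVM_RW_REG_DATA));
	}
	return 0;
}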
425
426/**
427 * igb_write_nvm_spi - Write to EEPROM using SPI
428 * @hw: pointer to the HW structure
429 * @offset: offset within the EEPROM to be written to
430 * @words: number of words to write
431 * @data: 16 bit word(s) to be written to the EEPROM
432 *
433 * Writes data to EEPROM at offset using SPI interface.
434 *
435 * If e1000_update_nvm_checksum is not called after this function, the
436 * EEPROM will most likely contain an invalid checksum.
437 **/
438s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
439{
440 struct e1000_nvm_info *nvm = &hw->nvm;
441 s32 ret_val;
442 u16 widx = 0;
443
444 /*
445 * A check for invalid values: offset too large, too many words,
446 * and not enough words.
447 */
448 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
449 (words == 0)) {
450 hw_dbg("nvm parameter(s) out of bounds\n");
451 ret_val = -E1000_ERR_NVM;
452 goto out;
453 }
454
455 ret_val = hw->nvm.ops.acquire(hw);
456 if (ret_val)
457 goto out;
458
459 msleep(10);
460
461 while (widx < words) {
462 u8 write_opcode = NVM_WRITE_OPCODE_SPI;
463
464 ret_val = igb_ready_nvm_eeprom(hw);
465 if (ret_val)
466 goto release;
467
468 igb_standby_nvm(hw);
469
470 /* Send the WRITE ENABLE command (8 bit opcode) */
471 igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
472 nvm->opcode_bits);
473
474 igb_standby_nvm(hw);
475
476 /*
477 * Some SPI eeproms use the 8th address bit embedded in the
478 * opcode
479 */
480 if ((nvm->address_bits == 8) && (offset >= 128))
481 write_opcode |= NVM_A8_OPCODE_SPI;
482
483 /* Send the Write command (8-bit opcode + addr) */
484 igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
485 igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
486 nvm->address_bits);
487
488 /* Loop to allow for up to whole page write of eeprom */
489 while (widx < words) {
490 u16 word_out = data[widx];
491 word_out = (word_out >> 8) | (word_out << 8);
492 igb_shift_out_eec_bits(hw, word_out, 16);
493 widx++;
494
495 if ((((offset + widx) * 2) % nvm->page_size) == 0) {
496 igb_standby_nvm(hw);
497 break;
498 }
499 }
500 }
501
502 msleep(10);
503release:
504 hw->nvm.ops.release(hw);
505
506out:
507 return ret_val;
508}
509
510/**
511 * igb_read_part_string - Read device part number
512 * @hw: pointer to the HW structure
513 * @part_num: pointer to device part number
514 * @part_num_size: size of part number buffer
515 *
516 * Reads the product board assembly (PBA) number from the EEPROM and stores
517 * the value in part_num.
518 **/
519s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
520{
521 s32 ret_val;
522 u16 nvm_data;
523 u16 pointer;
524 u16 offset;
525 u16 length;
526
527 if (part_num == NULL) {
528 hw_dbg("PBA string buffer was null\n");
529 ret_val = E1000_ERR_INVALID_ARGUMENT;
530 goto out;
531 }
532
533 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
534 if (ret_val) {
535 hw_dbg("NVM Read Error\n");
536 goto out;
537 }
538
539 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
540 if (ret_val) {
541 hw_dbg("NVM Read Error\n");
542 goto out;
543 }
544
545 /*
546 * if nvm_data is not the pointer guard, the PBA must be in legacy
547 * format, which means pointer is actually our second data word for
548 * the PBA number and we can decode it into an ASCII string
549 */
550 if (nvm_data != NVM_PBA_PTR_GUARD) {
551 hw_dbg("NVM PBA number is not stored as string\n");
552
553 /* we will need 11 characters to store the PBA */
554 if (part_num_size < 11) {
555 hw_dbg("PBA string buffer too small\n");
556 return E1000_ERR_NO_SPACE;
557 }
558
559 /* extract hex string from data and pointer */
560 part_num[0] = (nvm_data >> 12) & 0xF;
561 part_num[1] = (nvm_data >> 8) & 0xF;
562 part_num[2] = (nvm_data >> 4) & 0xF;
563 part_num[3] = nvm_data & 0xF;
564 part_num[4] = (pointer >> 12) & 0xF;
565 part_num[5] = (pointer >> 8) & 0xF;
566 part_num[6] = '-';
567 part_num[7] = 0;
568 part_num[8] = (pointer >> 4) & 0xF;
569 part_num[9] = pointer & 0xF;
570
571 /* put a null character on the end of our string */
572 part_num[10] = '\0';
573
574 /* switch all the data but the '-' to hex char */
575 for (offset = 0; offset < 10; offset++) {
576 if (part_num[offset] < 0xA)
577 part_num[offset] += '0';
578 else if (part_num[offset] < 0x10)
579 part_num[offset] += 'A' - 0xA;
580 }
581
582 goto out;
583 }
584
585 ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
586 if (ret_val) {
587 hw_dbg("NVM Read Error\n");
588 goto out;
589 }
590
591 if (length == 0xFFFF || length == 0) {
592 hw_dbg("NVM PBA number section invalid length\n");
593 ret_val = E1000_ERR_NVM_PBA_SECTION;
594 goto out;
595 }
596 /* check if part_num buffer is big enough */
597 if (part_num_size < (((u32)length * 2) - 1)) {
598 hw_dbg("PBA string buffer too small\n");
599 ret_val = E1000_ERR_NO_SPACE;
600 goto out;
601 }
602
603 /* trim pba length from start of string */
604 pointer++;
605 length--;
606
607 for (offset = 0; offset < length; offset++) {
608 ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
609 if (ret_val) {
610 hw_dbg("NVM Read Error\n");
611 goto out;
612 }
613 part_num[offset * 2] = (u8)(nvm_data >> 8);
614 part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
615 }
616 part_num[offset * 2] = '\0';
617
618out:
619 return ret_val;
620}
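The legacy branch above is easiest to follow by running it: two 16-bit NVM words are split into nibbles, a '-' and a literal 0 digit are spliced in, and everything except the '-' is mapped to hex ASCII. A user-space rerun of that decode; the two sample words are made up:

#include <stdio.h>

int main(void)
{
	unsigned nvm_data = 0xE112, pointer = 0x4500; /* hypothetical words */
	char part_num[11];

	part_num[0] = (nvm_data >> 12) & 0xF;
	part_num[1] = (nvm_data >> 8) & 0xF;
	part_num[2] = (nvm_data >> 4) & 0xF;
	part_num[3] = nvm_data & 0xF;
	part_num[4] = (pointer >> 12) & 0xF;
	part_num[5] = (pointer >> 8) & 0xF;
	part_num[6] = '-';
	part_num[7] = 0;                 /* becomes the '0' digit below */
	part_num[8] = (pointer >> 4) & 0xF;
	part_num[9] = pointer & 0xF;
	part_num[10] = '\0';

	/* switch all the data but the '-' to hex ASCII */
	for (int i = 0; i < 10; i++) {
		if (part_num[i] < 0xA)
			part_num[i] += '0';
		else if (part_num[i] < 0x10)
			part_num[i] += 'A' - 0xA;
	}
	printf("PBA: %s\n", part_num);   /* prints: PBA: E11245-000 */
	return 0;
}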
621
622/**
623 * igb_read_mac_addr - Read device MAC address
624 * @hw: pointer to the HW structure
625 *
626 * Reads the device MAC address from the EEPROM and stores the value.
627 * Since devices with two ports use the same EEPROM, we increment the
628 * last bit in the MAC address for the second port.
629 **/
630s32 igb_read_mac_addr(struct e1000_hw *hw)
631{
632 u32 rar_high;
633 u32 rar_low;
634 u16 i;
635
636 rar_high = rd32(E1000_RAH(0));
637 rar_low = rd32(E1000_RAL(0));
638
639 for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
640 hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
641
642 for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
643 hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
644
645 for (i = 0; i < ETH_ALEN; i++)
646 hw->mac.addr[i] = hw->mac.perm_addr[i];
647
648 return 0;
649}
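The RAL/RAH unpacking above treats the two registers as a little-endian byte stream: RAL supplies the first four octets of the MAC address and the low half of RAH the last two. A stand-alone rerun with sample register values (the values themselves are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t rar_low = 0x563412A0, rar_high = 0x00009A78; /* samples */
	uint8_t mac[6];

	for (int i = 0; i < 4; i++)
		mac[i] = (uint8_t)(rar_low >> (i * 8));
	for (int i = 0; i < 2; i++)
		mac[i + 4] = (uint8_t)(rar_high >> (i * 8));

	/* prints a0:12:34:56:78:9a */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}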
650
651/**
652 * igb_validate_nvm_checksum - Validate EEPROM checksum
653 * @hw: pointer to the HW structure
654 *
655 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
656 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
657 **/
658s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
659{
660 s32 ret_val = 0;
661 u16 checksum = 0;
662 u16 i, nvm_data;
663
664 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
665 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
666 if (ret_val) {
667 hw_dbg("NVM Read Error\n");
668 goto out;
669 }
670 checksum += nvm_data;
671 }
672
673 if (checksum != (u16) NVM_SUM) {
674 hw_dbg("NVM Checksum Invalid\n");
675 ret_val = -E1000_ERR_NVM;
676 goto out;
677 }
678
679out:
680 return ret_val;
681}
682
683/**
684 * igb_update_nvm_checksum - Update EEPROM checksum
685 * @hw: pointer to the HW structure
686 *
687 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
688 * up to the checksum. Then calculates the EEPROM checksum and writes the
689 * value to the EEPROM.
690 **/
691s32 igb_update_nvm_checksum(struct e1000_hw *hw)
692{
693 s32 ret_val;
694 u16 checksum = 0;
695 u16 i, nvm_data;
696
697 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
698 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
699 if (ret_val) {
700 hw_dbg("NVM Read Error while updating checksum.\n");
701 goto out;
702 }
703 checksum += nvm_data;
704 }
705 checksum = (u16) NVM_SUM - checksum;
706 ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
707 if (ret_val)
708 hw_dbg("NVM Write Error while updating checksum.\n");
709
710out:
711 return ret_val;
712}
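The invariant behind the two checksum routines above is that all EEPROM words through NVM_CHECKSUM_REG must sum, modulo 2^16, to NVM_SUM (0xBABA). A sketch over a toy eight-word image showing the update step followed by the validate step; the image contents are arbitrary:

#include <stdio.h>
#include <stdint.h>

#define NVM_SUM 0xBABAu

int main(void)
{
	uint16_t nvm[8] = { 0x8086, 0x1521, 0x0001, 0xAAAA,
			    0x5555, 0x1234, 0x00FF, 0 };
	int checksum_reg = 7;            /* last word holds the checksum */
	uint16_t sum = 0;

	for (int i = 0; i < checksum_reg; i++)
		sum += nvm[i];
	nvm[checksum_reg] = (uint16_t)(NVM_SUM - sum); /* update step */

	sum = 0;                                       /* validate step */
	for (int i = 0; i <= checksum_reg; i++)
		sum += nvm[i];
	printf("sum=0x%04X valid=%d\n", sum, sum == (uint16_t)NVM_SUM);
	return 0;
}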
713
diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/igb/e1000_nvm.h
new file mode 100644
index 00000000000..a2a7ca9fa73
--- /dev/null
+++ b/drivers/net/igb/e1000_nvm.h
@@ -0,0 +1,43 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_NVM_H_
29#define _E1000_NVM_H_
30
31s32 igb_acquire_nvm(struct e1000_hw *hw);
32void igb_release_nvm(struct e1000_hw *hw);
33s32 igb_read_mac_addr(struct e1000_hw *hw);
34s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
35s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
36 u32 part_num_size);
37s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
38s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
39s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
40s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
41s32 igb_update_nvm_checksum(struct e1000_hw *hw);
42
43#endif
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
new file mode 100644
index 00000000000..e662554c62d
--- /dev/null
+++ b/drivers/net/igb/e1000_phy.c
@@ -0,0 +1,2341 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/if_ether.h>
29#include <linux/delay.h>
30
31#include "e1000_mac.h"
32#include "e1000_phy.h"
33
34static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
35static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
36 u16 *phy_ctrl);
37static s32 igb_wait_autoneg(struct e1000_hw *hw);
38
39/* Cable length tables */
40static const u16 e1000_m88_cable_length_table[] =
41 { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
42#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
43 (sizeof(e1000_m88_cable_length_table) / \
44 sizeof(e1000_m88_cable_length_table[0]))
45
46static const u16 e1000_igp_2_cable_length_table[] =
47 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
48 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
49 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
50 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
51 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
52 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
53 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
54 104, 109, 114, 118, 121, 124};
55#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
56 (sizeof(e1000_igp_2_cable_length_table) / \
57 sizeof(e1000_igp_2_cable_length_table[0]))
58
59/**
60 * igb_check_reset_block - Check if PHY reset is blocked
61 * @hw: pointer to the HW structure
62 *
63 * Read the PHY management control register and check whether a PHY reset
64 * is blocked. If a reset is not blocked return 0, otherwise
65 * return E1000_BLK_PHY_RESET (12).
66 **/
67s32 igb_check_reset_block(struct e1000_hw *hw)
68{
69 u32 manc;
70
71 manc = rd32(E1000_MANC);
72
73 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
74 E1000_BLK_PHY_RESET : 0;
75}
76
77/**
78 * igb_get_phy_id - Retrieve the PHY ID and revision
79 * @hw: pointer to the HW structure
80 *
81 * Reads the PHY registers and stores the PHY ID and possibly the PHY
82 * revision in the hardware structure.
83 **/
84s32 igb_get_phy_id(struct e1000_hw *hw)
85{
86 struct e1000_phy_info *phy = &hw->phy;
87 s32 ret_val = 0;
88 u16 phy_id;
89
90 ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
91 if (ret_val)
92 goto out;
93
94 phy->id = (u32)(phy_id << 16);
95 udelay(20);
96 ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
97 if (ret_val)
98 goto out;
99
100 phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
101 phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
102
103out:
104 return ret_val;
105}
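The 32-bit PHY id above is assembled from two 16-bit reads: PHY_ID1 becomes the high word, and PHY_ID2 contributes everything above its low four revision bits. A sketch with hypothetical register values (the 0x0141/0x0CC2 pair is only an example):

#include <stdio.h>
#include <stdint.h>

#define PHY_REVISION_MASK 0xFFFFFFF0u

int main(void)
{
	uint16_t id1 = 0x0141, id2 = 0x0CC2; /* hypothetical ID1/ID2 reads */
	uint32_t id = (uint32_t)id1 << 16;   /* high word from ID1 */

	id |= id2 & PHY_REVISION_MASK;       /* model bits from ID2 */
	printf("id=0x%08X revision=%u\n", (unsigned)id,
	       (unsigned)(id2 & ~PHY_REVISION_MASK));
	return 0;                            /* id=0x01410CC0 revision=2 */
}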
106
107/**
108 * igb_phy_reset_dsp - Reset PHY DSP
109 * @hw: pointer to the HW structure
110 *
111 * Reset the digital signal processor.
112 **/
113static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
114{
115 s32 ret_val = 0;
116
117 if (!(hw->phy.ops.write_reg))
118 goto out;
119
120 ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
121 if (ret_val)
122 goto out;
123
124 ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
125
126out:
127 return ret_val;
128}
129
130/**
131 * igb_read_phy_reg_mdic - Read MDI control register
132 * @hw: pointer to the HW structure
133 * @offset: register offset to be read
134 * @data: pointer to the read data
135 *
136 * Reads the MDI control register in the PHY at offset and stores the
137 * information read to data.
138 **/
139s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
140{
141 struct e1000_phy_info *phy = &hw->phy;
142 u32 i, mdic = 0;
143 s32 ret_val = 0;
144
145 if (offset > MAX_PHY_REG_ADDRESS) {
146 hw_dbg("PHY Address %d is out of range\n", offset);
147 ret_val = -E1000_ERR_PARAM;
148 goto out;
149 }
150
151 /*
152 * Set up Op-code, Phy Address, and register offset in the MDI
153 * Control register. The MAC will take care of interfacing with the
154 * PHY to retrieve the desired data.
155 */
156 mdic = ((offset << E1000_MDIC_REG_SHIFT) |
157 (phy->addr << E1000_MDIC_PHY_SHIFT) |
158 (E1000_MDIC_OP_READ));
159
160 wr32(E1000_MDIC, mdic);
161
162 /*
163 * Poll the ready bit to see if the MDI read completed. The
164 * timeout was increased because testing showed failures with
165 * the lower timeout.
166 */
167 for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
168 udelay(50);
169 mdic = rd32(E1000_MDIC);
170 if (mdic & E1000_MDIC_READY)
171 break;
172 }
173 if (!(mdic & E1000_MDIC_READY)) {
174 hw_dbg("MDI Read did not complete\n");
175 ret_val = -E1000_ERR_PHY;
176 goto out;
177 }
178 if (mdic & E1000_MDIC_ERROR) {
179 hw_dbg("MDI Error\n");
180 ret_val = -E1000_ERR_PHY;
181 goto out;
182 }
183 *data = (u16) mdic;
184
185out:
186 return ret_val;
187}
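The MDIC command word above is a packed frame: register offset at bit 16, PHY address at bit 21, an opcode flag, and then READY/ERROR status bits on completion with the data in the low 16 bits. The sketch below restates that framing; the shift and flag values are believed to match the driver's e1000_defines.h but are restated from memory, and the "device" is a fake that completes immediately with an arbitrary 0x0141:

#include <stdio.h>
#include <stdint.h>

#define MDIC_REG_SHIFT 16
#define MDIC_PHY_SHIFT 21
#define MDIC_OP_READ   0x08000000u
#define MDIC_READY     0x10000000u
#define MDIC_ERROR     0x40000000u

static uint32_t mdic_model(uint32_t cmd)
{
	(void)cmd;
	return MDIC_READY | 0x0141u; /* pretend the PHY returned 0x0141 */
}

int main(void)
{
	uint32_t offset = 2 /* e.g. PHY_ID1 */, phy_addr = 1;
	uint32_t mdic = (offset << MDIC_REG_SHIFT) |
			(phy_addr << MDIC_PHY_SHIFT) |
			MDIC_OP_READ;

	mdic = mdic_model(mdic);        /* driver polls for READY here */
	if ((mdic & MDIC_READY) && !(mdic & MDIC_ERROR))
		printf("PHY reg %u = 0x%04X\n", (unsigned)offset,
		       (unsigned)(mdic & 0xFFFF));
	return 0;
}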
188
189/**
190 * igb_write_phy_reg_mdic - Write MDI control register
191 * @hw: pointer to the HW structure
192 * @offset: register offset to write to
193 * @data: data to write to register at offset
194 *
195 * Writes data to MDI control register in the PHY at offset.
196 **/
197s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
198{
199 struct e1000_phy_info *phy = &hw->phy;
200 u32 i, mdic = 0;
201 s32 ret_val = 0;
202
203 if (offset > MAX_PHY_REG_ADDRESS) {
204 hw_dbg("PHY Address %d is out of range\n", offset);
205 ret_val = -E1000_ERR_PARAM;
206 goto out;
207 }
208
209 /*
210 * Set up Op-code, Phy Address, and register offset in the MDI
211 * Control register. The MAC will take care of interfacing with the
212 * PHY to retrieve the desired data.
213 */
214 mdic = (((u32)data) |
215 (offset << E1000_MDIC_REG_SHIFT) |
216 (phy->addr << E1000_MDIC_PHY_SHIFT) |
217 (E1000_MDIC_OP_WRITE));
218
219 wr32(E1000_MDIC, mdic);
220
221 /*
222 * Poll the ready bit to see if the MDI write completed. The
223 * timeout was increased because testing showed failures with
224 * the lower timeout.
225 */
226 for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
227 udelay(50);
228 mdic = rd32(E1000_MDIC);
229 if (mdic & E1000_MDIC_READY)
230 break;
231 }
232 if (!(mdic & E1000_MDIC_READY)) {
233 hw_dbg("MDI Write did not complete\n");
234 ret_val = -E1000_ERR_PHY;
235 goto out;
236 }
237 if (mdic & E1000_MDIC_ERROR) {
238 hw_dbg("MDI Error\n");
239 ret_val = -E1000_ERR_PHY;
240 goto out;
241 }
242
243out:
244 return ret_val;
245}
246
247/**
248 * igb_read_phy_reg_i2c - Read PHY register using i2c
249 * @hw: pointer to the HW structure
250 * @offset: register offset to be read
251 * @data: pointer to the read data
252 *
253 * Reads the PHY register at offset using the i2c interface and stores the
254 * retrieved information in data.
255 **/
256s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
257{
258 struct e1000_phy_info *phy = &hw->phy;
259 u32 i, i2ccmd = 0;
260
261
262 /*
263 * Set up Op-code, Phy Address, and register address in the I2CCMD
264 * register. The MAC will take care of interfacing with the
265 * PHY to retrieve the desired data.
266 */
267 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
268 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
269 (E1000_I2CCMD_OPCODE_READ));
270
271 wr32(E1000_I2CCMD, i2ccmd);
272
273 /* Poll the ready bit to see if the I2C read completed */
274 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
275 udelay(50);
276 i2ccmd = rd32(E1000_I2CCMD);
277 if (i2ccmd & E1000_I2CCMD_READY)
278 break;
279 }
280 if (!(i2ccmd & E1000_I2CCMD_READY)) {
281 hw_dbg("I2CCMD Read did not complete\n");
282 return -E1000_ERR_PHY;
283 }
284 if (i2ccmd & E1000_I2CCMD_ERROR) {
285 hw_dbg("I2CCMD Error bit set\n");
286 return -E1000_ERR_PHY;
287 }
288
289 /* Need to byte-swap the 16-bit value. */
290 *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
291
292 return 0;
293}
294
295/**
296 * igb_write_phy_reg_i2c - Write PHY register using i2c
297 * @hw: pointer to the HW structure
298 * @offset: register offset to write to
299 * @data: data to write at register offset
300 *
301 * Writes the data to PHY register at the offset using the i2c interface.
302 **/
303s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
304{
305 struct e1000_phy_info *phy = &hw->phy;
306 u32 i, i2ccmd = 0;
307 u16 phy_data_swapped;
308
309
310 /* Swap the data bytes for the I2C interface */
311 phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
312
313 /*
314 * Set up Op-code, Phy Address, and register address in the I2CCMD
315 * register. The MAC will take care of interfacing with the
316 * PHY to retrieve the desired data.
317 */
318 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
319 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
320 E1000_I2CCMD_OPCODE_WRITE |
321 phy_data_swapped);
322
323 wr32(E1000_I2CCMD, i2ccmd);
324
325 /* Poll the ready bit to see if the I2C write completed */
326 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
327 udelay(50);
328 i2ccmd = rd32(E1000_I2CCMD);
329 if (i2ccmd & E1000_I2CCMD_READY)
330 break;
331 }
332 if (!(i2ccmd & E1000_I2CCMD_READY)) {
333 hw_dbg("I2CCMD Write did not complete\n");
334 return -E1000_ERR_PHY;
335 }
336 if (i2ccmd & E1000_I2CCMD_ERROR) {
337 hw_dbg("I2CCMD Error bit set\n");
338 return -E1000_ERR_PHY;
339 }
340
341 return 0;
342}
343
344/**
345 * igb_read_phy_reg_igp - Read igp PHY register
346 * @hw: pointer to the HW structure
347 * @offset: register offset to be read
348 * @data: pointer to the read data
349 *
350 * Acquires semaphore, if necessary, then reads the PHY register at offset
351 * and storing the retrieved information in data. Release any acquired
352 * semaphores before exiting.
353 **/
354s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
355{
356 s32 ret_val = 0;
357
358 if (!(hw->phy.ops.acquire))
359 goto out;
360
361 ret_val = hw->phy.ops.acquire(hw);
362 if (ret_val)
363 goto out;
364
365 if (offset > MAX_PHY_MULTI_PAGE_REG) {
366 ret_val = igb_write_phy_reg_mdic(hw,
367 IGP01E1000_PHY_PAGE_SELECT,
368 (u16)offset);
369 if (ret_val) {
370 hw->phy.ops.release(hw);
371 goto out;
372 }
373 }
374
375 ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
376 data);
377
378 hw->phy.ops.release(hw);
379
380out:
381 return ret_val;
382}
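Offsets beyond MAX_PHY_MULTI_PAGE_REG require a page-select write before the masked low bits can address the register, which is what the branch above does. A toy model of that paged access; the page-select offset and the page encoding below are simplified stand-ins, not the real IGP register layout:

#include <stdio.h>
#include <stdint.h>

#define MAX_PHY_REG_ADDRESS    0x1F
#define MAX_PHY_MULTI_PAGE_REG 0xF
#define PAGE_SELECT_REG        0x1F /* hypothetical page-select offset */

static uint16_t page;               /* currently selected page */
static uint16_t regs[4][32];        /* [page][reg], fake PHY space */

static void mdic_write(uint32_t offset, uint16_t data)
{
	if (offset == PAGE_SELECT_REG)
		page = data >> 5;   /* hypothetical page encoding */
	else
		regs[page][offset] = data;
}

static uint16_t igp_read(uint32_t offset)
{
	if (offset > MAX_PHY_MULTI_PAGE_REG)
		mdic_write(PAGE_SELECT_REG, (uint16_t)offset);
	return regs[page][offset & MAX_PHY_REG_ADDRESS];
}

int main(void)
{
	regs[1][0x10] = 0xABCD;
	/* offset 0x30: page select written first, low 5 bits address reg */
	printf("0x%04X\n", (unsigned)igp_read(0x30)); /* 0xABCD */
	return 0;
}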
383
384/**
385 * igb_write_phy_reg_igp - Write igp PHY register
386 * @hw: pointer to the HW structure
387 * @offset: register offset to write to
388 * @data: data to write at register offset
389 *
390 * Acquires the semaphore, if necessary, then writes the data to the PHY
391 * register at the offset. Releases any acquired semaphores before exiting.
392 **/
393s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
394{
395 s32 ret_val = 0;
396
397 if (!(hw->phy.ops.acquire))
398 goto out;
399
400 ret_val = hw->phy.ops.acquire(hw);
401 if (ret_val)
402 goto out;
403
404 if (offset > MAX_PHY_MULTI_PAGE_REG) {
405 ret_val = igb_write_phy_reg_mdic(hw,
406 IGP01E1000_PHY_PAGE_SELECT,
407 (u16)offset);
408 if (ret_val) {
409 hw->phy.ops.release(hw);
410 goto out;
411 }
412 }
413
414 ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
415 data);
416
417 hw->phy.ops.release(hw);
418
419out:
420 return ret_val;
421}
422
423/**
424 * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link
425 * @hw: pointer to the HW structure
426 *
427 * Sets up Carrier-sense on Transmit and downshift values.
428 **/
429s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
430{
431 struct e1000_phy_info *phy = &hw->phy;
432 s32 ret_val;
433 u16 phy_data;
434
435
436 if (phy->reset_disable) {
437 ret_val = 0;
438 goto out;
439 }
440
441 if (phy->type == e1000_phy_82580) {
442 ret_val = hw->phy.ops.reset(hw);
443 if (ret_val) {
444 hw_dbg("Error resetting the PHY.\n");
445 goto out;
446 }
447 }
448
449 /* Enable CRS on TX. This must be set for half-duplex operation. */
450 ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data);
451 if (ret_val)
452 goto out;
453
454 phy_data |= I82580_CFG_ASSERT_CRS_ON_TX;
455
456 /* Enable downshift */
457 phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
458
459 ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
460
461out:
462 return ret_val;
463}
464
465/**
466 * igb_copper_link_setup_m88 - Setup m88 PHYs for copper link
467 * @hw: pointer to the HW structure
468 *
469 * Sets up MDI/MDI-X and polarity for m88 PHYs. If necessary, transmit
470 * clock and downshift values are also set.
471 **/
472s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
473{
474 struct e1000_phy_info *phy = &hw->phy;
475 s32 ret_val;
476 u16 phy_data;
477
478 if (phy->reset_disable) {
479 ret_val = 0;
480 goto out;
481 }
482
483 /* Enable CRS on TX. This must be set for half-duplex operation. */
484 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
485 if (ret_val)
486 goto out;
487
488 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
489
490 /*
491 * Options:
492 * MDI/MDI-X = 0 (default)
493 * 0 - Auto for all speeds
494 * 1 - MDI mode
495 * 2 - MDI-X mode
496 * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
497 */
498 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
499
500 switch (phy->mdix) {
501 case 1:
502 phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
503 break;
504 case 2:
505 phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
506 break;
507 case 3:
508 phy_data |= M88E1000_PSCR_AUTO_X_1000T;
509 break;
510 case 0:
511 default:
512 phy_data |= M88E1000_PSCR_AUTO_X_MODE;
513 break;
514 }
515
516 /*
517 * Options:
518 * disable_polarity_correction = 0 (default)
519 * Automatic Correction for Reversed Cable Polarity
520 * 0 - Disabled
521 * 1 - Enabled
522 */
523 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
524 if (phy->disable_polarity_correction == 1)
525 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
526
527 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
528 if (ret_val)
529 goto out;
530
531 if (phy->revision < E1000_REVISION_4) {
532 /*
533 * Force TX_CLK in the Extended PHY Specific Control Register
534 * to 25MHz clock.
535 */
536 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
537 &phy_data);
538 if (ret_val)
539 goto out;
540
541 phy_data |= M88E1000_EPSCR_TX_CLK_25;
542
543 if ((phy->revision == E1000_REVISION_2) &&
544 (phy->id == M88E1111_I_PHY_ID)) {
545 /* 82573L PHY - set the downshift counter to 5x. */
546 phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
547 phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
548 } else {
549 /* Configure Master and Slave downshift values */
550 phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
551 M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
552 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
553 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
554 }
555 ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
556 phy_data);
557 if (ret_val)
558 goto out;
559 }
560
561 /* Commit the changes. */
562 ret_val = igb_phy_sw_reset(hw);
563 if (ret_val) {
564 hw_dbg("Error committing the PHY changes\n");
565 goto out;
566 }
567
568out:
569 return ret_val;
570}
571
572/**
573 * igb_copper_link_setup_m88_gen2 - Setup m88 PHYs for copper link
574 * @hw: pointer to the HW structure
575 *
576 * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHYs.
577 * Also enables and sets the downshift parameters.
578 **/
579s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
580{
581 struct e1000_phy_info *phy = &hw->phy;
582 s32 ret_val;
583 u16 phy_data;
584
585 if (phy->reset_disable) {
586 ret_val = 0;
587 goto out;
588 }
589
590 /* Enable CRS on Tx. This must be set for half-duplex operation. */
591 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
592 if (ret_val)
593 goto out;
594
595 /*
596 * Options:
597 * MDI/MDI-X = 0 (default)
598 * 0 - Auto for all speeds
599 * 1 - MDI mode
600 * 2 - MDI-X mode
601 * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
602 */
603 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
604
605 switch (phy->mdix) {
606 case 1:
607 phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
608 break;
609 case 2:
610 phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
611 break;
612 case 3:
613 /* M88E1112 does not support this mode */
614 if (phy->id != M88E1112_E_PHY_ID) {
615 phy_data |= M88E1000_PSCR_AUTO_X_1000T;
616 break;
617 }
618 case 0:
619 default:
620 phy_data |= M88E1000_PSCR_AUTO_X_MODE;
621 break;
622 }
623
624 /*
625 * Options:
626 * disable_polarity_correction = 0 (default)
627 * Automatic Correction for Reversed Cable Polarity
628 * 0 - Disabled
629 * 1 - Enabled
630 */
631 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
632 if (phy->disable_polarity_correction == 1)
633 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
634
635 /* Enable downshift and setting it to X6 */
636 phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
637 phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
638 phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
639
640 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
641 if (ret_val)
642 goto out;
643
644 /* Commit the changes. */
645 ret_val = igb_phy_sw_reset(hw);
646 if (ret_val) {
647 hw_dbg("Error committing the PHY changes\n");
648 goto out;
649 }
650
651out:
652 return ret_val;
653}
654
655/**
656 * igb_copper_link_setup_igp - Setup igp PHYs for copper link
657 * @hw: pointer to the HW structure
658 *
659 * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
660 * igp PHYs.
661 **/
662s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
663{
664 struct e1000_phy_info *phy = &hw->phy;
665 s32 ret_val;
666 u16 data;
667
668 if (phy->reset_disable) {
669 ret_val = 0;
670 goto out;
671 }
672
673 ret_val = phy->ops.reset(hw);
674 if (ret_val) {
675 hw_dbg("Error resetting the PHY.\n");
676 goto out;
677 }
678
679 /*
680 * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
681 * timeout issues when LFS is enabled.
682 */
683 msleep(100);
684
685 /*
686 * The NVM settings will configure LPLU in D3 for
687 * non-IGP1 PHYs.
688 */
689 if (phy->type == e1000_phy_igp) {
690 /* disable lplu d3 during driver init */
691 if (phy->ops.set_d3_lplu_state)
692 ret_val = phy->ops.set_d3_lplu_state(hw, false);
693 if (ret_val) {
694 hw_dbg("Error Disabling LPLU D3\n");
695 goto out;
696 }
697 }
698
699 /* disable lplu d0 during driver init */
700 ret_val = phy->ops.set_d0_lplu_state(hw, false);
701 if (ret_val) {
702 hw_dbg("Error Disabling LPLU D0\n");
703 goto out;
704 }
705 /* Configure mdi-mdix settings */
706 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
707 if (ret_val)
708 goto out;
709
710 data &= ~IGP01E1000_PSCR_AUTO_MDIX;
711
712 switch (phy->mdix) {
713 case 1:
714 data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
715 break;
716 case 2:
717 data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
718 break;
719 case 0:
720 default:
721 data |= IGP01E1000_PSCR_AUTO_MDIX;
722 break;
723 }
724 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
725 if (ret_val)
726 goto out;
727
728 /* set auto-master slave resolution settings */
729 if (hw->mac.autoneg) {
730 /*
731 * when autonegotiation advertisement is only 1000Mbps then we
732 * should disable SmartSpeed and enable Auto MasterSlave
733 * resolution as hardware default.
734 */
735 if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
736 /* Disable SmartSpeed */
737 ret_val = phy->ops.read_reg(hw,
738 IGP01E1000_PHY_PORT_CONFIG,
739 &data);
740 if (ret_val)
741 goto out;
742
743 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
744 ret_val = phy->ops.write_reg(hw,
745 IGP01E1000_PHY_PORT_CONFIG,
746 data);
747 if (ret_val)
748 goto out;
749
750 /* Set auto Master/Slave resolution process */
751 ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
752 if (ret_val)
753 goto out;
754
755 data &= ~CR_1000T_MS_ENABLE;
756 ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
757 if (ret_val)
758 goto out;
759 }
760
761 ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
762 if (ret_val)
763 goto out;
764
765 /* load defaults for future use */
766 phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
767 ((data & CR_1000T_MS_VALUE) ?
768 e1000_ms_force_master :
769 e1000_ms_force_slave) :
770 e1000_ms_auto;
771
772 switch (phy->ms_type) {
773 case e1000_ms_force_master:
774 data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
775 break;
776 case e1000_ms_force_slave:
777 data |= CR_1000T_MS_ENABLE;
778 data &= ~(CR_1000T_MS_VALUE);
779 break;
780 case e1000_ms_auto:
781 data &= ~CR_1000T_MS_ENABLE;
782 default:
783 break;
784 }
785 ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
786 if (ret_val)
787 goto out;
788 }
789
790out:
791 return ret_val;
792}
793
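The mdix switch in igb_copper_link_setup_igp() above reduces to a small bit mapping. A standalone sketch using the IGP01E1000_PSCR_* values from e1000_phy.h later in this diff (the helper and the demo loop are scaffolding, not driver code):

#include <stdint.h>
#include <stdio.h>

#define IGP01E1000_PSCR_AUTO_MDIX	0x1000
#define IGP01E1000_PSCR_FORCE_MDI_MDIX	0x2000	/* 0=MDI, 1=MDIX */

static uint16_t igp_mdix_bits(uint16_t data, int mdix)
{
	data &= ~IGP01E1000_PSCR_AUTO_MDIX;
	switch (mdix) {
	case 1:					/* forced MDI  */
		data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
		break;
	case 2:					/* forced MDIX */
		data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
		break;
	default:				/* 0 = auto    */
		data |= IGP01E1000_PSCR_AUTO_MDIX;
		break;
	}
	return data;
}

int main(void)
{
	for (int mdix = 0; mdix <= 2; mdix++)
		printf("mdix=%d -> port ctrl 0x%04x\n",
		       mdix, igp_mdix_bits(0, mdix));
	return 0;
}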
794/**
795 * igb_copper_link_autoneg - Setup/Enable autoneg for copper link
796 * @hw: pointer to the HW structure
797 *
798 * Performs initial bounds checking on the autoneg advertisement parameter,
799 * then configures the PHY to advertise full capability. Sets up the PHY for
800 * autoneg and restarts negotiation with the link partner. If
801 * autoneg_wait_to_complete is set, waits for autoneg to complete before exiting.
802 **/
803static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
804{
805 struct e1000_phy_info *phy = &hw->phy;
806 s32 ret_val;
807 u16 phy_ctrl;
808
809 /*
810 * Perform some bounds checking on the autoneg advertisement
811 * parameter.
812 */
813 phy->autoneg_advertised &= phy->autoneg_mask;
814
815 /*
816 * If autoneg_advertised is zero, we assume it was not defaulted
817 * by the calling code so we set to advertise full capability.
818 */
819 if (phy->autoneg_advertised == 0)
820 phy->autoneg_advertised = phy->autoneg_mask;
821
822 hw_dbg("Reconfiguring auto-neg advertisement params\n");
823 ret_val = igb_phy_setup_autoneg(hw);
824 if (ret_val) {
825 hw_dbg("Error Setting up Auto-Negotiation\n");
826 goto out;
827 }
828 hw_dbg("Restarting Auto-Neg\n");
829
830 /*
831 * Restart auto-negotiation by setting the Auto Neg Enable bit and
832 * the Auto Neg Restart bit in the PHY control register.
833 */
834 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
835 if (ret_val)
836 goto out;
837
838 phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
839 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
840 if (ret_val)
841 goto out;
842
843 /*
844 * Does the user want to wait for Auto-Neg to complete here, or
845 * check at a later time (for example, callback routine).
846 */
847 if (phy->autoneg_wait_to_complete) {
848 ret_val = igb_wait_autoneg(hw);
849 if (ret_val) {
850 hw_dbg("Error while waiting for "
851 "autoneg to complete\n");
852 goto out;
853 }
854 }
855
856 hw->mac.get_link_status = true;
857
858out:
859 return ret_val;
860}
861
862/**
863 * igb_phy_setup_autoneg - Configure PHY for auto-negotiation
864 * @hw: pointer to the HW structure
865 *
866 * Reads the MII auto-neg advertisement register and/or the 1000T control
867 * register. If the PHY is already set up for auto-negotiation, returns
868 * successful. Otherwise, sets up the advertisement and flow control to
869 * the appropriate values for the desired auto-negotiation.
870 **/
871static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
872{
873 struct e1000_phy_info *phy = &hw->phy;
874 s32 ret_val;
875 u16 mii_autoneg_adv_reg;
876 u16 mii_1000t_ctrl_reg = 0;
877
878 phy->autoneg_advertised &= phy->autoneg_mask;
879
880 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
881 ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
882 if (ret_val)
883 goto out;
884
885 if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
886 /* Read the MII 1000Base-T Control Register (Address 9). */
887 ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
888 &mii_1000t_ctrl_reg);
889 if (ret_val)
890 goto out;
891 }
892
893 /*
894 * Need to parse both autoneg_advertised and fc and set up
895 * the appropriate PHY registers. First we will parse for
896 * autoneg_advertised software override. Since we can advertise
897 * a plethora of combinations, we need to check each bit
898 * individually.
899 */
900
901 /*
902 * First we clear all the 10/100 mb speed bits in the Auto-Neg
903 * Advertisement Register (Address 4) and the 1000 mb speed bits in
904 * the 1000Base-T Control Register (Address 9).
905 */
906 mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
907 NWAY_AR_100TX_HD_CAPS |
908 NWAY_AR_10T_FD_CAPS |
909 NWAY_AR_10T_HD_CAPS);
910 mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
911
912 hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
913
914 /* Do we want to advertise 10 Mb Half Duplex? */
915 if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
916 hw_dbg("Advertise 10mb Half duplex\n");
917 mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
918 }
919
920 /* Do we want to advertise 10 Mb Full Duplex? */
921 if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
922 hw_dbg("Advertise 10mb Full duplex\n");
923 mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
924 }
925
926 /* Do we want to advertise 100 Mb Half Duplex? */
927 if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
928 hw_dbg("Advertise 100mb Half duplex\n");
929 mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
930 }
931
932 /* Do we want to advertise 100 Mb Full Duplex? */
933 if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
934 hw_dbg("Advertise 100mb Full duplex\n");
935 mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
936 }
937
938 /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
939 if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
940 hw_dbg("Advertise 1000mb Half duplex request denied!\n");
941
942 /* Do we want to advertise 1000 Mb Full Duplex? */
943 if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
944 hw_dbg("Advertise 1000mb Full duplex\n");
945 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
946 }
947
948 /*
949 * Check for a software override of the flow control settings, and
950 * setup the PHY advertisement registers accordingly. If
951 * auto-negotiation is enabled, then software will have to set the
952 * "PAUSE" bits to the correct value in the Auto-Negotiation
953 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
954 * negotiation.
955 *
956 * The possible values of the "fc" parameter are:
957 * 0: Flow control is completely disabled
958 * 1: Rx flow control is enabled (we can receive pause frames
959 * but not send pause frames).
960 * 2: Tx flow control is enabled (we can send pause frames
961 * but we do not support receiving pause frames).
962 * 3: Both Rx and TX flow control (symmetric) are enabled.
963 * other: No software override. The flow control configuration
964 * in the EEPROM is used.
965 */
966 switch (hw->fc.current_mode) {
967 case e1000_fc_none:
968 /*
969 * Flow control (RX & TX) is completely disabled by a
970 * software over-ride.
971 */
972 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
973 break;
974 case e1000_fc_rx_pause:
975 /*
976 * RX Flow control is enabled, and TX Flow control is
977 * disabled, by a software over-ride.
978 *
979 * Since there really isn't a way to advertise that we are
980 * capable of RX Pause ONLY, we will advertise that we
981 * support both symmetric and asymmetric RX PAUSE. Later
982 * (in e1000_config_fc_after_link_up) we will disable the
983 * hw's ability to send PAUSE frames.
984 */
985 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
986 break;
987 case e1000_fc_tx_pause:
988 /*
989 * TX Flow control is enabled, and RX Flow control is
990 * disabled, by a software over-ride.
991 */
992 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
993 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
994 break;
995 case e1000_fc_full:
996 /*
997 * Flow control (both RX and TX) is enabled by a software
998 * over-ride.
999 */
1000 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1001 break;
1002 default:
1003 hw_dbg("Flow control param set incorrectly\n");
1004 ret_val = -E1000_ERR_CONFIG;
1005 goto out;
1006 }
1007
1008 ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
1009 if (ret_val)
1010 goto out;
1011
1012 hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
1013
1014 if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
1015 ret_val = phy->ops.write_reg(hw,
1016 PHY_1000T_CTRL,
1017 mii_1000t_ctrl_reg);
1018 if (ret_val)
1019 goto out;
1020 }
1021
1022out:
1023 return ret_val;
1024}
1025
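The flow-control switch above maps hw->fc.current_mode onto the two MII pause advertisement bits. Note that rx-pause-only cannot be advertised, so the rx_pause and full cases set the same bits and the transmit side is disabled later, per the comment above. A standalone sketch (the NWAY_AR_* values mirror the standard MII pause bits and are assumed here, since the defines are not in this excerpt):

#include <stdint.h>
#include <stdio.h>

#define NWAY_AR_PAUSE	0x0400	/* symmetric pause (assumed value)   */
#define NWAY_AR_ASM_DIR	0x0800	/* asymmetric pause (assumed value)  */

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static uint16_t fc_adv_bits(uint16_t adv, enum fc_mode fc)
{
	adv &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
	switch (fc) {
	case FC_NONE:
		break;
	case FC_RX_PAUSE:	/* same bits as full; tx fixed after link-up */
	case FC_FULL:
		adv |= NWAY_AR_ASM_DIR | NWAY_AR_PAUSE;
		break;
	case FC_TX_PAUSE:
		adv |= NWAY_AR_ASM_DIR;
		break;
	}
	return adv;
}

int main(void)
{
	printf("tx_pause -> 0x%04x\n", fc_adv_bits(0, FC_TX_PAUSE));
	printf("full     -> 0x%04x\n", fc_adv_bits(0, FC_FULL));
	return 0;
}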
1026/**
1027 * igb_setup_copper_link - Configure copper link settings
1028 * @hw: pointer to the HW structure
1029 *
1030 * Calls the appropriate function to configure the link for auto-neg or forced
1031 * speed and duplex. Then we check for link; once link is established, the
1032 * collision distance and flow control are configured. If link is
1033 * not established, we return -E1000_ERR_PHY (-2).
1034 **/
1035s32 igb_setup_copper_link(struct e1000_hw *hw)
1036{
1037 s32 ret_val;
1038 bool link;
1039
1040
1041 if (hw->mac.autoneg) {
1042 /*
1043 * Setup autoneg and flow control advertisement and perform
1044 * autonegotiation.
1045 */
1046 ret_val = igb_copper_link_autoneg(hw);
1047 if (ret_val)
1048 goto out;
1049 } else {
1050 /*
1051 * PHY will be set to 10H, 10F, 100H or 100F
1052 * depending on user settings.
1053 */
1054 hw_dbg("Forcing Speed and Duplex\n");
1055 ret_val = hw->phy.ops.force_speed_duplex(hw);
1056 if (ret_val) {
1057 hw_dbg("Error Forcing Speed and Duplex\n");
1058 goto out;
1059 }
1060 }
1061
1062 /*
1063 * Check link status. Wait up to 100 microseconds for link to become
1064 * valid.
1065 */
1066 ret_val = igb_phy_has_link(hw,
1067 COPPER_LINK_UP_LIMIT,
1068 10,
1069 &link);
1070 if (ret_val)
1071 goto out;
1072
1073 if (link) {
1074 hw_dbg("Valid link established!!!\n");
1075 igb_config_collision_dist(hw);
1076 ret_val = igb_config_fc_after_link_up(hw);
1077 } else {
1078 hw_dbg("Unable to establish link!!!\n");
1079 }
1080
1081out:
1082 return ret_val;
1083}
1084
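A quick sanity check of the "wait up to 100 microseconds" comment above: the poll runs COPPER_LINK_UP_LIMIT iterations at usec_interval microseconds each, and the call above passes an interval of 10. The limit value of 10 is assumed from e1000_defines.h, which is not shown in this diff:

#include <stdio.h>

#define COPPER_LINK_UP_LIMIT 10		/* assumed from e1000_defines.h */

int main(void)
{
	unsigned usec_interval = 10;	/* as passed by igb_setup_copper_link */
	printf("worst-case link wait: %u us\n",
	       COPPER_LINK_UP_LIMIT * usec_interval);	/* prints 100 us */
	return 0;
}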
1085/**
1086 * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
1087 * @hw: pointer to the HW structure
1088 *
1089 * Calls the PHY setup function to force speed and duplex. Clears the
1090 * auto-crossover to force MDI manually. Waits for link and returns
1091 * successful if link is established, else -E1000_ERR_PHY (-2).
1092 **/
1093s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
1094{
1095 struct e1000_phy_info *phy = &hw->phy;
1096 s32 ret_val;
1097 u16 phy_data;
1098 bool link;
1099
1100 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
1101 if (ret_val)
1102 goto out;
1103
1104 igb_phy_force_speed_duplex_setup(hw, &phy_data);
1105
1106 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
1107 if (ret_val)
1108 goto out;
1109
1110 /*
1111 * Clear Auto-Crossover to force MDI manually. IGP requires MDI
1112 * forced whenever speed and duplex are forced.
1113 */
1114 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
1115 if (ret_val)
1116 goto out;
1117
1118 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
1119 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
1120
1121 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
1122 if (ret_val)
1123 goto out;
1124
1125 hw_dbg("IGP PSCR: %X\n", phy_data);
1126
1127 udelay(1);
1128
1129 if (phy->autoneg_wait_to_complete) {
1130 hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
1131
1132 ret_val = igb_phy_has_link(hw,
1133 PHY_FORCE_LIMIT,
1134 100000,
1135 &link);
1136 if (ret_val)
1137 goto out;
1138
1139 if (!link)
1140 hw_dbg("Link taking longer than expected.\n");
1141
1142 /* Try once more */
1143 ret_val = igb_phy_has_link(hw,
1144 PHY_FORCE_LIMIT,
1145 100000,
1146 &link);
1147 if (ret_val)
1148 goto out;
1149 }
1150
1151out:
1152 return ret_val;
1153}
1154
1155/**
1156 * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
1157 * @hw: pointer to the HW structure
1158 *
1159 * Calls the PHY setup function to force speed and duplex. Clears the
1160 * auto-crossover to force MDI manually. Resets the PHY to commit the
1161 * changes. If time expires while waiting for link up, we reset the DSP.
1162 * After reset, TX_CLK and CRS on TX must be set. Returns 0 upon
1163 * successful completion, else the corresponding error code.
1164 **/
1165s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1166{
1167 struct e1000_phy_info *phy = &hw->phy;
1168 s32 ret_val;
1169 u16 phy_data;
1170 bool link;
1171
1172 /*
1173 * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
1174 * forced whenever speed and duplex are forced.
1175 */
1176 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
1177 if (ret_val)
1178 goto out;
1179
1180 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
1181 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
1182 if (ret_val)
1183 goto out;
1184
1185 hw_dbg("M88E1000 PSCR: %X\n", phy_data);
1186
1187 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
1188 if (ret_val)
1189 goto out;
1190
1191 igb_phy_force_speed_duplex_setup(hw, &phy_data);
1192
1193 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
1194 if (ret_val)
1195 goto out;
1196
1197 /* Reset the phy to commit changes. */
1198 ret_val = igb_phy_sw_reset(hw);
1199 if (ret_val)
1200 goto out;
1201
1202 if (phy->autoneg_wait_to_complete) {
1203 hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
1204
1205 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
1206 if (ret_val)
1207 goto out;
1208
1209 if (!link) {
1210 if (hw->phy.type != e1000_phy_m88 ||
1211 hw->phy.id == I347AT4_E_PHY_ID ||
1212 hw->phy.id == M88E1112_E_PHY_ID) {
1213 hw_dbg("Link taking longer than expected.\n");
1214 } else {
1215
1216 /*
1217 * We didn't get link.
1218 * Reset the DSP and cross our fingers.
1219 */
1220 ret_val = phy->ops.write_reg(hw,
1221 M88E1000_PHY_PAGE_SELECT,
1222 0x001d);
1223 if (ret_val)
1224 goto out;
1225 ret_val = igb_phy_reset_dsp(hw);
1226 if (ret_val)
1227 goto out;
1228 }
1229 }
1230
1231 /* Try once more */
1232 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT,
1233 100000, &link);
1234 if (ret_val)
1235 goto out;
1236 }
1237
1238 if (hw->phy.type != e1000_phy_m88 ||
1239 hw->phy.id == I347AT4_E_PHY_ID ||
1240 hw->phy.id == M88E1112_E_PHY_ID)
1241 goto out;
1242
1243 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
1244 if (ret_val)
1245 goto out;
1246
1247 /*
1248 * Resetting the phy means we need to re-force TX_CLK in the
1249 * Extended PHY Specific Control Register to 25MHz clock from
1250 * the reset value of 2.5MHz.
1251 */
1252 phy_data |= M88E1000_EPSCR_TX_CLK_25;
1253 ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
1254 if (ret_val)
1255 goto out;
1256
1257 /*
1258 * In addition, we must re-enable CRS on Tx for both half and full
1259 * duplex.
1260 */
1261 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
1262 if (ret_val)
1263 goto out;
1264
1265 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
1266 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
1267
1268out:
1269 return ret_val;
1270}
1271
1272/**
1273 * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
1274 * @hw: pointer to the HW structure
1275 * @phy_ctrl: pointer to current value of PHY_CONTROL
1276 *
1277 * Forces speed and duplex on the PHY by doing the following: disable flow
1278 * control, force speed/duplex on the MAC, disable auto speed detection,
1279 * disable auto-negotiation, configure duplex, configure speed, configure
1280 * the collision distance, write configuration to CTRL register. The
1281 * caller must write to the PHY_CONTROL register for these settings to
1282 * take effect.
1283 **/
1284static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
1285 u16 *phy_ctrl)
1286{
1287 struct e1000_mac_info *mac = &hw->mac;
1288 u32 ctrl;
1289
1290 /* Turn off flow control when forcing speed/duplex */
1291 hw->fc.current_mode = e1000_fc_none;
1292
1293 /* Force speed/duplex on the mac */
1294 ctrl = rd32(E1000_CTRL);
1295 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1296 ctrl &= ~E1000_CTRL_SPD_SEL;
1297
1298 /* Disable Auto Speed Detection */
1299 ctrl &= ~E1000_CTRL_ASDE;
1300
1301 /* Disable autoneg on the phy */
1302 *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
1303
1304 /* Forcing Full or Half Duplex? */
1305 if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
1306 ctrl &= ~E1000_CTRL_FD;
1307 *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
1308 hw_dbg("Half Duplex\n");
1309 } else {
1310 ctrl |= E1000_CTRL_FD;
1311 *phy_ctrl |= MII_CR_FULL_DUPLEX;
1312 hw_dbg("Full Duplex\n");
1313 }
1314
1315 /* Forcing 10mb or 100mb? */
1316 if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
1317 ctrl |= E1000_CTRL_SPD_100;
1318 *phy_ctrl |= MII_CR_SPEED_100;
1319 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
1320 hw_dbg("Forcing 100mb\n");
1321 } else {
1322 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1323 *phy_ctrl |= MII_CR_SPEED_10;
1324 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
1325 hw_dbg("Forcing 10mb\n");
1326 }
1327
1328 igb_config_collision_dist(hw);
1329
1330 wr32(E1000_CTRL, ctrl);
1331}
1332
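The PHY half of the forcing logic above is standard MII control-register bit selection. A standalone sketch (the MII_CR_* values mirror the standard BMCR bits; the actual defines live in e1000_defines.h, not shown in this diff):

#include <stdint.h>
#include <stdio.h>

#define MII_CR_SPEED_1000	0x0040	/* assumed, mirrors BMCR_SPEED1000 */
#define MII_CR_FULL_DUPLEX	0x0100	/* assumed, mirrors BMCR_FULLDPLX  */
#define MII_CR_AUTO_NEG_EN	0x1000	/* assumed, mirrors BMCR_ANENABLE  */
#define MII_CR_SPEED_100	0x2000	/* assumed, mirrors BMCR_SPEED100  */

static uint16_t force_bits(uint16_t ctrl, int speed_100, int full_duplex)
{
	ctrl &= ~MII_CR_AUTO_NEG_EN;		/* no autoneg when forcing */
	ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100 | MII_CR_FULL_DUPLEX);
	if (speed_100)
		ctrl |= MII_CR_SPEED_100;	/* 10 Mb is all-zero bits  */
	if (full_duplex)
		ctrl |= MII_CR_FULL_DUPLEX;
	return ctrl;
}

int main(void)
{
	/* 0x1140 is a typical gigabit BMCR power-on value */
	printf("100/full -> 0x%04x\n", force_bits(0x1140, 1, 1)); /* 0x2100 */
	printf("10/half  -> 0x%04x\n", force_bits(0x1140, 0, 0)); /* 0x0000 */
	return 0;
}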
1333/**
1334 * igb_set_d3_lplu_state - Sets low power link up state for D3
1335 * @hw: pointer to the HW structure
1336 * @active: boolean used to enable/disable lplu
1337 *
1338 * Success returns 0, Failure returns 1
1339 *
1340 * The low power link up (lplu) state is set to the power management level D3
1341 * and SmartSpeed is disabled when active is true, else clear lplu for D3
1342 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
1343 * is used during Dx states where the power conservation is most important.
1344 * During driver activity, SmartSpeed should be enabled so performance is
1345 * maintained.
1346 **/
1347s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1348{
1349 struct e1000_phy_info *phy = &hw->phy;
1350 s32 ret_val = 0;
1351 u16 data;
1352
1353 if (!(hw->phy.ops.read_reg))
1354 goto out;
1355
1356 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
1357 if (ret_val)
1358 goto out;
1359
1360 if (!active) {
1361 data &= ~IGP02E1000_PM_D3_LPLU;
1362 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
1363 data);
1364 if (ret_val)
1365 goto out;
1366 /*
1367 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
1368 * during Dx states where the power conservation is most
1369 * important. During driver activity we should enable
1370 * SmartSpeed, so performance is maintained.
1371 */
1372 if (phy->smart_speed == e1000_smart_speed_on) {
1373 ret_val = phy->ops.read_reg(hw,
1374 IGP01E1000_PHY_PORT_CONFIG,
1375 &data);
1376 if (ret_val)
1377 goto out;
1378
1379 data |= IGP01E1000_PSCFR_SMART_SPEED;
1380 ret_val = phy->ops.write_reg(hw,
1381 IGP01E1000_PHY_PORT_CONFIG,
1382 data);
1383 if (ret_val)
1384 goto out;
1385 } else if (phy->smart_speed == e1000_smart_speed_off) {
1386 ret_val = phy->ops.read_reg(hw,
1387 IGP01E1000_PHY_PORT_CONFIG,
1388 &data);
1389 if (ret_val)
1390 goto out;
1391
1392 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1393 ret_val = phy->ops.write_reg(hw,
1394 IGP01E1000_PHY_PORT_CONFIG,
1395 data);
1396 if (ret_val)
1397 goto out;
1398 }
1399 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
1400 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
1401 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
1402 data |= IGP02E1000_PM_D3_LPLU;
1403 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
1404 data);
1405 if (ret_val)
1406 goto out;
1407
1408 /* When LPLU is enabled, we should disable SmartSpeed */
1409 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1410 &data);
1411 if (ret_val)
1412 goto out;
1413
1414 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1415 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1416 data);
1417 }
1418
1419out:
1420 return ret_val;
1421}
1422
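The mutual exclusion described in the comment block above comes down to two bits in two different registers that are never set at the same time. A sketch using the IGP defines from e1000_phy.h later in this diff (the register models are scaffolding):

#include <stdint.h>
#include <stdio.h>

#define IGP02E1000_PM_D3_LPLU		0x0004	/* power mgmt register  */
#define IGP01E1000_PSCFR_SMART_SPEED	0x0080	/* port config register */

int main(void)
{
	uint16_t pm = 0, cfg = IGP01E1000_PSCFR_SMART_SPEED;

	/* entering D3 with LPLU active: set LPLU, clear SmartSpeed */
	pm |= IGP02E1000_PM_D3_LPLU;
	cfg &= ~IGP01E1000_PSCFR_SMART_SPEED;
	printf("D3 lplu: pm=0x%04x cfg=0x%04x\n", pm, cfg);

	/* back to active use: clear LPLU, SmartSpeed may be re-enabled */
	pm &= ~IGP02E1000_PM_D3_LPLU;
	cfg |= IGP01E1000_PSCFR_SMART_SPEED;
	printf("active : pm=0x%04x cfg=0x%04x\n", pm, cfg);
	return 0;
}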
1423/**
1424 * igb_check_downshift - Checks whether a downshift in speed occurred
1425 * @hw: pointer to the HW structure
1426 *
1427 * Success returns 0, Failure returns 1
1428 *
1429 * A downshift is detected by querying the PHY link health.
1430 **/
1431s32 igb_check_downshift(struct e1000_hw *hw)
1432{
1433 struct e1000_phy_info *phy = &hw->phy;
1434 s32 ret_val;
1435 u16 phy_data, offset, mask;
1436
1437 switch (phy->type) {
1438 case e1000_phy_m88:
1439 case e1000_phy_gg82563:
1440 offset = M88E1000_PHY_SPEC_STATUS;
1441 mask = M88E1000_PSSR_DOWNSHIFT;
1442 break;
1443 case e1000_phy_igp_2:
1444 case e1000_phy_igp:
1445 case e1000_phy_igp_3:
1446 offset = IGP01E1000_PHY_LINK_HEALTH;
1447 mask = IGP01E1000_PLHR_SS_DOWNGRADE;
1448 break;
1449 default:
1450 /* speed downshift not supported */
1451 phy->speed_downgraded = false;
1452 ret_val = 0;
1453 goto out;
1454 }
1455
1456 ret_val = phy->ops.read_reg(hw, offset, &phy_data);
1457
1458 if (!ret_val)
1459 phy->speed_downgraded = (phy_data & mask) ? true : false;
1460
1461out:
1462 return ret_val;
1463}
1464
1465/**
1466 * igb_check_polarity_m88 - Checks the polarity.
1467 * @hw: pointer to the HW structure
1468 *
1469 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1470 *
1471 * Polarity is determined based on the PHY specific status register.
1472 **/
1473static s32 igb_check_polarity_m88(struct e1000_hw *hw)
1474{
1475 struct e1000_phy_info *phy = &hw->phy;
1476 s32 ret_val;
1477 u16 data;
1478
1479 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
1480
1481 if (!ret_val)
1482 phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
1483 ? e1000_rev_polarity_reversed
1484 : e1000_rev_polarity_normal;
1485
1486 return ret_val;
1487}
1488
1489/**
1490 * igb_check_polarity_igp - Checks the polarity.
1491 * @hw: pointer to the HW structure
1492 *
1493 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1494 *
1495 * Polarity is determined based on the PHY port status register, and the
1496 * current speed (since there is no polarity at 100Mbps).
1497 **/
1498static s32 igb_check_polarity_igp(struct e1000_hw *hw)
1499{
1500 struct e1000_phy_info *phy = &hw->phy;
1501 s32 ret_val;
1502 u16 data, offset, mask;
1503
1504 /*
1505 * Polarity is determined based on the speed of
1506 * our connection.
1507 */
1508 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
1509 if (ret_val)
1510 goto out;
1511
1512 if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
1513 IGP01E1000_PSSR_SPEED_1000MBPS) {
1514 offset = IGP01E1000_PHY_PCS_INIT_REG;
1515 mask = IGP01E1000_PHY_POLARITY_MASK;
1516 } else {
1517 /*
1518 * This really only applies to 10Mbps since
1519 * there is no polarity for 100Mbps (always 0).
1520 */
1521 offset = IGP01E1000_PHY_PORT_STATUS;
1522 mask = IGP01E1000_PSSR_POLARITY_REVERSED;
1523 }
1524
1525 ret_val = phy->ops.read_reg(hw, offset, &data);
1526
1527 if (!ret_val)
1528 phy->cable_polarity = (data & mask)
1529 ? e1000_rev_polarity_reversed
1530 : e1000_rev_polarity_normal;
1531
1532out:
1533 return ret_val;
1534}
1535
1536/**
1537 * igb_wait_autoneg - Wait for auto-neg completion
1538 * @hw: pointer to the HW structure
1539 *
1540 * Waits for auto-negotiation to complete or for the auto-negotiation time
1541 * limit to expire, whichever happens first.
1542 **/
1543static s32 igb_wait_autoneg(struct e1000_hw *hw)
1544{
1545 s32 ret_val = 0;
1546 u16 i, phy_status;
1547
1548 /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
1549 for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
1550 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1551 if (ret_val)
1552 break;
1553 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1554 if (ret_val)
1555 break;
1556 if (phy_status & MII_SR_AUTONEG_COMPLETE)
1557 break;
1558 msleep(100);
1559 }
1560
1561 /*
1562 * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
1563 * has completed.
1564 */
1565 return ret_val;
1566}
1567
1568/**
1569 * igb_phy_has_link - Polls PHY for link
1570 * @hw: pointer to the HW structure
1571 * @iterations: number of times to poll for link
1572 * @usec_interval: delay between polling attempts
1573 * @success: pointer to whether polling was successful or not
1574 *
1575 * Polls the PHY status register for link, 'iterations' number of times.
1576 **/
1577s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
1578 u32 usec_interval, bool *success)
1579{
1580 s32 ret_val = 0;
1581 u16 i, phy_status;
1582
1583 for (i = 0; i < iterations; i++) {
1584 /*
1585 * Some PHYs require the PHY_STATUS register to be read
1586 * twice due to the link bit being sticky. No harm doing
1587 * it across the board.
1588 */
1589 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1590 if (ret_val) {
1591 /*
1592 * If the first read fails, another entity may have
1593 * ownership of the resources, wait and try again to
1594 * see if they have relinquished the resources yet.
1595 */
1596 udelay(usec_interval);
1597 }
1598 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1599 if (ret_val)
1600 break;
1601 if (phy_status & MII_SR_LINK_STATUS)
1602 break;
1603 if (usec_interval >= 1000)
1604 mdelay(usec_interval/1000);
1605 else
1606 udelay(usec_interval);
1607 }
1608
1609 *success = (i < iterations) ? true : false;
1610
1611 return ret_val;
1612}
1613
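The double read in igb_phy_has_link() is the key detail: the MII link bit is latched-low, so the first read clears the latch and only the second read reflects the current state. A user-space sketch of that polling shape (the mock PHY is pure scaffolding; MII_SR_LINK_STATUS mirrors the standard BMSR link bit):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MII_SR_LINK_STATUS 0x0004	/* standard BMSR link bit */

static int reads;
static uint16_t mock_phy_status(void)
{
	/* pretend link comes up on the 6th register read */
	return (++reads >= 6) ? MII_SR_LINK_STATUS : 0;
}

static bool poll_link(unsigned iterations)
{
	for (unsigned i = 0; i < iterations; i++) {
		(void)mock_phy_status();	  /* first read: clear latch  */
		if (mock_phy_status() & MII_SR_LINK_STATUS)
			return true;		  /* second read: real state  */
		/* a real caller would delay usec_interval here */
	}
	return false;
}

int main(void)
{
	printf("link %s\n", poll_link(10) ? "up" : "down");
	return 0;
}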
1614/**
1615 * igb_get_cable_length_m88 - Determine cable length for m88 PHY
1616 * @hw: pointer to the HW structure
1617 *
1618 * Reads the PHY specific status register to retrieve the cable length
1619 * information. The cable length is determined by averaging the minimum and
1620 * maximum values to get the "average" cable length. The m88 PHY has five
1621 * possible cable length values, which are:
1622 * Register Value Cable Length
1623 * 0 < 50 meters
1624 * 1 50 - 80 meters
1625 * 2 80 - 110 meters
1626 * 3 110 - 140 meters
1627 * 4 > 140 meters
1628 **/
1629s32 igb_get_cable_length_m88(struct e1000_hw *hw)
1630{
1631 struct e1000_phy_info *phy = &hw->phy;
1632 s32 ret_val;
1633 u16 phy_data, index;
1634
1635 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
1636 if (ret_val)
1637 goto out;
1638
1639 index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
1640 M88E1000_PSSR_CABLE_LENGTH_SHIFT;
1641 if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
1642 ret_val = -E1000_ERR_PHY;
1643 goto out;
1644 }
1645
1646 phy->min_cable_length = e1000_m88_cable_length_table[index];
1647 phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
1648
1649 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
1650
1651out:
1652 return ret_val;
1653}
1654
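The m88 lookup above is a mask, a shift, and an average of two adjacent table entries. A worked sketch (the table boundaries are taken from the range table in the comment above; the M88 mask and shift values are assumed, since those defines are not in this excerpt):

#include <stdint.h>
#include <stdio.h>

#define M88E1000_PSSR_CABLE_LENGTH	 0x0380	/* assumed value */
#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7	/* assumed value */

static const uint16_t m88_len[] = { 0, 50, 80, 110, 140, 140 };

int main(void)
{
	uint16_t phy_data = 0x0100;	/* sample status register value */
	uint16_t index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
			 M88E1000_PSSR_CABLE_LENGTH_SHIFT;	/* -> 2 */
	uint16_t min, max;

	if (index >= sizeof(m88_len) / sizeof(m88_len[0]) - 1)
		return 1;	/* the driver returns -E1000_ERR_PHY here */

	min = m88_len[index];
	max = m88_len[index + 1];
	printf("index %u: %u-%u m, avg %u m\n",
	       index, min, max, (min + max) / 2);	/* 80-110 m, 95 m */
	return 0;
}

/**
 * igb_get_cable_length_m88_gen2 - Determine cable length for m88 gen2 PHYs
 * @hw: pointer to the HW structure
 *
 * Reads the cable diagnostic registers of the i347-AT4 or M88E1112 PHY to
 * retrieve the cable length information, restoring the original register
 * page when done.
 **/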
1655s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
1656{
1657 struct e1000_phy_info *phy = &hw->phy;
1658 s32 ret_val;
1659 u16 phy_data, phy_data2, index, default_page, is_cm;
1660
1661 switch (hw->phy.id) {
1662 case I347AT4_E_PHY_ID:
1663 /* Remember the original page select and set it to 7 */
1664 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
1665 &default_page);
1666 if (ret_val)
1667 goto out;
1668
1669 ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
1670 if (ret_val)
1671 goto out;
1672
1673 /* Get cable length from PHY Cable Diagnostics Control Reg */
1674 ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
1675 &phy_data);
1676 if (ret_val)
1677 goto out;
1678
1679 /* Check if the unit of cable length is meters or cm */
1680 ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
1681 if (ret_val)
1682 goto out;
1683
1684		is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
1685
1686 /* Populate the phy structure with cable length in meters */
1687 phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
1688 phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
1689 phy->cable_length = phy_data / (is_cm ? 100 : 1);
1690
1691		/* Reset the page select to its original value */
1692 ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
1693 default_page);
1694 if (ret_val)
1695 goto out;
1696 break;
1697 case M88E1112_E_PHY_ID:
1698 /* Remember the original page select and set it to 5 */
1699 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
1700 &default_page);
1701 if (ret_val)
1702 goto out;
1703
1704 ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
1705 if (ret_val)
1706 goto out;
1707
1708 ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
1709 &phy_data);
1710 if (ret_val)
1711 goto out;
1712
1713 index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
1714 M88E1000_PSSR_CABLE_LENGTH_SHIFT;
1715 if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
1716 ret_val = -E1000_ERR_PHY;
1717 goto out;
1718 }
1719
1720 phy->min_cable_length = e1000_m88_cable_length_table[index];
1721 phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
1722
1723 phy->cable_length = (phy->min_cable_length +
1724 phy->max_cable_length) / 2;
1725
1726 /* Reset the page select to its original value */
1727 ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
1728 default_page);
1729 if (ret_val)
1730 goto out;
1731
1732 break;
1733 default:
1734 ret_val = -E1000_ERR_PHY;
1735 goto out;
1736 }
1737
1738out:
1739 return ret_val;
1740}
1741
1742/**
1743 * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY
1744 * @hw: pointer to the HW structure
1745 *
1746 * The automatic gain control (agc) normalizes the amplitude of the
1747 * received signal, adjusting for the attenuation produced by the
1748 * cable. By reading the AGC registers, which represent the
1749 * combination of coarse and fine gain values, the result can be put
1750 * into a lookup table to obtain the approximate cable length
1751 * for each channel.
1752 **/
1753s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
1754{
1755 struct e1000_phy_info *phy = &hw->phy;
1756 s32 ret_val = 0;
1757 u16 phy_data, i, agc_value = 0;
1758 u16 cur_agc_index, max_agc_index = 0;
1759 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
1760 static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
1761 IGP02E1000_PHY_AGC_A,
1762 IGP02E1000_PHY_AGC_B,
1763 IGP02E1000_PHY_AGC_C,
1764 IGP02E1000_PHY_AGC_D
1765 };
1766
1767 /* Read the AGC registers for all channels */
1768 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
1769 ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
1770 if (ret_val)
1771 goto out;
1772
1773 /*
1774 * Getting bits 15:9, which represent the combination of
1775 * coarse and fine gain values. The result is a number
1776 * that can be put into the lookup table to obtain the
1777 * approximate cable length.
1778 */
1779 cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
1780 IGP02E1000_AGC_LENGTH_MASK;
1781
1782 /* Array index bound check. */
1783 if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
1784 (cur_agc_index == 0)) {
1785 ret_val = -E1000_ERR_PHY;
1786 goto out;
1787 }
1788
1789 /* Remove min & max AGC values from calculation. */
1790 if (e1000_igp_2_cable_length_table[min_agc_index] >
1791 e1000_igp_2_cable_length_table[cur_agc_index])
1792 min_agc_index = cur_agc_index;
1793 if (e1000_igp_2_cable_length_table[max_agc_index] <
1794 e1000_igp_2_cable_length_table[cur_agc_index])
1795 max_agc_index = cur_agc_index;
1796
1797 agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
1798 }
1799
1800 agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
1801 e1000_igp_2_cable_length_table[max_agc_index]);
1802 agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
1803
1804 /* Calculate cable length with the error range of +/- 10 meters. */
1805 phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
1806 (agc_value - IGP02E1000_AGC_RANGE) : 0;
1807 phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
1808
1809 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
1810
1811out:
1812 return ret_val;
1813}
1814
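A worked example of the trimmed-mean arithmetic above: the four per-channel table lookups are summed, the single smallest and largest are dropped, the remaining two are averaged, and the AGC range is applied as the error band. The channel values below are made up for illustration; the constants are the IGP defines from e1000_phy.h later in this diff:

#include <stdio.h>

#define IGP02E1000_PHY_CHANNEL_NUM 4
#define IGP02E1000_AGC_RANGE	   15

int main(void)
{
	/* pretend the per-channel table lookups returned these lengths */
	unsigned chan[IGP02E1000_PHY_CHANNEL_NUM] = { 40, 60, 55, 90 };
	unsigned sum = 0, min = chan[0], max = chan[0];
	unsigned agc, lo;

	for (unsigned i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
		sum += chan[i];
		if (chan[i] < min) min = chan[i];
		if (chan[i] > max) max = chan[i];
	}
	agc = (sum - min - max) / (IGP02E1000_PHY_CHANNEL_NUM - 2);
	lo = agc > IGP02E1000_AGC_RANGE ? agc - IGP02E1000_AGC_RANGE : 0;

	printf("cable: %u-%u m, avg %u m\n",	/* 42-72 m, avg 57 m */
	       lo, agc + IGP02E1000_AGC_RANGE, agc);
	return 0;
}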
1815/**
1816 * igb_get_phy_info_m88 - Retrieve PHY information
1817 * @hw: pointer to the HW structure
1818 *
1819 * Valid only for copper links. Read the PHY status register (sticky read)
1820 * to verify that link is up. Read the PHY special control register to
1821 * determine the polarity and 10base-T extended distance. Read the PHY
1822 * special status register to determine MDI/MDIx and current speed. If
1823 * speed is 1000, then determine cable length, local and remote receiver.
1824 **/
1825s32 igb_get_phy_info_m88(struct e1000_hw *hw)
1826{
1827 struct e1000_phy_info *phy = &hw->phy;
1828 s32 ret_val;
1829 u16 phy_data;
1830 bool link;
1831
1832 if (phy->media_type != e1000_media_type_copper) {
1833 hw_dbg("Phy info is only valid for copper media\n");
1834 ret_val = -E1000_ERR_CONFIG;
1835 goto out;
1836 }
1837
1838 ret_val = igb_phy_has_link(hw, 1, 0, &link);
1839 if (ret_val)
1840 goto out;
1841
1842 if (!link) {
1843 hw_dbg("Phy info is only valid if link is up\n");
1844 ret_val = -E1000_ERR_CONFIG;
1845 goto out;
1846 }
1847
1848 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
1849 if (ret_val)
1850 goto out;
1851
1852 phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
1853 ? true : false;
1854
1855 ret_val = igb_check_polarity_m88(hw);
1856 if (ret_val)
1857 goto out;
1858
1859 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
1860 if (ret_val)
1861 goto out;
1862
1863 phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false;
1864
1865 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
1866 ret_val = phy->ops.get_cable_length(hw);
1867 if (ret_val)
1868 goto out;
1869
1870 ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
1871 if (ret_val)
1872 goto out;
1873
1874 phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
1875 ? e1000_1000t_rx_status_ok
1876 : e1000_1000t_rx_status_not_ok;
1877
1878 phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
1879 ? e1000_1000t_rx_status_ok
1880 : e1000_1000t_rx_status_not_ok;
1881 } else {
1882 /* Set values to "undefined" */
1883 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
1884 phy->local_rx = e1000_1000t_rx_status_undefined;
1885 phy->remote_rx = e1000_1000t_rx_status_undefined;
1886 }
1887
1888out:
1889 return ret_val;
1890}
1891
1892/**
1893 * igb_get_phy_info_igp - Retrieve igp PHY information
1894 * @hw: pointer to the HW structure
1895 *
1896 * Read PHY status to determine if link is up. If link is up, then
1897 * set/determine 10base-T extended distance and polarity correction. Read
1898 * PHY port status to determine MDI/MDIx and speed. Based on the speed,
1899 * determine the cable length, local and remote receiver.
1900 **/
1901s32 igb_get_phy_info_igp(struct e1000_hw *hw)
1902{
1903 struct e1000_phy_info *phy = &hw->phy;
1904 s32 ret_val;
1905 u16 data;
1906 bool link;
1907
1908 ret_val = igb_phy_has_link(hw, 1, 0, &link);
1909 if (ret_val)
1910 goto out;
1911
1912 if (!link) {
1913 hw_dbg("Phy info is only valid if link is up\n");
1914 ret_val = -E1000_ERR_CONFIG;
1915 goto out;
1916 }
1917
1918 phy->polarity_correction = true;
1919
1920 ret_val = igb_check_polarity_igp(hw);
1921 if (ret_val)
1922 goto out;
1923
1924 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
1925 if (ret_val)
1926 goto out;
1927
1928 phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false;
1929
1930 if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
1931 IGP01E1000_PSSR_SPEED_1000MBPS) {
1932 ret_val = phy->ops.get_cable_length(hw);
1933 if (ret_val)
1934 goto out;
1935
1936 ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
1937 if (ret_val)
1938 goto out;
1939
1940 phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
1941 ? e1000_1000t_rx_status_ok
1942 : e1000_1000t_rx_status_not_ok;
1943
1944 phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
1945 ? e1000_1000t_rx_status_ok
1946 : e1000_1000t_rx_status_not_ok;
1947 } else {
1948 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
1949 phy->local_rx = e1000_1000t_rx_status_undefined;
1950 phy->remote_rx = e1000_1000t_rx_status_undefined;
1951 }
1952
1953out:
1954 return ret_val;
1955}
1956
1957/**
1958 * igb_phy_sw_reset - PHY software reset
1959 * @hw: pointer to the HW structure
1960 *
1961 * Does a software reset of the PHY by reading the PHY control register,
1962 * setting the reset bit, and writing the register back to the PHY.
1963 **/
1964s32 igb_phy_sw_reset(struct e1000_hw *hw)
1965{
1966 s32 ret_val = 0;
1967 u16 phy_ctrl;
1968
1969 if (!(hw->phy.ops.read_reg))
1970 goto out;
1971
1972 ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
1973 if (ret_val)
1974 goto out;
1975
1976 phy_ctrl |= MII_CR_RESET;
1977 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
1978 if (ret_val)
1979 goto out;
1980
1981 udelay(1);
1982
1983out:
1984 return ret_val;
1985}
1986
1987/**
1988 * igb_phy_hw_reset - PHY hardware reset
1989 * @hw: pointer to the HW structure
1990 *
1991 * Verify the reset block is not blocking us from resetting. Acquire
1992 * semaphore (if necessary) and read/set/write the device control reset
1993 * bit in the PHY. Wait the appropriate delay time for the device to
1994 * reset and release the semaphore (if necessary).
1995 **/
1996s32 igb_phy_hw_reset(struct e1000_hw *hw)
1997{
1998 struct e1000_phy_info *phy = &hw->phy;
1999 s32 ret_val;
2000 u32 ctrl;
2001
2002 ret_val = igb_check_reset_block(hw);
2003 if (ret_val) {
2004 ret_val = 0;
2005 goto out;
2006 }
2007
2008 ret_val = phy->ops.acquire(hw);
2009 if (ret_val)
2010 goto out;
2011
2012 ctrl = rd32(E1000_CTRL);
2013 wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
2014 wrfl();
2015
2016 udelay(phy->reset_delay_us);
2017
2018 wr32(E1000_CTRL, ctrl);
2019 wrfl();
2020
2021 udelay(150);
2022
2023 phy->ops.release(hw);
2024
2025 ret_val = phy->ops.get_cfg_done(hw);
2026
2027out:
2028 return ret_val;
2029}
2030
2031/**
2032 * igb_phy_init_script_igp3 - Inits the IGP3 PHY
2033 * @hw: pointer to the HW structure
2034 *
2035 * Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
2036 **/
2037s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
2038{
2039 hw_dbg("Running IGP 3 PHY init script\n");
2040
2041 /* PHY init IGP 3 */
2042 /* Enable rise/fall, 10-mode work in class-A */
2043 hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
2044 /* Remove all caps from Replica path filter */
2045 hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
2046 /* Bias trimming for ADC, AFE and Driver (Default) */
2047 hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
2048 /* Increase Hybrid poly bias */
2049 hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
2050 /* Add 4% to TX amplitude in Giga mode */
2051 hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
2052 /* Disable trimming (TTT) */
2053 hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
2054 /* Poly DC correction to 94.6% + 2% for all channels */
2055 hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
2056 /* ABS DC correction to 95.9% */
2057 hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
2058 /* BG temp curve trim */
2059 hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
2060 /* Increasing ADC OPAMP stage 1 currents to max */
2061 hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
2062	/* Force 1000 (required for enabling PHY regs configuration) */
2063 hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
2064 /* Set upd_freq to 6 */
2065 hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
2066 /* Disable NPDFE */
2067 hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
2068 /* Disable adaptive fixed FFE (Default) */
2069 hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
2070 /* Enable FFE hysteresis */
2071 hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
2072 /* Fixed FFE for short cable lengths */
2073 hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
2074 /* Fixed FFE for medium cable lengths */
2075 hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
2076 /* Fixed FFE for long cable lengths */
2077 hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
2078 /* Enable Adaptive Clip Threshold */
2079 hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
2080 /* AHT reset limit to 1 */
2081 hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
2082 /* Set AHT master delay to 127 msec */
2083 hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
2084 /* Set scan bits for AHT */
2085 hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
2086 /* Set AHT Preset bits */
2087 hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
2088 /* Change integ_factor of channel A to 3 */
2089 hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
2090 /* Change prop_factor of channels BCD to 8 */
2091 hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
2092 /* Change cg_icount + enable integbp for channels BCD */
2093 hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
2094 /*
2095 * Change cg_icount + enable integbp + change prop_factor_master
2096 * to 8 for channel A
2097 */
2098 hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
2099 /* Disable AHT in Slave mode on channel A */
2100 hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
2101 /*
2102 * Enable LPLU and disable AN to 1000 in non-D0a states,
2103 * Enable SPD+B2B
2104 */
2105 hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
2106 /* Enable restart AN on an1000_dis change */
2107 hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
2108 /* Enable wh_fifo read clock in 10/100 modes */
2109 hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
2110 /* Restart AN, Speed selection is 1000 */
2111 hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
2112
2113 return 0;
2114}
2115
2116/**
2117 * igb_power_up_phy_copper - Restore copper link in case of PHY power down
2118 * @hw: pointer to the HW structure
2119 *
2120 * In the case of a PHY power down to save power, or to turn off link during a
2121 * driver unload, restore the link to previous settings.
2122 **/
2123void igb_power_up_phy_copper(struct e1000_hw *hw)
2124{
2125 u16 mii_reg = 0;
2126
2127 /* The PHY will retain its settings across a power down/up cycle */
2128 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
2129 mii_reg &= ~MII_CR_POWER_DOWN;
2130 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
2131}
2132
2133/**
2134 * igb_power_down_phy_copper - Power down copper PHY
2135 * @hw: pointer to the HW structure
2136 *
2137 * Power down PHY to save power when interface is down and wake on lan
2138 * is not enabled.
2139 **/
2140void igb_power_down_phy_copper(struct e1000_hw *hw)
2141{
2142 u16 mii_reg = 0;
2143
2144 /* The PHY will retain its settings across a power down/up cycle */
2145 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
2146 mii_reg |= MII_CR_POWER_DOWN;
2147 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
2148 msleep(1);
2149}
2150
2151/**
2152 * igb_check_polarity_82580 - Checks the polarity.
2153 * @hw: pointer to the HW structure
2154 *
2155 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2156 *
2157 * Polarity is determined based on the PHY specific status register.
2158 **/
2159static s32 igb_check_polarity_82580(struct e1000_hw *hw)
2160{
2161 struct e1000_phy_info *phy = &hw->phy;
2162 s32 ret_val;
2163 u16 data;
2164
2165
2166 ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
2167
2168 if (!ret_val)
2169 phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY)
2170 ? e1000_rev_polarity_reversed
2171 : e1000_rev_polarity_normal;
2172
2173 return ret_val;
2174}
2175
2176/**
2177 * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY
2178 * @hw: pointer to the HW structure
2179 *
2180 * Calls the PHY setup function to force speed and duplex. Clears the
2181 * auto-crossover to force MDI manually. Waits for link and returns
2182 * successful if link is established, else -E1000_ERR_PHY (-2).
2183 **/
2184s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
2185{
2186 struct e1000_phy_info *phy = &hw->phy;
2187 s32 ret_val;
2188 u16 phy_data;
2189 bool link;
2190
2191
2192 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
2193 if (ret_val)
2194 goto out;
2195
2196 igb_phy_force_speed_duplex_setup(hw, &phy_data);
2197
2198 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
2199 if (ret_val)
2200 goto out;
2201
2202 /*
2203 * Clear Auto-Crossover to force MDI manually. 82580 requires MDI
2204 * forced whenever speed and duplex are forced.
2205 */
2206 ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
2207 if (ret_val)
2208 goto out;
2209
2210 phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX;
2211 phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX;
2212
2213 ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
2214 if (ret_val)
2215 goto out;
2216
2217 hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data);
2218
2219 udelay(1);
2220
2221 if (phy->autoneg_wait_to_complete) {
2222 hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n");
2223
2224 ret_val = igb_phy_has_link(hw,
2225 PHY_FORCE_LIMIT,
2226 100000,
2227 &link);
2228 if (ret_val)
2229 goto out;
2230
2231 if (!link)
2232 hw_dbg("Link taking longer than expected.\n");
2233
2234 /* Try once more */
2235 ret_val = igb_phy_has_link(hw,
2236 PHY_FORCE_LIMIT,
2237 100000,
2238 &link);
2239 if (ret_val)
2240 goto out;
2241 }
2242
2243out:
2244 return ret_val;
2245}
2246
2247/**
2248 * igb_get_phy_info_82580 - Retrieve I82580 PHY information
2249 * @hw: pointer to the HW structure
2250 *
2251 * Read PHY status to determine if link is up. If link is up, then
2252 * set/determine 10base-T extended distance and polarity correction. Read
2253 * PHY port status to determine MDI/MDIx and speed. Based on the speed,
2254 * determine the cable length, local and remote receiver.
2255 **/
2256s32 igb_get_phy_info_82580(struct e1000_hw *hw)
2257{
2258 struct e1000_phy_info *phy = &hw->phy;
2259 s32 ret_val;
2260 u16 data;
2261 bool link;
2262
2263
2264 ret_val = igb_phy_has_link(hw, 1, 0, &link);
2265 if (ret_val)
2266 goto out;
2267
2268 if (!link) {
2269 hw_dbg("Phy info is only valid if link is up\n");
2270 ret_val = -E1000_ERR_CONFIG;
2271 goto out;
2272 }
2273
2274 phy->polarity_correction = true;
2275
2276 ret_val = igb_check_polarity_82580(hw);
2277 if (ret_val)
2278 goto out;
2279
2280 ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
2281 if (ret_val)
2282 goto out;
2283
2284 phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false;
2285
2286 if ((data & I82580_PHY_STATUS2_SPEED_MASK) ==
2287 I82580_PHY_STATUS2_SPEED_1000MBPS) {
2288 ret_val = hw->phy.ops.get_cable_length(hw);
2289 if (ret_val)
2290 goto out;
2291
2292 ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
2293 if (ret_val)
2294 goto out;
2295
2296 phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
2297 ? e1000_1000t_rx_status_ok
2298 : e1000_1000t_rx_status_not_ok;
2299
2300 phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
2301 ? e1000_1000t_rx_status_ok
2302 : e1000_1000t_rx_status_not_ok;
2303 } else {
2304 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
2305 phy->local_rx = e1000_1000t_rx_status_undefined;
2306 phy->remote_rx = e1000_1000t_rx_status_undefined;
2307 }
2308
2309out:
2310 return ret_val;
2311}
2312
2313/**
2314 * igb_get_cable_length_82580 - Determine cable length for 82580 PHY
2315 * @hw: pointer to the HW structure
2316 *
2317 * Reads the diagnostic status register and verifies result is valid before
2318 * placing it in the phy_cable_length field.
2319 **/
2320s32 igb_get_cable_length_82580(struct e1000_hw *hw)
2321{
2322 struct e1000_phy_info *phy = &hw->phy;
2323 s32 ret_val;
2324 u16 phy_data, length;
2325
2326
2327 ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data);
2328 if (ret_val)
2329 goto out;
2330
2331 length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
2332 I82580_DSTATUS_CABLE_LENGTH_SHIFT;
2333
2334 if (length == E1000_CABLE_LENGTH_UNDEFINED)
2335 ret_val = -E1000_ERR_PHY;
2336
2337 phy->cable_length = length;
2338
2339out:
2340 return ret_val;
2341}
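The 82580 variant above is the simplest of the three cable-length paths: one register read, one masked shift, one validity check. A standalone sketch using the I82580 defines from e1000_phy.h later in this diff:

#include <stdint.h>
#include <stdio.h>

#define I82580_DSTATUS_CABLE_LENGTH	  0x03FC
#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2
#define E1000_CABLE_LENGTH_UNDEFINED	  0xFF

int main(void)
{
	uint16_t phy_data = 0x00A8;	/* sample diagnostic register value */
	uint16_t length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
			  I82580_DSTATUS_CABLE_LENGTH_SHIFT;

	if (length == E1000_CABLE_LENGTH_UNDEFINED)
		printf("diagnostic result not valid\n");
	else
		printf("cable length: %u m\n", length);	/* prints 42 m */
	return 0;
}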
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
new file mode 100644
index 00000000000..8510797b9d8
--- /dev/null
+++ b/drivers/net/igb/e1000_phy.h
@@ -0,0 +1,136 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_PHY_H_
29#define _E1000_PHY_H_
30
31enum e1000_ms_type {
32 e1000_ms_hw_default = 0,
33 e1000_ms_force_master,
34 e1000_ms_force_slave,
35 e1000_ms_auto
36};
37
38enum e1000_smart_speed {
39 e1000_smart_speed_default = 0,
40 e1000_smart_speed_on,
41 e1000_smart_speed_off
42};
43
44s32 igb_check_downshift(struct e1000_hw *hw);
45s32 igb_check_reset_block(struct e1000_hw *hw);
46s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
47s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
48s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw);
49s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
50s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw);
51s32 igb_get_cable_length_m88(struct e1000_hw *hw);
52s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw);
53s32 igb_get_cable_length_igp_2(struct e1000_hw *hw);
54s32 igb_get_phy_id(struct e1000_hw *hw);
55s32 igb_get_phy_info_igp(struct e1000_hw *hw);
56s32 igb_get_phy_info_m88(struct e1000_hw *hw);
57s32 igb_phy_sw_reset(struct e1000_hw *hw);
58s32 igb_phy_hw_reset(struct e1000_hw *hw);
59s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
60s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active);
61s32 igb_setup_copper_link(struct e1000_hw *hw);
62s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
63s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
64 u32 usec_interval, bool *success);
65void igb_power_up_phy_copper(struct e1000_hw *hw);
66void igb_power_down_phy_copper(struct e1000_hw *hw);
67s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
68s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
69s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
70s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
71s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
72s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
73s32 igb_get_phy_info_82580(struct e1000_hw *hw);
74s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
75s32 igb_get_cable_length_82580(struct e1000_hw *hw);
76
77/* IGP01E1000 Specific Registers */
78#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
79#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
80#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
81#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
82#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
83#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
84#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
85#define IGP01E1000_PHY_POLARITY_MASK 0x0078
86#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
87#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
88#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
89
90#define I82580_ADDR_REG 16
91#define I82580_CFG_REG 22
92#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15)
93#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
94#define I82580_CTRL_REG 23
95#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10)
96
97/* 82580 specific PHY registers */
98#define I82580_PHY_CTRL_2 18
99#define I82580_PHY_LBK_CTRL 19
100#define I82580_PHY_STATUS_2 26
101#define I82580_PHY_DIAG_STATUS 31
102
103/* I82580 PHY Status 2 */
104#define I82580_PHY_STATUS2_REV_POLARITY 0x0400
105#define I82580_PHY_STATUS2_MDIX 0x0800
106#define I82580_PHY_STATUS2_SPEED_MASK 0x0300
107#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200
108#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100
109
110/* I82580 PHY Control 2 */
111#define I82580_PHY_CTRL2_AUTO_MDIX 0x0400
112#define I82580_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
113
114/* I82580 PHY Diagnostics Status */
115#define I82580_DSTATUS_CABLE_LENGTH 0x03FC
116#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2
117/* Enable flexible speed on link-up */
118#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
119#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
120#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
121#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
122#define IGP01E1000_PSSR_MDIX 0x0800
123#define IGP01E1000_PSSR_SPEED_MASK 0xC000
124#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
125#define IGP02E1000_PHY_CHANNEL_NUM 4
126#define IGP02E1000_PHY_AGC_A 0x11B1
127#define IGP02E1000_PHY_AGC_B 0x12B1
128#define IGP02E1000_PHY_AGC_C 0x14B1
129#define IGP02E1000_PHY_AGC_D 0x18B1
130#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
131#define IGP02E1000_AGC_LENGTH_MASK 0x7F
132#define IGP02E1000_AGC_RANGE 15
133
134#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
135
136#endif
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
new file mode 100644
index 00000000000..0990f6d860c
--- /dev/null
+++ b/drivers/net/igb/e1000_regs.h
@@ -0,0 +1,354 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_REGS_H_
29#define _E1000_REGS_H_
30
31#define E1000_CTRL 0x00000 /* Device Control - RW */
32#define E1000_STATUS 0x00008 /* Device Status - RO */
33#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
34#define E1000_EERD 0x00014 /* EEPROM Read - RW */
35#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
36#define E1000_MDIC 0x00020 /* MDI Control - RW */
37#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */
38#define E1000_SCTL 0x00024 /* SerDes Control - RW */
39#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
40#define E1000_FCAH 0x0002C /* Flow Control Address High - RW */
41#define E1000_FCT 0x00030 /* Flow Control Type - RW */
42#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
43#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
44#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
45#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
46#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
47#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
48#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
49#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
50#define E1000_RCTL 0x00100 /* RX Control - RW */
51#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
52#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
53#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
54#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
55#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */
56#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
57#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
58#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
59#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
60#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
61#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
62#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
63#define E1000_TCTL 0x00400 /* TX Control - RW */
64#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
65#define E1000_TIPG 0x00410 /* TX Inter-packet gap - RW */
66#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
67#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
68#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
69#define E1000_PBS 0x01008 /* Packet Buffer Size */
70#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
71#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
72#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
73#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
74#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
75#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
76#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
77#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
78
79/* IEEE 1588 TIMESYNCH */
80#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
81#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
82#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
83#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
84#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
85#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
86#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
87#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
88#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
89#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
90#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
91#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
92#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
93#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
94
95/* Filtering Registers */
96#define E1000_SAQF(_n) (0x5980 + 4 * (_n))
97#define E1000_DAQF(_n) (0x59A0 + 4 * (_n))
98#define E1000_SPQF(_n) (0x59C0 + 4 * (_n))
99#define E1000_FTQF(_n) (0x59E0 + 4 * (_n))
100#define E1000_SAQF0 E1000_SAQF(0)
101#define E1000_DAQF0 E1000_DAQF(0)
102#define E1000_SPQF0 E1000_SPQF(0)
103#define E1000_FTQF0 E1000_FTQF(0)
104#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
105#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
106
107#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
108
109/* DMA Coalescing registers */
110#define E1000_DMACR 0x02508 /* Control Register */
111#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
112#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
113#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
114#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
115#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
116#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
117
118/* TX Rate Limit Registers */
119#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
120#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
121
122/* Split and Replication RX Control - RW */
123#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
124/*
125 * Convenience macros
126 *
127 * Note: "_n" is the queue number of the register to be written to.
128 * Queues 0-3 sit in the legacy block at a 0x100 stride; queues 4 and
129 * up sit in the extended block at a 0x40 stride, hence the "< 4" test.
130 * Example usage: E1000_RDBAL(current_rx_queue)
131 */
132#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \
133 : (0x0C000 + ((_n) * 0x40)))
134#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \
135 : (0x0C004 + ((_n) * 0x40)))
136#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \
137 : (0x0C008 + ((_n) * 0x40)))
138#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \
139 : (0x0C00C + ((_n) * 0x40)))
140#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \
141 : (0x0C010 + ((_n) * 0x40)))
142#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \
143 : (0x0C018 + ((_n) * 0x40)))
144#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \
145 : (0x0C028 + ((_n) * 0x40)))
146#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \
147 : (0x0E000 + ((_n) * 0x40)))
148#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \
149 : (0x0E004 + ((_n) * 0x40)))
150#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \
151 : (0x0E008 + ((_n) * 0x40)))
152#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \
153 : (0x0E010 + ((_n) * 0x40)))
154#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \
155 : (0x0E018 + ((_n) * 0x40)))
156#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
157 : (0x0E028 + ((_n) * 0x40)))
158#define E1000_DCA_TXCTRL(_n) (0x03814 + ((_n) << 8))
159#define E1000_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
160#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
161 : (0x0E038 + ((_n) * 0x40)))
162#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
163 : (0x0E03C + ((_n) * 0x40)))
164#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
165#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
166#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
167#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
168#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */
169#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
170#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
171#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
172#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
173#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
174#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
175#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
176#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
177#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
178#define E1000_COLC 0x04028 /* Collision Count - R/clr */
179#define E1000_DC 0x04030 /* Defer Count - R/clr */
180#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
181#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
182#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
183#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
184#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
185#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
186#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
187#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
188#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count - R/clr */
189#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
190#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
191#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
192#define E1000_PRC511 0x04068 /* Packets RX (256-511 bytes) - R/clr */
193#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
194#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
195#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
196#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
197#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
198#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
199#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
200#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
201#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
202#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
203#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
204#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
205#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
206#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
207#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
208#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
209#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
210#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
211#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
212#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
213#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
214#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
215#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
216#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
217#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
218#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
219#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
220#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
221#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
222#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 bytes) - R/clr */
223#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
224#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
225#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
226#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
227#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
228/* Interrupt Cause Rx Packet Timer Expire Count */
229#define E1000_ICRXPTC 0x04104
230/* Interrupt Cause Rx Absolute Timer Expire Count */
231#define E1000_ICRXATC 0x04108
232/* Interrupt Cause Tx Packet Timer Expire Count */
233#define E1000_ICTXPTC 0x0410C
234/* Interrupt Cause Tx Absolute Timer Expire Count */
235#define E1000_ICTXATC 0x04110
236/* Interrupt Cause Tx Queue Empty Count */
237#define E1000_ICTXQEC 0x04118
238/* Interrupt Cause Tx Queue Minimum Threshold Count */
239#define E1000_ICTXQMTC 0x0411C
240/* Interrupt Cause Rx Descriptor Minimum Threshold Count */
241#define E1000_ICRXDMTC 0x04120
242#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
243#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */
244#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
245#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
246#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */
247#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
248#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */
249#define E1000_RPTHC 0x04104 /* Rx Packets To Host */
250#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */
251#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */
252#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */
253#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */
254#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
255#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
256#define E1000_LENERRS 0x04138 /* Length Errors Count */
257#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
258#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
259#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
260#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */
261#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */
262#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
263#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */
264#define E1000_RFCTL 0x05008 /* Receive Filter Control */
265#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
266#define E1000_RA 0x05400 /* Receive Address - RW Array */
267#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */
268#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
269#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
270 (0x054E0 + (((_i) - 16) * 8)))
271#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
272 (0x054E4 + (((_i) - 16) * 8)))
273#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
274#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
275#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
276#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
277#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
278#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
279#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
280#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
281#define E1000_WUC 0x05800 /* Wakeup Control - RW */
282#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
283#define E1000_WUS 0x05810 /* Wakeup Status - RO */
284#define E1000_MANC 0x05820 /* Management Control - RW */
285#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
286#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
287
288#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
289#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
290#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
291#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */
292#define E1000_GCR 0x05B00 /* PCI-Ex Control */
293#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
294#define E1000_SWSM 0x05B50 /* SW Semaphore */
295#define E1000_FWSM 0x05B54 /* FW Semaphore */
296#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
297
298/* RSS registers */
299#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
300#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
301#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/
302#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */
303/* MSI-X Allocation Register (_i) - RW */
304#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4))
305/* Redirection Table - RW Array */
306#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))
307#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
308
309/* VT Registers */
310#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
311#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
312#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
313#define E1000_VFRE 0x00C8C /* VF Receive Enables */
314#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
315#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
316#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
317#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */
318#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
319#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
320#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
321/* These act per VF so an array friendly macro is used */
322#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
323#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
324#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
325#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
326 * Filter - RW */
327#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
328
329#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
330#define rd32(reg) (readl(hw->hw_addr + reg))
331#define wrfl() ((void)rd32(E1000_STATUS))
332
333#define array_wr32(reg, offset, value) \
334 (writel(value, hw->hw_addr + reg + ((offset) << 2)))
335#define array_rd32(reg, offset) \
336 (readl(hw->hw_addr + reg + ((offset) << 2)))
337
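Note that these accessors rely on a local "struct e1000_hw *hw" being in scope, with hw->hw_addr pointing at the mapped register BAR. A minimal usage sketch (the E1000_CTRL_SLU bit name is taken from e1000_defines.h elsewhere in this series):

	static void example_set_link_up(struct e1000_hw *hw)
	{
		u32 ctrl = rd32(E1000_CTRL);	/* readl() of Device Control */

		ctrl |= E1000_CTRL_SLU;		/* set link up */
		wr32(E1000_CTRL, ctrl);		/* writel() it back */
		wrfl();				/* read STATUS to flush the posted write */
	}
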
341/* Energy Efficient Ethernet "EEE" register */
342#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
343#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */
344
345/* Thermal Sensor Register */
346#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
347
348/* OS2BMC Registers */
349#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
350#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
351#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
352#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
353
354#endif
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
new file mode 100644
index 00000000000..265e151b66c
--- /dev/null
+++ b/drivers/net/igb/igb.h
@@ -0,0 +1,415 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29/* Linux PRO/1000 Ethernet Driver main header file */
30
31#ifndef _IGB_H_
32#define _IGB_H_
33
34#include "e1000_mac.h"
35#include "e1000_82575.h"
36
37#include <linux/clocksource.h>
38#include <linux/timecompare.h>
39#include <linux/net_tstamp.h>
40#include <linux/bitops.h>
41#include <linux/if_vlan.h>
42
43struct igb_adapter;
44
45/* (1000000000ns / (6000ints/s * 1024ns)) = 162 (truncated); 162 << 2 = 648 */
46#define IGB_START_ITR 648
47
48/* TX/RX descriptor defines */
49#define IGB_DEFAULT_TXD 256
50#define IGB_MIN_TXD 80
51#define IGB_MAX_TXD 4096
52
53#define IGB_DEFAULT_RXD 256
54#define IGB_MIN_RXD 80
55#define IGB_MAX_RXD 4096
56
57#define IGB_DEFAULT_ITR 3 /* dynamic */
58#define IGB_MAX_ITR_USECS 10000
59#define IGB_MIN_ITR_USECS 10
60#define NON_Q_VECTORS 1
61#define MAX_Q_VECTORS 8
62
63/* Transmit and receive queues */
64#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \
65 (hw->mac.type > e1000_82575 ? 8 : 4))
66#define IGB_ABS_MAX_TX_QUEUES 8
67#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES
68
69#define IGB_MAX_VF_MC_ENTRIES 30
70#define IGB_MAX_VF_FUNCTIONS 8
71#define IGB_MAX_VFTA_ENTRIES 128
72
73struct vf_data_storage {
74 unsigned char vf_mac_addresses[ETH_ALEN];
75 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
76 u16 num_vf_mc_hashes;
77 u16 vlans_enabled;
78 u32 flags;
79 unsigned long last_nack;
80 u16 pf_vlan; /* When set, guest VLAN config not allowed. */
81 u16 pf_qos;
82 u16 tx_rate;
83};
84
85#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
86#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
87#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
88#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
89
90/* RX descriptor control thresholds.
91 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
92 * descriptors available in its onboard memory.
93 * Setting this to 0 disables RX descriptor prefetch.
94 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
95 * available in host memory.
96 * If PTHRESH is 0, this should also be 0.
97 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
98 * descriptors until either it has this many to write back, or the
99 * ITR timer expires.
100 */
101#define IGB_RX_PTHRESH 8
102#define IGB_RX_HTHRESH 8
103#define IGB_RX_WTHRESH 1
104#define IGB_TX_PTHRESH 8
105#define IGB_TX_HTHRESH 1
106#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
107 adapter->msix_entries) ? 1 : 16)
108
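As a sketch of how these constants reach the hardware, the ring-setup paths pack them into each queue's RXDCTL/TXDCTL register. The field offsets below (PTHRESH bits 4:0, HTHRESH bits 12:8, WTHRESH bits 20:16) follow the 82575-family register layout; this is illustrative, not the driver's exact configuration code:

	static u32 example_rx_thresholds(void)
	{
		u32 rxdctl = 0;

		rxdctl |= IGB_RX_PTHRESH;	/* bits 4:0 */
		rxdctl |= IGB_RX_HTHRESH << 8;	/* bits 12:8 */
		rxdctl |= IGB_RX_WTHRESH << 16;	/* bits 20:16 */
		return rxdctl;
	}
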
109/* this is the size past which hardware will drop packets when setting LPE=0 */
110#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
111
112/* Supported Rx Buffer Sizes */
113#define IGB_RXBUFFER_64 64 /* Used for packet split */
114#define IGB_RXBUFFER_128 128 /* Used for packet split */
115#define IGB_RXBUFFER_1024 1024
116#define IGB_RXBUFFER_2048 2048
117#define IGB_RXBUFFER_16384 16384
118
119#define MAX_STD_JUMBO_FRAME_SIZE 9234
120
121/* How many Tx Descriptors do we need to call netif_wake_queue? */
122#define IGB_TX_QUEUE_WAKE 16
123/* How many Rx Buffers do we bundle into one write to the hardware? */
124#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
125
126#define AUTO_ALL_MODES 0
127#define IGB_EEPROM_APME 0x0400
128
129#ifndef IGB_MASTER_SLAVE
130/* Switch to override PHY master/slave setting */
131#define IGB_MASTER_SLAVE e1000_ms_hw_default
132#endif
133
134#define IGB_MNG_VLAN_NONE -1
135
136/* wrapper around a pointer to a socket buffer,
137 * so a DMA handle can be stored along with the buffer */
138struct igb_buffer {
139 struct sk_buff *skb;
140 dma_addr_t dma;
141 union {
142 /* TX */
143 struct {
144 unsigned long time_stamp;
145 u16 length;
146 u16 next_to_watch;
147 unsigned int bytecount;
148 u16 gso_segs;
149 u8 tx_flags;
150 u8 mapped_as_page;
151 };
152 /* RX */
153 struct {
154 struct page *page;
155 dma_addr_t page_dma;
156 u16 page_offset;
157 };
158 };
159};
160
161struct igb_tx_queue_stats {
162 u64 packets;
163 u64 bytes;
164 u64 restart_queue;
165 u64 restart_queue2;
166};
167
168struct igb_rx_queue_stats {
169 u64 packets;
170 u64 bytes;
171 u64 drops;
172 u64 csum_err;
173 u64 alloc_failed;
174};
175
176struct igb_q_vector {
177 struct igb_adapter *adapter; /* backlink */
178 struct igb_ring *rx_ring;
179 struct igb_ring *tx_ring;
180 struct napi_struct napi;
181
182 u32 eims_value;
183 u16 cpu;
184
185 u16 itr_val;
186 u8 set_itr;
187 void __iomem *itr_register;
188
189 char name[IFNAMSIZ + 9];
190};
191
192struct igb_ring {
193 struct igb_q_vector *q_vector; /* backlink to q_vector */
194 struct net_device *netdev; /* back pointer to net_device */
195 struct device *dev; /* device pointer for dma mapping */
196 dma_addr_t dma; /* phys address of the ring */
197 void *desc; /* descriptor ring memory */
198 unsigned int size; /* length of desc. ring in bytes */
199 u16 count; /* number of desc. in the ring */
200 u16 next_to_use;
201 u16 next_to_clean;
202 u8 queue_index;
203 u8 reg_idx;
204 void __iomem *head;
205 void __iomem *tail;
206 struct igb_buffer *buffer_info; /* array of buffer info structs */
207
208 unsigned int total_bytes;
209 unsigned int total_packets;
210
211 u32 flags;
212
213 union {
214 /* TX */
215 struct {
216 struct igb_tx_queue_stats tx_stats;
217 struct u64_stats_sync tx_syncp;
218 struct u64_stats_sync tx_syncp2;
219 bool detect_tx_hung;
220 };
221 /* RX */
222 struct {
223 struct igb_rx_queue_stats rx_stats;
224 struct u64_stats_sync rx_syncp;
225 u32 rx_buffer_len;
226 };
227 };
228};
229
230#define IGB_RING_FLAG_RX_CSUM 0x00000001 /* RX CSUM enabled */
231#define IGB_RING_FLAG_RX_SCTP_CSUM 0x00000002 /* SCTP CSUM offload enabled */
232
233#define IGB_RING_FLAG_TX_CTX_IDX 0x00000001 /* HW requires context index */
234
235#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS)
236
237#define E1000_RX_DESC_ADV(R, i) \
238 (&(((union e1000_adv_rx_desc *)((R).desc))[i]))
239#define E1000_TX_DESC_ADV(R, i) \
240 (&(((union e1000_adv_tx_desc *)((R).desc))[i]))
241#define E1000_TX_CTXTDESC_ADV(R, i) \
242 (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
243
244/* igb_desc_unused - calculate the number of unused descriptors */
245static inline int igb_desc_unused(struct igb_ring *ring)
246{
247 if (ring->next_to_clean > ring->next_to_use)
248 return ring->next_to_clean - ring->next_to_use - 1;
249
250 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
251}
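
A worked example of the wraparound arithmetic, with illustrative values:

	/*
	 * count = 256, next_to_use = 250, next_to_clean = 10:
	 *	256 + 10 - 250 - 1 = 15 descriptors still usable
	 * count = 256, next_to_use = 10, next_to_clean = 250:
	 *	250 - 10 - 1 = 239
	 * One slot is always left unused, so next_to_use == next_to_clean
	 * unambiguously means "ring empty" rather than "ring full".
	 */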
252
253/* board specific private data structure */
254struct igb_adapter {
255 struct timer_list watchdog_timer;
256 struct timer_list phy_info_timer;
257 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
258 u16 mng_vlan_id;
259 u32 bd_number;
260 u32 wol;
261 u32 en_mng_pt;
262 u16 link_speed;
263 u16 link_duplex;
264
265 /* Interrupt Throttle Rate */
266 u32 rx_itr_setting;
267 u32 tx_itr_setting;
268 u16 tx_itr;
269 u16 rx_itr;
270
271 struct work_struct reset_task;
272 struct work_struct watchdog_task;
273 bool fc_autoneg;
274 u8 tx_timeout_factor;
275 struct timer_list blink_timer;
276 unsigned long led_status;
277
278 /* TX */
279 struct igb_ring *tx_ring[16];
280 u32 tx_timeout_count;
281
282 /* RX */
283 struct igb_ring *rx_ring[16];
284 int num_tx_queues;
285 int num_rx_queues;
286
287 u32 max_frame_size;
288 u32 min_frame_size;
289
290 /* OS defined structs */
291 struct net_device *netdev;
292 struct pci_dev *pdev;
293 struct cyclecounter cycles;
294 struct timecounter clock;
295 struct timecompare compare;
296 struct hwtstamp_config hwtstamp_config;
297
298 spinlock_t stats64_lock;
299 struct rtnl_link_stats64 stats64;
300
301 /* structs defined in e1000_hw.h */
302 struct e1000_hw hw;
303 struct e1000_hw_stats stats;
304 struct e1000_phy_info phy_info;
305 struct e1000_phy_stats phy_stats;
306
307 u32 test_icr;
308 struct igb_ring test_tx_ring;
309 struct igb_ring test_rx_ring;
310
311 int msg_enable;
312
313 unsigned int num_q_vectors;
314 struct igb_q_vector *q_vector[MAX_Q_VECTORS];
315 struct msix_entry *msix_entries;
316 u32 eims_enable_mask;
317 u32 eims_other;
318
319 /* to not mess up cache alignment, always add to the bottom */
320 unsigned long state;
321 unsigned int flags;
322 u32 eeprom_wol;
323
324 struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
325 u16 tx_ring_count;
326 u16 rx_ring_count;
327 unsigned int vfs_allocated_count;
328 struct vf_data_storage *vf_data;
329 int vf_rate_link_speed;
330 u32 rss_queues;
331 u32 wvbr;
332};
333
334#define IGB_FLAG_HAS_MSI (1 << 0)
335#define IGB_FLAG_DCA_ENABLED (1 << 1)
336#define IGB_FLAG_QUAD_PORT_A (1 << 2)
337#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
338#define IGB_FLAG_DMAC (1 << 4)
339
340/* DMA Coalescing defines */
341#define IGB_MIN_TXPBSIZE 20408
342#define IGB_TX_BUF_4096 4096
343#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
344
345#define IGB_82576_TSYNC_SHIFT 19
346#define IGB_82580_TSYNC_SHIFT 24
347#define IGB_TS_HDR_LEN 16
348enum e1000_state_t {
349 __IGB_TESTING,
350 __IGB_RESETTING,
351 __IGB_DOWN
352};
353
354enum igb_boards {
355 board_82575,
356};
357
358extern char igb_driver_name[];
359extern char igb_driver_version[];
360
361extern int igb_up(struct igb_adapter *);
362extern void igb_down(struct igb_adapter *);
363extern void igb_reinit_locked(struct igb_adapter *);
364extern void igb_reset(struct igb_adapter *);
365extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
366extern int igb_setup_tx_resources(struct igb_ring *);
367extern int igb_setup_rx_resources(struct igb_ring *);
368extern void igb_free_tx_resources(struct igb_ring *);
369extern void igb_free_rx_resources(struct igb_ring *);
370extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
371extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
372extern void igb_setup_tctl(struct igb_adapter *);
373extern void igb_setup_rctl(struct igb_adapter *);
374extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
375extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
376 struct igb_buffer *);
377extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
378extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
379extern bool igb_has_link(struct igb_adapter *adapter);
380extern void igb_set_ethtool_ops(struct net_device *);
381extern void igb_power_up_link(struct igb_adapter *);
382
383static inline s32 igb_reset_phy(struct e1000_hw *hw)
384{
385 if (hw->phy.ops.reset)
386 return hw->phy.ops.reset(hw);
387
388 return 0;
389}
390
391static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
392{
393 if (hw->phy.ops.read_reg)
394 return hw->phy.ops.read_reg(hw, offset, data);
395
396 return 0;
397}
398
399static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
400{
401 if (hw->phy.ops.write_reg)
402 return hw->phy.ops.write_reg(hw, offset, data);
403
404 return 0;
405}
406
407static inline s32 igb_get_phy_info(struct e1000_hw *hw)
408{
409 if (hw->phy.ops.get_phy_info)
410 return hw->phy.ops.get_phy_info(hw);
411
412 return 0;
413}
414
415#endif /* _IGB_H_ */
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
new file mode 100644
index 00000000000..414b0225be8
--- /dev/null
+++ b/drivers/net/igb/igb_ethtool.c
@@ -0,0 +1,2201 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28/* ethtool support for igb */
29
30#include <linux/vmalloc.h>
31#include <linux/netdevice.h>
32#include <linux/pci.h>
33#include <linux/delay.h>
34#include <linux/interrupt.h>
35#include <linux/if_ether.h>
36#include <linux/ethtool.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39
40#include "igb.h"
41
42struct igb_stats {
43 char stat_string[ETH_GSTRING_LEN];
44 int sizeof_stat;
45 int stat_offset;
46};
47
48#define IGB_STAT(_name, _stat) { \
49 .stat_string = _name, \
50 .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
51 .stat_offset = offsetof(struct igb_adapter, _stat) \
52}
53static const struct igb_stats igb_gstrings_stats[] = {
54 IGB_STAT("rx_packets", stats.gprc),
55 IGB_STAT("tx_packets", stats.gptc),
56 IGB_STAT("rx_bytes", stats.gorc),
57 IGB_STAT("tx_bytes", stats.gotc),
58 IGB_STAT("rx_broadcast", stats.bprc),
59 IGB_STAT("tx_broadcast", stats.bptc),
60 IGB_STAT("rx_multicast", stats.mprc),
61 IGB_STAT("tx_multicast", stats.mptc),
62 IGB_STAT("multicast", stats.mprc),
63 IGB_STAT("collisions", stats.colc),
64 IGB_STAT("rx_crc_errors", stats.crcerrs),
65 IGB_STAT("rx_no_buffer_count", stats.rnbc),
66 IGB_STAT("rx_missed_errors", stats.mpc),
67 IGB_STAT("tx_aborted_errors", stats.ecol),
68 IGB_STAT("tx_carrier_errors", stats.tncrs),
69 IGB_STAT("tx_window_errors", stats.latecol),
70 IGB_STAT("tx_abort_late_coll", stats.latecol),
71 IGB_STAT("tx_deferred_ok", stats.dc),
72 IGB_STAT("tx_single_coll_ok", stats.scc),
73 IGB_STAT("tx_multi_coll_ok", stats.mcc),
74 IGB_STAT("tx_timeout_count", tx_timeout_count),
75 IGB_STAT("rx_long_length_errors", stats.roc),
76 IGB_STAT("rx_short_length_errors", stats.ruc),
77 IGB_STAT("rx_align_errors", stats.algnerrc),
78 IGB_STAT("tx_tcp_seg_good", stats.tsctc),
79 IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
80 IGB_STAT("rx_flow_control_xon", stats.xonrxc),
81 IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
82 IGB_STAT("tx_flow_control_xon", stats.xontxc),
83 IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
84 IGB_STAT("rx_long_byte_count", stats.gorc),
85 IGB_STAT("tx_dma_out_of_sync", stats.doosync),
86 IGB_STAT("tx_smbus", stats.mgptc),
87 IGB_STAT("rx_smbus", stats.mgprc),
88 IGB_STAT("dropped_smbus", stats.mgpdc),
89 IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
90 IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
91 IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
92 IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
93};
94
95#define IGB_NETDEV_STAT(_net_stat) { \
96 .stat_string = __stringify(_net_stat), \
97 .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
98 .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
99}
100static const struct igb_stats igb_gstrings_net_stats[] = {
101 IGB_NETDEV_STAT(rx_errors),
102 IGB_NETDEV_STAT(tx_errors),
103 IGB_NETDEV_STAT(tx_dropped),
104 IGB_NETDEV_STAT(rx_length_errors),
105 IGB_NETDEV_STAT(rx_over_errors),
106 IGB_NETDEV_STAT(rx_frame_errors),
107 IGB_NETDEV_STAT(rx_fifo_errors),
108 IGB_NETDEV_STAT(tx_fifo_errors),
109 IGB_NETDEV_STAT(tx_heartbeat_errors)
110};
111
112#define IGB_GLOBAL_STATS_LEN \
113 (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
114#define IGB_NETDEV_STATS_LEN \
115 (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
116#define IGB_RX_QUEUE_STATS_LEN \
117 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))
118
119#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
120
121#define IGB_QUEUE_STATS_LEN \
122 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
123 IGB_RX_QUEUE_STATS_LEN) + \
124 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
125 IGB_TX_QUEUE_STATS_LEN))
126#define IGB_STATS_LEN \
127 (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
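
A sketch of how these tables are consumed when "ethtool -S" requests statistics, mirroring the extraction logic later in this file: each entry is fetched from the adapter structure by stat_offset and widened according to sizeof_stat:

	static void example_extract_stats(struct igb_adapter *adapter, u64 *data)
	{
		char *p;
		int i;

		for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
			p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
			data[i] = (igb_gstrings_stats[i].sizeof_stat ==
				   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
		}
	}
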
128
129static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
130 "Register test (offline)", "Eeprom test (offline)",
131 "Interrupt test (offline)", "Loopback test (offline)",
132 "Link test (on/offline)"
133};
134#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
135
136static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
137{
138 struct igb_adapter *adapter = netdev_priv(netdev);
139 struct e1000_hw *hw = &adapter->hw;
140 u32 status;
141
142 if (hw->phy.media_type == e1000_media_type_copper) {
143
144 ecmd->supported = (SUPPORTED_10baseT_Half |
145 SUPPORTED_10baseT_Full |
146 SUPPORTED_100baseT_Half |
147 SUPPORTED_100baseT_Full |
148 SUPPORTED_1000baseT_Full|
149 SUPPORTED_Autoneg |
150 SUPPORTED_TP);
151 ecmd->advertising = ADVERTISED_TP;
152
153 if (hw->mac.autoneg == 1) {
154 ecmd->advertising |= ADVERTISED_Autoneg;
155 /* e1000 autoneg advertise bits match the ethtool ADVERTISED_* flags */
156 ecmd->advertising |= hw->phy.autoneg_advertised;
157 }
158
159 ecmd->port = PORT_TP;
160 ecmd->phy_address = hw->phy.addr;
161 } else {
162 ecmd->supported = (SUPPORTED_1000baseT_Full |
163 SUPPORTED_FIBRE |
164 SUPPORTED_Autoneg);
165
166 ecmd->advertising = (ADVERTISED_1000baseT_Full |
167 ADVERTISED_FIBRE |
168 ADVERTISED_Autoneg);
169
170 ecmd->port = PORT_FIBRE;
171 }
172
173 ecmd->transceiver = XCVR_INTERNAL;
174
175 status = rd32(E1000_STATUS);
176
177 if (status & E1000_STATUS_LU) {
178
179 if ((status & E1000_STATUS_SPEED_1000) ||
180 hw->phy.media_type != e1000_media_type_copper)
181 ethtool_cmd_speed_set(ecmd, SPEED_1000);
182 else if (status & E1000_STATUS_SPEED_100)
183 ethtool_cmd_speed_set(ecmd, SPEED_100);
184 else
185 ethtool_cmd_speed_set(ecmd, SPEED_10);
186
187 if ((status & E1000_STATUS_FD) ||
188 hw->phy.media_type != e1000_media_type_copper)
189 ecmd->duplex = DUPLEX_FULL;
190 else
191 ecmd->duplex = DUPLEX_HALF;
192 } else {
193 ethtool_cmd_speed_set(ecmd, -1);
194 ecmd->duplex = -1;
195 }
196
197 ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
198 return 0;
199}
200
201static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
202{
203 struct igb_adapter *adapter = netdev_priv(netdev);
204 struct e1000_hw *hw = &adapter->hw;
205
206 /* When SoL/IDER sessions are active, autoneg/speed/duplex
207 * cannot be changed */
208 if (igb_check_reset_block(hw)) {
209 dev_err(&adapter->pdev->dev, "Cannot change link "
210 "characteristics when SoL/IDER is active.\n");
211 return -EINVAL;
212 }
213
214 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
215 msleep(1);
216
217 if (ecmd->autoneg == AUTONEG_ENABLE) {
218 hw->mac.autoneg = 1;
219 hw->phy.autoneg_advertised = ecmd->advertising |
220 ADVERTISED_TP |
221 ADVERTISED_Autoneg;
222 ecmd->advertising = hw->phy.autoneg_advertised;
223 if (adapter->fc_autoneg)
224 hw->fc.requested_mode = e1000_fc_default;
225 } else {
226 u32 speed = ethtool_cmd_speed(ecmd);
227 if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
228 clear_bit(__IGB_RESETTING, &adapter->state);
229 return -EINVAL;
230 }
231 }
232
233 /* reset the link */
234 if (netif_running(adapter->netdev)) {
235 igb_down(adapter);
236 igb_up(adapter);
237 } else
238 igb_reset(adapter);
239
240 clear_bit(__IGB_RESETTING, &adapter->state);
241 return 0;
242}
243
244static u32 igb_get_link(struct net_device *netdev)
245{
246 struct igb_adapter *adapter = netdev_priv(netdev);
247 struct e1000_mac_info *mac = &adapter->hw.mac;
248
249 /*
250 * If the link is not reported up to netdev, interrupts are disabled,
251 * and so the physical link state may have changed since we last
252 * looked. Set get_link_status to make sure that the true link
253 * state is interrogated, rather than pulling a cached and possibly
254 * stale link state from the driver.
255 */
256 if (!netif_carrier_ok(netdev))
257 mac->get_link_status = 1;
258
259 return igb_has_link(adapter);
260}
261
262static void igb_get_pauseparam(struct net_device *netdev,
263 struct ethtool_pauseparam *pause)
264{
265 struct igb_adapter *adapter = netdev_priv(netdev);
266 struct e1000_hw *hw = &adapter->hw;
267
268 pause->autoneg =
269 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
270
271 if (hw->fc.current_mode == e1000_fc_rx_pause)
272 pause->rx_pause = 1;
273 else if (hw->fc.current_mode == e1000_fc_tx_pause)
274 pause->tx_pause = 1;
275 else if (hw->fc.current_mode == e1000_fc_full) {
276 pause->rx_pause = 1;
277 pause->tx_pause = 1;
278 }
279}
280
281static int igb_set_pauseparam(struct net_device *netdev,
282 struct ethtool_pauseparam *pause)
283{
284 struct igb_adapter *adapter = netdev_priv(netdev);
285 struct e1000_hw *hw = &adapter->hw;
286 int retval = 0;
287
288 adapter->fc_autoneg = pause->autoneg;
289
290 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
291 msleep(1);
292
293 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
294 hw->fc.requested_mode = e1000_fc_default;
295 if (netif_running(adapter->netdev)) {
296 igb_down(adapter);
297 igb_up(adapter);
298 } else {
299 igb_reset(adapter);
300 }
301 } else {
302 if (pause->rx_pause && pause->tx_pause)
303 hw->fc.requested_mode = e1000_fc_full;
304 else if (pause->rx_pause && !pause->tx_pause)
305 hw->fc.requested_mode = e1000_fc_rx_pause;
306 else if (!pause->rx_pause && pause->tx_pause)
307 hw->fc.requested_mode = e1000_fc_tx_pause;
308 else if (!pause->rx_pause && !pause->tx_pause)
309 hw->fc.requested_mode = e1000_fc_none;
310
311 hw->fc.current_mode = hw->fc.requested_mode;
312
313 retval = ((hw->phy.media_type == e1000_media_type_copper) ?
314 igb_force_mac_fc(hw) : igb_setup_link(hw));
315 }
316
317 clear_bit(__IGB_RESETTING, &adapter->state);
318 return retval;
319}
320
321static u32 igb_get_msglevel(struct net_device *netdev)
322{
323 struct igb_adapter *adapter = netdev_priv(netdev);
324 return adapter->msg_enable;
325}
326
327static void igb_set_msglevel(struct net_device *netdev, u32 data)
328{
329 struct igb_adapter *adapter = netdev_priv(netdev);
330 adapter->msg_enable = data;
331}
332
333static int igb_get_regs_len(struct net_device *netdev)
334{
335#define IGB_REGS_LEN 555 /* regs_buff[0..554]: 551 regs + 4 OS2BMC stats */
336 return IGB_REGS_LEN * sizeof(u32);
337}
338
339static void igb_get_regs(struct net_device *netdev,
340 struct ethtool_regs *regs, void *p)
341{
342 struct igb_adapter *adapter = netdev_priv(netdev);
343 struct e1000_hw *hw = &adapter->hw;
344 u32 *regs_buff = p;
345 u8 i;
346
347 memset(p, 0, IGB_REGS_LEN * sizeof(u32));
348
349 regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
350
351 /* General Registers */
352 regs_buff[0] = rd32(E1000_CTRL);
353 regs_buff[1] = rd32(E1000_STATUS);
354 regs_buff[2] = rd32(E1000_CTRL_EXT);
355 regs_buff[3] = rd32(E1000_MDIC);
356 regs_buff[4] = rd32(E1000_SCTL);
357 regs_buff[5] = rd32(E1000_CONNSW);
358 regs_buff[6] = rd32(E1000_VET);
359 regs_buff[7] = rd32(E1000_LEDCTL);
360 regs_buff[8] = rd32(E1000_PBA);
361 regs_buff[9] = rd32(E1000_PBS);
362 regs_buff[10] = rd32(E1000_FRTIMER);
363 regs_buff[11] = rd32(E1000_TCPTIMER);
364
365 /* NVM Register */
366 regs_buff[12] = rd32(E1000_EECD);
367
368 /* Interrupt */
369 /* Read EICS in place of EICR: both return the same value,
370 * but reading EICS does not clear the cause bits */
371 regs_buff[13] = rd32(E1000_EICS);
372 regs_buff[14] = rd32(E1000_EICS);
373 regs_buff[15] = rd32(E1000_EIMS);
374 regs_buff[16] = rd32(E1000_EIMC);
375 regs_buff[17] = rd32(E1000_EIAC);
376 regs_buff[18] = rd32(E1000_EIAM);
377 /* Read ICS in place of ICR: both return the same value,
378 * but reading ICS does not clear the cause bits */
379 regs_buff[19] = rd32(E1000_ICS);
380 regs_buff[20] = rd32(E1000_ICS);
381 regs_buff[21] = rd32(E1000_IMS);
382 regs_buff[22] = rd32(E1000_IMC);
383 regs_buff[23] = rd32(E1000_IAC);
384 regs_buff[24] = rd32(E1000_IAM);
385 regs_buff[25] = rd32(E1000_IMIRVP);
386
387 /* Flow Control */
388 regs_buff[26] = rd32(E1000_FCAL);
389 regs_buff[27] = rd32(E1000_FCAH);
390 regs_buff[28] = rd32(E1000_FCTTV);
391 regs_buff[29] = rd32(E1000_FCRTL);
392 regs_buff[30] = rd32(E1000_FCRTH);
393 regs_buff[31] = rd32(E1000_FCRTV);
394
395 /* Receive */
396 regs_buff[32] = rd32(E1000_RCTL);
397 regs_buff[33] = rd32(E1000_RXCSUM);
398 regs_buff[34] = rd32(E1000_RLPML);
399 regs_buff[35] = rd32(E1000_RFCTL);
400 regs_buff[36] = rd32(E1000_MRQC);
401 regs_buff[37] = rd32(E1000_VT_CTL);
402
403 /* Transmit */
404 regs_buff[38] = rd32(E1000_TCTL);
405 regs_buff[39] = rd32(E1000_TCTL_EXT);
406 regs_buff[40] = rd32(E1000_TIPG);
407 regs_buff[41] = rd32(E1000_DTXCTL);
408
409 /* Wake Up */
410 regs_buff[42] = rd32(E1000_WUC);
411 regs_buff[43] = rd32(E1000_WUFC);
412 regs_buff[44] = rd32(E1000_WUS);
413 regs_buff[45] = rd32(E1000_IPAV);
414 regs_buff[46] = rd32(E1000_WUPL);
415
416 /* MAC */
417 regs_buff[47] = rd32(E1000_PCS_CFG0);
418 regs_buff[48] = rd32(E1000_PCS_LCTL);
419 regs_buff[49] = rd32(E1000_PCS_LSTAT);
420 regs_buff[50] = rd32(E1000_PCS_ANADV);
421 regs_buff[51] = rd32(E1000_PCS_LPAB);
422 regs_buff[52] = rd32(E1000_PCS_NPTX);
423 regs_buff[53] = rd32(E1000_PCS_LPABNP);
424
425 /* Statistics */
426 regs_buff[54] = adapter->stats.crcerrs;
427 regs_buff[55] = adapter->stats.algnerrc;
428 regs_buff[56] = adapter->stats.symerrs;
429 regs_buff[57] = adapter->stats.rxerrc;
430 regs_buff[58] = adapter->stats.mpc;
431 regs_buff[59] = adapter->stats.scc;
432 regs_buff[60] = adapter->stats.ecol;
433 regs_buff[61] = adapter->stats.mcc;
434 regs_buff[62] = adapter->stats.latecol;
435 regs_buff[63] = adapter->stats.colc;
436 regs_buff[64] = adapter->stats.dc;
437 regs_buff[65] = adapter->stats.tncrs;
438 regs_buff[66] = adapter->stats.sec;
439 regs_buff[67] = adapter->stats.htdpmc;
440 regs_buff[68] = adapter->stats.rlec;
441 regs_buff[69] = adapter->stats.xonrxc;
442 regs_buff[70] = adapter->stats.xontxc;
443 regs_buff[71] = adapter->stats.xoffrxc;
444 regs_buff[72] = adapter->stats.xofftxc;
445 regs_buff[73] = adapter->stats.fcruc;
446 regs_buff[74] = adapter->stats.prc64;
447 regs_buff[75] = adapter->stats.prc127;
448 regs_buff[76] = adapter->stats.prc255;
449 regs_buff[77] = adapter->stats.prc511;
450 regs_buff[78] = adapter->stats.prc1023;
451 regs_buff[79] = adapter->stats.prc1522;
452 regs_buff[80] = adapter->stats.gprc;
453 regs_buff[81] = adapter->stats.bprc;
454 regs_buff[82] = adapter->stats.mprc;
455 regs_buff[83] = adapter->stats.gptc;
456 regs_buff[84] = adapter->stats.gorc;
457 regs_buff[86] = adapter->stats.gotc;
458 regs_buff[88] = adapter->stats.rnbc;
459 regs_buff[89] = adapter->stats.ruc;
460 regs_buff[90] = adapter->stats.rfc;
461 regs_buff[91] = adapter->stats.roc;
462 regs_buff[92] = adapter->stats.rjc;
463 regs_buff[93] = adapter->stats.mgprc;
464 regs_buff[94] = adapter->stats.mgpdc;
465 regs_buff[95] = adapter->stats.mgptc;
466 regs_buff[96] = adapter->stats.tor;
467 regs_buff[98] = adapter->stats.tot;
468 regs_buff[100] = adapter->stats.tpr;
469 regs_buff[101] = adapter->stats.tpt;
470 regs_buff[102] = adapter->stats.ptc64;
471 regs_buff[103] = adapter->stats.ptc127;
472 regs_buff[104] = adapter->stats.ptc255;
473 regs_buff[105] = adapter->stats.ptc511;
474 regs_buff[106] = adapter->stats.ptc1023;
475 regs_buff[107] = adapter->stats.ptc1522;
476 regs_buff[108] = adapter->stats.mptc;
477 regs_buff[109] = adapter->stats.bptc;
478 regs_buff[110] = adapter->stats.tsctc;
479 regs_buff[111] = adapter->stats.iac;
480 regs_buff[112] = adapter->stats.rpthc;
481 regs_buff[113] = adapter->stats.hgptc;
482 regs_buff[114] = adapter->stats.hgorc;
483 regs_buff[116] = adapter->stats.hgotc;
484 regs_buff[118] = adapter->stats.lenerrs;
485 regs_buff[119] = adapter->stats.scvpc;
486 regs_buff[120] = adapter->stats.hrmpc;
487
488 for (i = 0; i < 4; i++)
489 regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
490 for (i = 0; i < 4; i++)
491 regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
492 for (i = 0; i < 4; i++)
493 regs_buff[129 + i] = rd32(E1000_RDBAL(i));
494 for (i = 0; i < 4; i++)
495 regs_buff[133 + i] = rd32(E1000_RDBAH(i));
496 for (i = 0; i < 4; i++)
497 regs_buff[137 + i] = rd32(E1000_RDLEN(i));
498 for (i = 0; i < 4; i++)
499 regs_buff[141 + i] = rd32(E1000_RDH(i));
500 for (i = 0; i < 4; i++)
501 regs_buff[145 + i] = rd32(E1000_RDT(i));
502 for (i = 0; i < 4; i++)
503 regs_buff[149 + i] = rd32(E1000_RXDCTL(i));
504
505 for (i = 0; i < 10; i++)
506 regs_buff[153 + i] = rd32(E1000_EITR(i));
507 for (i = 0; i < 8; i++)
508 regs_buff[163 + i] = rd32(E1000_IMIR(i));
509 for (i = 0; i < 8; i++)
510 regs_buff[171 + i] = rd32(E1000_IMIREXT(i));
511 for (i = 0; i < 16; i++)
512 regs_buff[179 + i] = rd32(E1000_RAL(i));
513 for (i = 0; i < 16; i++)
514 regs_buff[195 + i] = rd32(E1000_RAH(i));
515
516 for (i = 0; i < 4; i++)
517 regs_buff[211 + i] = rd32(E1000_TDBAL(i));
518 for (i = 0; i < 4; i++)
519 regs_buff[215 + i] = rd32(E1000_TDBAH(i));
520 for (i = 0; i < 4; i++)
521 regs_buff[219 + i] = rd32(E1000_TDLEN(i));
522 for (i = 0; i < 4; i++)
523 regs_buff[223 + i] = rd32(E1000_TDH(i));
524 for (i = 0; i < 4; i++)
525 regs_buff[227 + i] = rd32(E1000_TDT(i));
526 for (i = 0; i < 4; i++)
527 regs_buff[231 + i] = rd32(E1000_TXDCTL(i));
528 for (i = 0; i < 4; i++)
529 regs_buff[235 + i] = rd32(E1000_TDWBAL(i));
530 for (i = 0; i < 4; i++)
531 regs_buff[239 + i] = rd32(E1000_TDWBAH(i));
532 for (i = 0; i < 4; i++)
533 regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i));
534
535 for (i = 0; i < 4; i++)
536 regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i));
537 for (i = 0; i < 4; i++)
538 regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i));
539 for (i = 0; i < 32; i++)
540 regs_buff[255 + i] = rd32(E1000_WUPM_REG(i));
541 for (i = 0; i < 128; i++)
542 regs_buff[287 + i] = rd32(E1000_FFMT_REG(i));
543 for (i = 0; i < 128; i++)
544 regs_buff[415 + i] = rd32(E1000_FFVT_REG(i));
545 for (i = 0; i < 4; i++)
546 regs_buff[543 + i] = rd32(E1000_FFLT_REG(i));
547
548 regs_buff[547] = rd32(E1000_TDFH);
549 regs_buff[548] = rd32(E1000_TDFT);
550 regs_buff[549] = rd32(E1000_TDFHS);
551 regs_buff[550] = rd32(E1000_TDFPC);
552 regs_buff[551] = adapter->stats.o2bgptc;
553 regs_buff[552] = adapter->stats.b2ospc;
554 regs_buff[553] = adapter->stats.o2bspc;
555 regs_buff[554] = adapter->stats.b2ogprc;
556}
557
558static int igb_get_eeprom_len(struct net_device *netdev)
559{
560 struct igb_adapter *adapter = netdev_priv(netdev);
561 return adapter->hw.nvm.word_size * 2;
562}
563
564static int igb_get_eeprom(struct net_device *netdev,
565 struct ethtool_eeprom *eeprom, u8 *bytes)
566{
567 struct igb_adapter *adapter = netdev_priv(netdev);
568 struct e1000_hw *hw = &adapter->hw;
569 u16 *eeprom_buff;
570 int first_word, last_word;
571 int ret_val = 0;
572 u16 i;
573
574 if (eeprom->len == 0)
575 return -EINVAL;
576
577 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
578
579 first_word = eeprom->offset >> 1;
580 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
581
582 eeprom_buff = kmalloc(sizeof(u16) *
583 (last_word - first_word + 1), GFP_KERNEL);
584 if (!eeprom_buff)
585 return -ENOMEM;
586
587 if (hw->nvm.type == e1000_nvm_eeprom_spi)
588 ret_val = hw->nvm.ops.read(hw, first_word,
589 last_word - first_word + 1,
590 eeprom_buff);
591 else {
592 for (i = 0; i < last_word - first_word + 1; i++) {
593 ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
594 &eeprom_buff[i]);
595 if (ret_val)
596 break;
597 }
598 }
599
600 /* Device's eeprom is always little-endian, word addressable */
601 for (i = 0; i < last_word - first_word + 1; i++)
602 le16_to_cpus(&eeprom_buff[i]);
603
604 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
605 eeprom->len);
606 kfree(eeprom_buff);
607
608 return ret_val;
609}
610
611static int igb_set_eeprom(struct net_device *netdev,
612 struct ethtool_eeprom *eeprom, u8 *bytes)
613{
614 struct igb_adapter *adapter = netdev_priv(netdev);
615 struct e1000_hw *hw = &adapter->hw;
616 u16 *eeprom_buff;
617 void *ptr;
618 int max_len, first_word, last_word, ret_val = 0;
619 u16 i;
620
621 if (eeprom->len == 0)
622 return -EOPNOTSUPP;
623
624 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
625 return -EFAULT;
626
627 max_len = hw->nvm.word_size * 2;
628
629 first_word = eeprom->offset >> 1;
630 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
631 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
632 if (!eeprom_buff)
633 return -ENOMEM;
634
635 ptr = (void *)eeprom_buff;
636
637 if (eeprom->offset & 1) {
638 /* need read/modify/write of first changed EEPROM word */
639 /* only the second byte of the word is being modified */
640 ret_val = hw->nvm.ops.read(hw, first_word, 1,
641 &eeprom_buff[0]);
642 ptr++;
643 }
644 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
645 /* need read/modify/write of last changed EEPROM word */
646 /* only the first byte of the word is being modified */
647 ret_val = hw->nvm.ops.read(hw, last_word, 1,
648 &eeprom_buff[last_word - first_word]);
649 }
650
651 /* Device's eeprom is always little-endian, word addressable */
652 for (i = 0; i < last_word - first_word + 1; i++)
653 le16_to_cpus(&eeprom_buff[i]);
654
655 memcpy(ptr, bytes, eeprom->len);
656
657 for (i = 0; i < last_word - first_word + 1; i++)
658 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
659
660 ret_val = hw->nvm.ops.write(hw, first_word,
661 last_word - first_word + 1, eeprom_buff);
662
663 /* Update the checksum if the write touched the first part of
664 * the EEPROM (at or below the checksum word) */
665 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
666 hw->nvm.ops.update(hw);
667
668 kfree(eeprom_buff);
669 return ret_val;
670}
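
The word arithmetic above, worked through with illustrative values: a request for offset = 3, len = 4 (bytes 3-6) gives

	first_word = 3 >> 1 = 1			/* word 1 = bytes 2-3 */
	last_word  = (3 + 4 - 1) >> 1 = 3	/* word 3 = bytes 6-7 */

Both end words are only partially covered by the request, which is exactly why the set path read-modify-writes them before the bulk write.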
671
672static void igb_get_drvinfo(struct net_device *netdev,
673 struct ethtool_drvinfo *drvinfo)
674{
675 struct igb_adapter *adapter = netdev_priv(netdev);
676 char firmware_version[32];
677 u16 eeprom_data;
678
679 strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
680 strncpy(drvinfo->version, igb_driver_version,
681 sizeof(drvinfo->version) - 1);
682
683 /* EEPROM image version # is reported as firmware version # for
684 * 82575 controllers */
685 adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
686 sprintf(firmware_version, "%d.%d-%d",
687 (eeprom_data & 0xF000) >> 12,
688 (eeprom_data & 0x0FF0) >> 4,
689 eeprom_data & 0x000F);
690
691 strncpy(drvinfo->fw_version, firmware_version,
692 sizeof(drvinfo->fw_version) - 1);
693 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
694 sizeof(drvinfo->bus_info) - 1);
695 drvinfo->n_stats = IGB_STATS_LEN;
696 drvinfo->testinfo_len = IGB_TEST_LEN;
697 drvinfo->regdump_len = igb_get_regs_len(netdev);
698 drvinfo->eedump_len = igb_get_eeprom_len(netdev);
699}
700
701static void igb_get_ringparam(struct net_device *netdev,
702 struct ethtool_ringparam *ring)
703{
704 struct igb_adapter *adapter = netdev_priv(netdev);
705
706 ring->rx_max_pending = IGB_MAX_RXD;
707 ring->tx_max_pending = IGB_MAX_TXD;
708 ring->rx_mini_max_pending = 0;
709 ring->rx_jumbo_max_pending = 0;
710 ring->rx_pending = adapter->rx_ring_count;
711 ring->tx_pending = adapter->tx_ring_count;
712 ring->rx_mini_pending = 0;
713 ring->rx_jumbo_pending = 0;
714}
715
716static int igb_set_ringparam(struct net_device *netdev,
717 struct ethtool_ringparam *ring)
718{
719 struct igb_adapter *adapter = netdev_priv(netdev);
720 struct igb_ring *temp_ring;
721 int i, err = 0;
722 u16 new_rx_count, new_tx_count;
723
724 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
725 return -EINVAL;
726
727 new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
728 new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
729 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
730
731 new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
732 new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
733 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
734
735 if ((new_tx_count == adapter->tx_ring_count) &&
736 (new_rx_count == adapter->rx_ring_count)) {
737 /* nothing to do */
738 return 0;
739 }
740
741 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
742 msleep(1);
743
744 if (!netif_running(adapter->netdev)) {
745 for (i = 0; i < adapter->num_tx_queues; i++)
746 adapter->tx_ring[i]->count = new_tx_count;
747 for (i = 0; i < adapter->num_rx_queues; i++)
748 adapter->rx_ring[i]->count = new_rx_count;
749 adapter->tx_ring_count = new_tx_count;
750 adapter->rx_ring_count = new_rx_count;
751 goto clear_reset;
752 }
753
754 if (adapter->num_tx_queues > adapter->num_rx_queues)
755 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
756 else
757 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
758
759 if (!temp_ring) {
760 err = -ENOMEM;
761 goto clear_reset;
762 }
763
764 igb_down(adapter);
765
766 /*
767 * We can't just free everything and then setup again,
768 * because the ISRs in MSI-X mode get passed pointers
769 * to the tx and rx ring structs.
770 */
771 if (new_tx_count != adapter->tx_ring_count) {
772 for (i = 0; i < adapter->num_tx_queues; i++) {
773 memcpy(&temp_ring[i], adapter->tx_ring[i],
774 sizeof(struct igb_ring));
775
776 temp_ring[i].count = new_tx_count;
777 err = igb_setup_tx_resources(&temp_ring[i]);
778 if (err) {
779 while (i) {
780 i--;
781 igb_free_tx_resources(&temp_ring[i]);
782 }
783 goto err_setup;
784 }
785 }
786
787 for (i = 0; i < adapter->num_tx_queues; i++) {
788 igb_free_tx_resources(adapter->tx_ring[i]);
789
790 memcpy(adapter->tx_ring[i], &temp_ring[i],
791 sizeof(struct igb_ring));
792 }
793
794 adapter->tx_ring_count = new_tx_count;
795 }
796
797 if (new_rx_count != adapter->rx_ring_count) {
798 for (i = 0; i < adapter->num_rx_queues; i++) {
799 memcpy(&temp_ring[i], adapter->rx_ring[i],
800 sizeof(struct igb_ring));
801
802 temp_ring[i].count = new_rx_count;
803 err = igb_setup_rx_resources(&temp_ring[i]);
804 if (err) {
805 while (i) {
806 i--;
807 igb_free_rx_resources(&temp_ring[i]);
808 }
809 goto err_setup;
810 }
811
812 }
813
814 for (i = 0; i < adapter->num_rx_queues; i++) {
815 igb_free_rx_resources(adapter->rx_ring[i]);
816
817 memcpy(adapter->rx_ring[i], &temp_ring[i],
818 sizeof(struct igb_ring));
819 }
820
821 adapter->rx_ring_count = new_rx_count;
822 }
823err_setup:
824 igb_up(adapter);
825 vfree(temp_ring);
826clear_reset:
827 clear_bit(__IGB_RESETTING, &adapter->state);
828 return err;
829}
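
Editor's note: the clamp-and-align step near the top of igb_set_ringparam() is a small pure function. A minimal userspace sketch of the same arithmetic, assuming the usual igb bounds (80/4096 descriptors in multiples of 8; the real IGB_MIN_RXD/IGB_MAX_RXD/REQ_RX_DESCRIPTOR_MULTIPLE constants live in igb.h):

#include <stdio.h>

/* Assumed stand-ins for IGB_MIN_RXD / IGB_MAX_RXD / REQ_RX_DESCRIPTOR_MULTIPLE */
#define MIN_RXD      80
#define MAX_RXD      4096
#define RXD_MULTIPLE 8

/* Round up to a power-of-two multiple, like the kernel's ALIGN(). */
static unsigned align_up(unsigned v, unsigned m)
{
	return (v + m - 1) & ~(m - 1);
}

/* The min_t/max_t/ALIGN sequence from igb_set_ringparam(). */
static unsigned clamp_rx_count(unsigned requested)
{
	unsigned n = requested > MAX_RXD ? MAX_RXD : requested;

	if (n < MIN_RXD)
		n = MIN_RXD;
	return align_up(n, RXD_MULTIPLE);
}

int main(void)
{
	printf("%u %u %u\n", clamp_rx_count(10),	/* -> 80   */
	       clamp_rx_count(1001),			/* -> 1008 */
	       clamp_rx_count(100000));			/* -> 4096 */
	return 0;
}
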
830
831/* ethtool register test data */
832struct igb_reg_test {
833 u16 reg;
834 u16 reg_offset;
835 u16 array_len;
836 u16 test_type;
837 u32 mask;
838 u32 write;
839};
840
841/* In the hardware, registers are laid out either singly, in arrays
842 * spaced 0x100 bytes apart, or in contiguous tables. We assume
843 * most tests take place on arrays or single registers (handled
844 * as a single-element array) and special-case the tables.
845 * Table tests are always pattern tests.
846 *
847 * We also make provision for some required setup steps by specifying
848 * registers to be written without any read-back testing.
849 */
850
851#define PATTERN_TEST 1
852#define SET_READ_TEST 2
853#define WRITE_NO_TEST 3
854#define TABLE32_TEST 4
855#define TABLE64_TEST_LO 5
856#define TABLE64_TEST_HI 6
857
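
Editor's note: how each test type turns a table entry into concrete register offsets is worth spelling out. A hypothetical userspace sketch of the address arithmetic used by the test loop in igb_reg_test() further below (struct and function names here are illustrative, not from the driver):

#include <stdio.h>
#include <stdint.h>

struct reg_test_entry {
	uint16_t reg;		/* base register offset */
	uint16_t reg_offset;	/* stride for array-style registers */
};

/* i-th offset touched by an entry, per test type (values as defined above) */
static uint32_t nth_test_offset(const struct reg_test_entry *t, int type, int i)
{
	switch (type) {
	case 1: case 2: case 3:		/* PATTERN/SET_READ/WRITE_NO_TEST */
		return t->reg + i * t->reg_offset;
	case 4:				/* TABLE32_TEST: packed 32-bit table */
		return t->reg + i * 4;
	case 5:				/* TABLE64_TEST_LO: low dword of entry */
		return t->reg + i * 8;
	default:			/* TABLE64_TEST_HI: high dword of entry */
		return t->reg + 4 + i * 8;
	}
}

int main(void)
{
	struct reg_test_entry rdbal = { 0x2800, 0x100 };	/* e.g. RDBAL(0) */
	int i;

	for (i = 0; i < 4; i++)
		printf("RDBAL[%d] at 0x%04x\n", i,
		       (unsigned)nth_test_offset(&rdbal, 1, i));
	return 0;
}
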
858/* i350 reg test */
859static struct igb_reg_test reg_test_i350[] = {
860 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
861 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
862 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
863 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 },
864 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
865 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
866 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
867 { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
868 { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
869 { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
870 /* RDH is read-only for i350, only test RDT. */
871 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
872 { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
873 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
874 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
875 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
876 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
877 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
878 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
879 { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
880 { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
881 { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
882 { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
883 { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
884 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
885 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
886 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
887 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
888 { E1000_RA, 0, 16, TABLE64_TEST_LO,
889 0xFFFFFFFF, 0xFFFFFFFF },
890 { E1000_RA, 0, 16, TABLE64_TEST_HI,
891 0xC3FFFFFF, 0xFFFFFFFF },
892 { E1000_RA2, 0, 16, TABLE64_TEST_LO,
893 0xFFFFFFFF, 0xFFFFFFFF },
894 { E1000_RA2, 0, 16, TABLE64_TEST_HI,
895 0xC3FFFFFF, 0xFFFFFFFF },
896 { E1000_MTA, 0, 128, TABLE32_TEST,
897 0xFFFFFFFF, 0xFFFFFFFF },
898 { 0, 0, 0, 0 }
899};
900
901/* 82580 reg test */
902static struct igb_reg_test reg_test_82580[] = {
903 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
904 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
905 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
906 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
907 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
908 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
909 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
910 { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
911 { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
912 { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
913 /* RDH is read-only for 82580, only test RDT. */
914 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
915 { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
916 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
917 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
918 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
919 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
920 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
921 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
922 { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
923 { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
924 { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
925 { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
926 { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
927 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
928 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
929 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
930 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
931 { E1000_RA, 0, 16, TABLE64_TEST_LO,
932 0xFFFFFFFF, 0xFFFFFFFF },
933 { E1000_RA, 0, 16, TABLE64_TEST_HI,
934 0x83FFFFFF, 0xFFFFFFFF },
935 { E1000_RA2, 0, 8, TABLE64_TEST_LO,
936 0xFFFFFFFF, 0xFFFFFFFF },
937 { E1000_RA2, 0, 8, TABLE64_TEST_HI,
938 0x83FFFFFF, 0xFFFFFFFF },
939 { E1000_MTA, 0, 128, TABLE32_TEST,
940 0xFFFFFFFF, 0xFFFFFFFF },
941 { 0, 0, 0, 0 }
942};
943
944/* 82576 reg test */
945static struct igb_reg_test reg_test_82576[] = {
946 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
947 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
948 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
949 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
950 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
951 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
952 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
953 { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
954 { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
955 { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
956 /* Enable all RX queues before testing. */
957 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
958 { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
959 /* RDH is read-only for 82576, only test RDT. */
960 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
961 { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
962 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
963 { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
964 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
965 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
966 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
967 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
968 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
969 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
970 { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
971 { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
972 { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
973 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
974 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
975 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
976 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
977 { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
978 { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
979 { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
980 { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
981 { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
982 { 0, 0, 0, 0 }
983};
984
985/* 82575 register test */
986static struct igb_reg_test reg_test_82575[] = {
987 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
988 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
989 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
990 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
991 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
992 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
993 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
994 /* Enable all four RX queues before testing. */
995 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
996 /* RDH is read-only for 82575, only test RDT. */
997 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
998 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
999 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
1000 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1001 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
1002 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1003 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1004 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1005 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
1006 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
1007 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
1008 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
1009 { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
1010 { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1011 { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF },
1012 { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1013 { 0, 0, 0, 0 }
1014};
1015
1016static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
1017 int reg, u32 mask, u32 write)
1018{
1019 struct e1000_hw *hw = &adapter->hw;
1020 u32 pat, val;
1021 static const u32 _test[] =
1022 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1023 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
1024 wr32(reg, (_test[pat] & write));
1025 val = rd32(reg) & mask;
1026 if (val != (_test[pat] & write & mask)) {
1027 dev_err(&adapter->pdev->dev, "pattern test reg %04X "
1028 "failed: got 0x%08X expected 0x%08X\n",
1029 reg, val, (_test[pat] & write & mask));
1030 *data = reg;
1031 return true;
1032 }
1033 }
1034
1035 return false;
1036}
1037
1038static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
1039 int reg, u32 mask, u32 write)
1040{
1041 struct e1000_hw *hw = &adapter->hw;
1042 u32 val;
1043 wr32(reg, write & mask);
1044 val = rd32(reg);
1045 if ((write & mask) != (val & mask)) {
1046 dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
1047 " got 0x%08X expected 0x%08X\n", reg,
1048 (val & mask), (write & mask));
1049 *data = reg;
1050 return true;
1051 }
1052
1053 return false;
1054}
1055
1056#define REG_PATTERN_TEST(reg, mask, write) \
1057 do { \
1058 if (reg_pattern_test(adapter, data, reg, mask, write)) \
1059 return 1; \
1060 } while (0)
1061
1062#define REG_SET_AND_CHECK(reg, mask, write) \
1063 do { \
1064 if (reg_set_and_check(adapter, data, reg, mask, write)) \
1065 return 1; \
1066 } while (0)
1067
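
Editor's note: these macros deliberately `return` from the function that *invokes* them (igb_reg_test below), which is what lets the per-register loop bail out on the first failure without error-code plumbing. A stand-alone sketch of the idiom:

#include <stdio.h>

/* Early-return-from-caller, as REG_PATTERN_TEST/REG_SET_AND_CHECK do. */
#define FAIL_IF(cond, code)		\
	do {				\
		if (cond)		\
			return (code);	\
	} while (0)

static int run_checks(int x)
{
	FAIL_IF(x < 0, 1);	/* returns 1 from run_checks, not from FAIL_IF */
	FAIL_IF(x > 99, 2);
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", run_checks(-5), run_checks(50), run_checks(500));
	return 0;
}
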
1068static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
1069{
1070 struct e1000_hw *hw = &adapter->hw;
1071 struct igb_reg_test *test;
1072 u32 value, before, after;
1073 u32 i, toggle;
1074
1075 switch (adapter->hw.mac.type) {
1076 case e1000_i350:
1077 test = reg_test_i350;
1078 toggle = 0x7FEFF3FF;
1079 break;
1080 case e1000_82580:
1081 test = reg_test_82580;
1082 toggle = 0x7FEFF3FF;
1083 break;
1084 case e1000_82576:
1085 test = reg_test_82576;
1086 toggle = 0x7FFFF3FF;
1087 break;
1088 default:
1089 test = reg_test_82575;
1090 toggle = 0x7FFFF3FF;
1091 break;
1092 }
1093
1094 /* Because the status register is such a special case,
1095 * we handle it separately from the rest of the register
1096 * tests. Some bits are read-only, some toggle, and some
1097 * are writable on newer MACs.
1098 */
1099 before = rd32(E1000_STATUS);
1100 value = (rd32(E1000_STATUS) & toggle);
1101 wr32(E1000_STATUS, toggle);
1102 after = rd32(E1000_STATUS) & toggle;
1103 if (value != after) {
1104 dev_err(&adapter->pdev->dev, "failed STATUS register test "
1105 "got: 0x%08X expected: 0x%08X\n", after, value);
1106 *data = 1;
1107 return 1;
1108 }
1109 /* restore previous status */
1110 wr32(E1000_STATUS, before);
1111
1112 /* Perform the remainder of the register test, looping through
1113 * the test table until we either fail or reach the null entry.
1114 */
1115 while (test->reg) {
1116 for (i = 0; i < test->array_len; i++) {
1117 switch (test->test_type) {
1118 case PATTERN_TEST:
1119 REG_PATTERN_TEST(test->reg +
1120 (i * test->reg_offset),
1121 test->mask,
1122 test->write);
1123 break;
1124 case SET_READ_TEST:
1125 REG_SET_AND_CHECK(test->reg +
1126 (i * test->reg_offset),
1127 test->mask,
1128 test->write);
1129 break;
1130 case WRITE_NO_TEST:
1131 writel(test->write,
1132 (adapter->hw.hw_addr + test->reg)
1133 + (i * test->reg_offset));
1134 break;
1135 case TABLE32_TEST:
1136 REG_PATTERN_TEST(test->reg + (i * 4),
1137 test->mask,
1138 test->write);
1139 break;
1140 case TABLE64_TEST_LO:
1141 REG_PATTERN_TEST(test->reg + (i * 8),
1142 test->mask,
1143 test->write);
1144 break;
1145 case TABLE64_TEST_HI:
1146 REG_PATTERN_TEST((test->reg + 4) + (i * 8),
1147 test->mask,
1148 test->write);
1149 break;
1150 }
1151 }
1152 test++;
1153 }
1154
1155 *data = 0;
1156 return 0;
1157}
1158
1159static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1160{
1161 u16 temp;
1162 u16 checksum = 0;
1163 u16 i;
1164
1165 *data = 0;
1166 /* Read and add up the contents of the EEPROM */
1167 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
1168 if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) {
1169 *data = 1;
1170 break;
1171 }
1172 checksum += temp;
1173 }
1174
1175 /* If the checksum is not correct, return an error; otherwise the test passed */
1176 if ((checksum != (u16) NVM_SUM) && !(*data))
1177 *data = 2;
1178
1179 return *data;
1180}
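
Editor's note: the EEPROM test relies on the e1000 NVM convention that the 16-bit words from offset 0 through NVM_CHECKSUM_REG sum, modulo 2^16, to NVM_SUM; the checksum word at NVM_CHECKSUM_REG is written to make that hold. A self-contained sketch of both halves of the convention (the 0x003F/0xBABA values match the usual e1000 defines; treat them as assumptions here):

#include <stdio.h>
#include <stdint.h>

#define NVM_CHECKSUM_REG 0x003F		/* assumed, per e1000 convention */
#define NVM_SUM          0xBABA

/* Word that must be stored at NVM_CHECKSUM_REG for the image to verify. */
static uint16_t nvm_make_checksum(const uint16_t *word)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < NVM_CHECKSUM_REG; i++)
		sum += word[i];
	return (uint16_t)(NVM_SUM - sum);
}

/* The check igb_eeprom_test() performs over words 0..NVM_CHECKSUM_REG. */
static int nvm_checksum_ok(const uint16_t *word)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i <= NVM_CHECKSUM_REG; i++)
		sum += word[i];
	return sum == NVM_SUM;
}

int main(void)
{
	uint16_t nvm[NVM_CHECKSUM_REG + 1] = { 0x1234, 0x5678 };

	nvm[NVM_CHECKSUM_REG] = nvm_make_checksum(nvm);
	printf("checksum ok: %d\n", nvm_checksum_ok(nvm));
	return 0;
}
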
1181
1182static irqreturn_t igb_test_intr(int irq, void *data)
1183{
1184 struct igb_adapter *adapter = (struct igb_adapter *) data;
1185 struct e1000_hw *hw = &adapter->hw;
1186
1187 adapter->test_icr |= rd32(E1000_ICR);
1188
1189 return IRQ_HANDLED;
1190}
1191
1192static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1193{
1194 struct e1000_hw *hw = &adapter->hw;
1195 struct net_device *netdev = adapter->netdev;
1196 u32 mask, ics_mask, i = 0, shared_int = true;
1197 u32 irq = adapter->pdev->irq;
1198
1199 *data = 0;
1200
1201 /* Hook up test interrupt handler just for this test */
1202 if (adapter->msix_entries) {
1203 if (request_irq(adapter->msix_entries[0].vector,
1204 igb_test_intr, 0, netdev->name, adapter)) {
1205 *data = 1;
1206 return -1;
1207 }
1208 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
1209 shared_int = false;
1210 if (request_irq(irq,
1211 igb_test_intr, 0, netdev->name, adapter)) {
1212 *data = 1;
1213 return -1;
1214 }
1215 } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
1216 netdev->name, adapter)) {
1217 shared_int = false;
1218 } else if (request_irq(irq, igb_test_intr, IRQF_SHARED,
1219 netdev->name, adapter)) {
1220 *data = 1;
1221 return -1;
1222 }
1223 dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
1224 (shared_int ? "shared" : "unshared"));
1225
1226 /* Disable all the interrupts */
1227 wr32(E1000_IMC, ~0);
1228 wrfl();
1229 msleep(10);
1230
1231 /* Define all writable bits for ICS */
1232 switch (hw->mac.type) {
1233 case e1000_82575:
1234 ics_mask = 0x37F47EDD;
1235 break;
1236 case e1000_82576:
1237 ics_mask = 0x77D4FBFD;
1238 break;
1239 case e1000_82580:
1240 ics_mask = 0x77DCFED5;
1241 break;
1242 case e1000_i350:
1243 ics_mask = 0x77DCFED5;
1244 break;
1245 default:
1246 ics_mask = 0x7FFFFFFF;
1247 break;
1248 }
1249
1250 /* Test each interrupt */
1251 for (; i < 31; i++) {
1252 /* Interrupt to test */
1253 mask = 1 << i;
1254
1255 if (!(mask & ics_mask))
1256 continue;
1257
1258 if (!shared_int) {
1259 /* Disable the interrupt to be reported in
1260 * the cause register and then force the same
1261 * interrupt and see if one gets posted. If
1262 * an interrupt was posted to the bus, the
1263 * test failed.
1264 */
1265 adapter->test_icr = 0;
1266
1267 /* Flush any pending interrupts */
1268 wr32(E1000_ICR, ~0);
1269
1270 wr32(E1000_IMC, mask);
1271 wr32(E1000_ICS, mask);
1272 wrfl();
1273 msleep(10);
1274
1275 if (adapter->test_icr & mask) {
1276 *data = 3;
1277 break;
1278 }
1279 }
1280
1281 /* Enable the interrupt to be reported in
1282 * the cause register and then force the same
1283 * interrupt and see if one gets posted. If
1284 * an interrupt was not posted to the bus, the
1285 * test failed.
1286 */
1287 adapter->test_icr = 0;
1288
1289 /* Flush any pending interrupts */
1290 wr32(E1000_ICR, ~0);
1291
1292 wr32(E1000_IMS, mask);
1293 wr32(E1000_ICS, mask);
1294 wrfl();
1295 msleep(10);
1296
1297 if (!(adapter->test_icr & mask)) {
1298 *data = 4;
1299 break;
1300 }
1301
1302 if (!shared_int) {
1303 /* Disable the other interrupts to be reported in
1304 * the cause register and then force the other
1305 * interrupts and see if any get posted. If
1306 * an interrupt was posted to the bus, the
1307 * test failed.
1308 */
1309 adapter->test_icr = 0;
1310
1311 /* Flush any pending interrupts */
1312 wr32(E1000_ICR, ~0);
1313
1314 wr32(E1000_IMC, ~mask);
1315 wr32(E1000_ICS, ~mask);
1316 wrfl();
1317 msleep(10);
1318
1319 if (adapter->test_icr & mask) {
1320 *data = 5;
1321 break;
1322 }
1323 }
1324 }
1325
1326 /* Disable all the interrupts */
1327 wr32(E1000_IMC, ~0);
1328 wrfl();
1329 msleep(10);
1330
1331 /* Unhook test interrupt handler */
1332 if (adapter->msix_entries)
1333 free_irq(adapter->msix_entries[0].vector, adapter);
1334 else
1335 free_irq(irq, adapter);
1336
1337 return *data;
1338}
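
Editor's note: the per-bit loop above only exercises interrupt causes the hardware lets software set; every ICS bit outside the mask is skipped. A small sketch enumerating the testable bits for one of the masks listed above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ics_mask = 0x77D4FBFD;	/* 82576 writable ICS bits, as above */
	int i, n = 0;

	for (i = 0; i < 31; i++) {	/* same bit range the test walks */
		uint32_t mask = 1u << i;

		if (!(mask & ics_mask))
			continue;
		printf("bit %2d (mask 0x%08x) would be tested\n",
		       i, (unsigned)mask);
		n++;
	}
	printf("%d of 31 cause bits exercised\n", n);
	return 0;
}
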
1339
1340static void igb_free_desc_rings(struct igb_adapter *adapter)
1341{
1342 igb_free_tx_resources(&adapter->test_tx_ring);
1343 igb_free_rx_resources(&adapter->test_rx_ring);
1344}
1345
1346static int igb_setup_desc_rings(struct igb_adapter *adapter)
1347{
1348 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1349 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1350 struct e1000_hw *hw = &adapter->hw;
1351 int ret_val;
1352
1353 /* Setup Tx descriptor ring and Tx buffers */
1354 tx_ring->count = IGB_DEFAULT_TXD;
1355 tx_ring->dev = &adapter->pdev->dev;
1356 tx_ring->netdev = adapter->netdev;
1357 tx_ring->reg_idx = adapter->vfs_allocated_count;
1358
1359 if (igb_setup_tx_resources(tx_ring)) {
1360 ret_val = 1;
1361 goto err_nomem;
1362 }
1363
1364 igb_setup_tctl(adapter);
1365 igb_configure_tx_ring(adapter, tx_ring);
1366
1367 /* Setup Rx descriptor ring and Rx buffers */
1368 rx_ring->count = IGB_DEFAULT_RXD;
1369 rx_ring->dev = &adapter->pdev->dev;
1370 rx_ring->netdev = adapter->netdev;
1371 rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
1372 rx_ring->reg_idx = adapter->vfs_allocated_count;
1373
1374 if (igb_setup_rx_resources(rx_ring)) {
1375 ret_val = 3;
1376 goto err_nomem;
1377 }
1378
1379 /* set the default queue to queue 0 of PF */
1380 wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);
1381
1382 /* enable receive ring */
1383 igb_setup_rctl(adapter);
1384 igb_configure_rx_ring(adapter, rx_ring);
1385
1386 igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring));
1387
1388 return 0;
1389
1390err_nomem:
1391 igb_free_desc_rings(adapter);
1392 return ret_val;
1393}
1394
1395static void igb_phy_disable_receiver(struct igb_adapter *adapter)
1396{
1397 struct e1000_hw *hw = &adapter->hw;
1398
1399 /* Write out to PHY registers 29 and 30 to disable the Receiver. */
1400 igb_write_phy_reg(hw, 29, 0x001F);
1401 igb_write_phy_reg(hw, 30, 0x8FFC);
1402 igb_write_phy_reg(hw, 29, 0x001A);
1403 igb_write_phy_reg(hw, 30, 0x8FF0);
1404}
1405
1406static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1407{
1408 struct e1000_hw *hw = &adapter->hw;
1409 u32 ctrl_reg = 0;
1410
1411 hw->mac.autoneg = false;
1412
1413 if (hw->phy.type == e1000_phy_m88) {
1414 /* Auto-MDI/MDIX Off */
1415 igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
1416 /* reset to update Auto-MDI/MDIX */
1417 igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
1418 /* autoneg off */
1419 igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
1420 } else if (hw->phy.type == e1000_phy_82580) {
1421 /* enable MII loopback */
1422 igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
1423 }
1424
1425 ctrl_reg = rd32(E1000_CTRL);
1426
1427 /* force 1000, set loopback */
1428 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
1429
1430 /* Now set up the MAC to the same speed/duplex as the PHY. */
1431 ctrl_reg = rd32(E1000_CTRL);
1432 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1433 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1434 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1435 E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */
1436 E1000_CTRL_FD | /* Force Duplex to FULL */
1437 E1000_CTRL_SLU); /* Set link up enable bit */
1438
1439 if (hw->phy.type == e1000_phy_m88)
1440 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1441
1442 wr32(E1000_CTRL, ctrl_reg);
1443
1444 /* Disable the receiver on the PHY so that the PHY does not begin to
1445 * autoneg when a cable is reconnected to the NIC.
1446 */
1447 if (hw->phy.type == e1000_phy_m88)
1448 igb_phy_disable_receiver(adapter);
1449
1450 udelay(500);
1451
1452 return 0;
1453}
1454
1455static int igb_set_phy_loopback(struct igb_adapter *adapter)
1456{
1457 return igb_integrated_phy_loopback(adapter);
1458}
1459
1460static int igb_setup_loopback_test(struct igb_adapter *adapter)
1461{
1462 struct e1000_hw *hw = &adapter->hw;
1463 u32 reg;
1464
1465 reg = rd32(E1000_CTRL_EXT);
1466
1467 /* use CTRL_EXT to identify link type as SGMII can appear as copper */
1468 if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
1469 if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
1470 (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
1471 (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
1472 (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
1473
1474 /* Enable DH89xxCC MPHY for near end loopback */
1475 reg = rd32(E1000_MPHY_ADDR_CTL);
1476 reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
1477 E1000_MPHY_PCS_CLK_REG_OFFSET;
1478 wr32(E1000_MPHY_ADDR_CTL, reg);
1479
1480 reg = rd32(E1000_MPHY_DATA);
1481 reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
1482 wr32(E1000_MPHY_DATA, reg);
1483 }
1484
1485 reg = rd32(E1000_RCTL);
1486 reg |= E1000_RCTL_LBM_TCVR;
1487 wr32(E1000_RCTL, reg);
1488
1489 wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
1490
1491 reg = rd32(E1000_CTRL);
1492 reg &= ~(E1000_CTRL_RFCE |
1493 E1000_CTRL_TFCE |
1494 E1000_CTRL_LRST);
1495 reg |= E1000_CTRL_SLU |
1496 E1000_CTRL_FD;
1497 wr32(E1000_CTRL, reg);
1498
1499 /* Unset switch control to serdes energy detect */
1500 reg = rd32(E1000_CONNSW);
1501 reg &= ~E1000_CONNSW_ENRGSRC;
1502 wr32(E1000_CONNSW, reg);
1503
1504 /* Set PCS register for forced speed */
1505 reg = rd32(E1000_PCS_LCTL);
1506 reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg */
1507 reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
1508 E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
1509 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
1510 E1000_PCS_LCTL_FSD | /* Force Speed */
1511 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
1512 wr32(E1000_PCS_LCTL, reg);
1513
1514 return 0;
1515 }
1516
1517 return igb_set_phy_loopback(adapter);
1518}
1519
1520static void igb_loopback_cleanup(struct igb_adapter *adapter)
1521{
1522 struct e1000_hw *hw = &adapter->hw;
1523 u32 rctl;
1524 u16 phy_reg;
1525
1526 if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
1527 (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
1528 (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
1529 (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
1530 u32 reg;
1531
1532 /* Disable near end loopback on DH89xxCC */
1533 reg = rd32(E1000_MPHY_ADDR_CTL);
1534 reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
1535 E1000_MPHY_PCS_CLK_REG_OFFSET;
1536 wr32(E1000_MPHY_ADDR_CTL, reg);
1537
1538 reg = rd32(E1000_MPHY_DATA);
1539 reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
1540 wr32(E1000_MPHY_DATA, reg);
1541 }
1542
1543 rctl = rd32(E1000_RCTL);
1544 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1545 wr32(E1000_RCTL, rctl);
1546
1547 hw->mac.autoneg = true;
1548 igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
1549 if (phy_reg & MII_CR_LOOPBACK) {
1550 phy_reg &= ~MII_CR_LOOPBACK;
1551 igb_write_phy_reg(hw, PHY_CONTROL, phy_reg);
1552 igb_phy_sw_reset(hw);
1553 }
1554}
1555
1556static void igb_create_lbtest_frame(struct sk_buff *skb,
1557 unsigned int frame_size)
1558{
1559 memset(skb->data, 0xFF, frame_size);
1560 frame_size /= 2;
1561 memset(&skb->data[frame_size], 0xAA, frame_size - 1);
1562 memset(&skb->data[frame_size + 10], 0xBE, 1);
1563 memset(&skb->data[frame_size + 12], 0xAF, 1);
1564}
1565
1566static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1567{
1568 frame_size /= 2;
1569 if (*(skb->data + 3) == 0xFF) {
1570 if ((*(skb->data + frame_size + 10) == 0xBE) &&
1571 (*(skb->data + frame_size + 12) == 0xAF)) {
1572 return 0;
1573 }
1574 }
1575 return 13;
1576}
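
Editor's note: the transmit frame and its check use a fixed signature: the first half of the buffer is 0xFF, the second half 0xAA, with 0xBE/0xAF marker bytes at half+10 and half+12; the receive check samples only byte 3 and the two markers. A userspace round-trip of the same pattern:

#include <stdio.h>
#include <string.h>

static void create_lbtest_frame(unsigned char *data, unsigned int frame_size)
{
	memset(data, 0xFF, frame_size);
	frame_size /= 2;
	memset(&data[frame_size], 0xAA, frame_size - 1);
	data[frame_size + 10] = 0xBE;
	data[frame_size + 12] = 0xAF;
}

static int check_lbtest_frame(const unsigned char *data, unsigned int frame_size)
{
	frame_size /= 2;
	if (data[3] == 0xFF &&
	    data[frame_size + 10] == 0xBE &&
	    data[frame_size + 12] == 0xAF)
		return 0;
	return 13;	/* same failure code the driver returns */
}

int main(void)
{
	unsigned char frame[1024];

	create_lbtest_frame(frame, sizeof(frame));
	printf("check: %d\n", check_lbtest_frame(frame, sizeof(frame)));
	return 0;
}
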
1577
1578static int igb_clean_test_rings(struct igb_ring *rx_ring,
1579 struct igb_ring *tx_ring,
1580 unsigned int size)
1581{
1582 union e1000_adv_rx_desc *rx_desc;
1583 struct igb_buffer *buffer_info;
1584 int rx_ntc, tx_ntc, count = 0;
1585 u32 staterr;
1586
1587 /* initialize next to clean and descriptor values */
1588 rx_ntc = rx_ring->next_to_clean;
1589 tx_ntc = tx_ring->next_to_clean;
1590 rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
1591 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1592
1593 while (staterr & E1000_RXD_STAT_DD) {
1594 /* check rx buffer */
1595 buffer_info = &rx_ring->buffer_info[rx_ntc];
1596
1597 /* unmap rx buffer, will be remapped by alloc_rx_buffers */
1598 dma_unmap_single(rx_ring->dev,
1599 buffer_info->dma,
1600 rx_ring->rx_buffer_len,
1601 DMA_FROM_DEVICE);
1602 buffer_info->dma = 0;
1603
1604 /* verify contents of skb */
1605 if (!igb_check_lbtest_frame(buffer_info->skb, size))
1606 count++;
1607
1608 /* unmap buffer on tx side */
1609 buffer_info = &tx_ring->buffer_info[tx_ntc];
1610 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
1611
1612 /* increment rx/tx next to clean counters */
1613 rx_ntc++;
1614 if (rx_ntc == rx_ring->count)
1615 rx_ntc = 0;
1616 tx_ntc++;
1617 if (tx_ntc == tx_ring->count)
1618 tx_ntc = 0;
1619
1620 /* fetch next descriptor */
1621 rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
1622 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1623 }
1624
1625 /* re-map buffers to ring, store next to clean values */
1626 igb_alloc_rx_buffers_adv(rx_ring, count);
1627 rx_ring->next_to_clean = rx_ntc;
1628 tx_ring->next_to_clean = tx_ntc;
1629
1630 return count;
1631}
1632
1633static int igb_run_loopback_test(struct igb_adapter *adapter)
1634{
1635 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1636 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1637 int i, j, lc, good_cnt, ret_val = 0;
1638 unsigned int size = 1024;
1639 netdev_tx_t tx_ret_val;
1640 struct sk_buff *skb;
1641
1642 /* allocate test skb */
1643 skb = alloc_skb(size, GFP_KERNEL);
1644 if (!skb)
1645 return 11;
1646
1647 /* place data into test skb */
1648 igb_create_lbtest_frame(skb, size);
1649 skb_put(skb, size);
1650
1651 /*
1652 * Calculate the loop count based on the largest descriptor ring.
1653 * The idea is to wrap the largest ring a number of times using 64
1654 * send/receive pairs during each loop.
1655 */
1656
1657 if (rx_ring->count <= tx_ring->count)
1658 lc = ((tx_ring->count / 64) * 2) + 1;
1659 else
1660 lc = ((rx_ring->count / 64) * 2) + 1;
1661
1662 for (j = 0; j <= lc; j++) { /* loop count loop */
1663 /* reset count of good packets */
1664 good_cnt = 0;
1665
1666 /* place 64 packets on the transmit queue */
1667 for (i = 0; i < 64; i++) {
1668 skb_get(skb);
1669 tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring);
1670 if (tx_ret_val == NETDEV_TX_OK)
1671 good_cnt++;
1672 }
1673
1674 if (good_cnt != 64) {
1675 ret_val = 12;
1676 break;
1677 }
1678
1679 /* allow 200 milliseconds for packets to go from tx to rx */
1680 msleep(200);
1681
1682 good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
1683 if (good_cnt != 64) {
1684 ret_val = 13;
1685 break;
1686 }
1687 } /* end loop count loop */
1688
1689 /* free the original skb */
1690 kfree_skb(skb);
1691
1692 return ret_val;
1693}
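
Editor's note: with the default 256-descriptor rings, lc = (256 / 64) * 2 + 1 = 9, so nine bursts of 64 frames (576 total) wrap a 256-entry ring a little more than twice. A one-liner to sanity-check that sizing (256 here stands in for IGB_DEFAULT_TXD/RXD, an assumption):

#include <stdio.h>

int main(void)
{
	int tx = 256, rx = 256;			/* assumed default ring sizes */
	int larger = rx > tx ? rx : tx;
	int lc = (larger / 64) * 2 + 1;		/* loop count, as computed above */

	printf("lc=%d, frames=%d, ring wraps ~%d times\n",
	       lc, lc * 64, (lc * 64) / larger);
	return 0;
}
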
1694
1695static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
1696{
1697 /* PHY loopback cannot be performed if SoL/IDER
1698 * sessions are active */
1699 if (igb_check_reset_block(&adapter->hw)) {
1700 dev_err(&adapter->pdev->dev,
1701 "Cannot do PHY loopback test "
1702 "when SoL/IDER is active.\n");
1703 *data = 0;
1704 goto out;
1705 }
1706 *data = igb_setup_desc_rings(adapter);
1707 if (*data)
1708 goto out;
1709 *data = igb_setup_loopback_test(adapter);
1710 if (*data)
1711 goto err_loopback;
1712 *data = igb_run_loopback_test(adapter);
1713 igb_loopback_cleanup(adapter);
1714
1715err_loopback:
1716 igb_free_desc_rings(adapter);
1717out:
1718 return *data;
1719}
1720
1721static int igb_link_test(struct igb_adapter *adapter, u64 *data)
1722{
1723 struct e1000_hw *hw = &adapter->hw;
1724 *data = 0;
1725 if (hw->phy.media_type == e1000_media_type_internal_serdes) {
1726 int i = 0;
1727 hw->mac.serdes_has_link = false;
1728
1729 /* On some blade server designs, link establishment
1730 * could take as long as 2-3 minutes */
1731 do {
1732 hw->mac.ops.check_for_link(&adapter->hw);
1733 if (hw->mac.serdes_has_link)
1734 return *data;
1735 msleep(20);
1736 } while (i++ < 3750);
1737
1738 *data = 1;
1739 } else {
1740 hw->mac.ops.check_for_link(&adapter->hw);
1741 if (hw->mac.autoneg)
1742 msleep(4000);
1743
1744 if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
1745 *data = 1;
1746 }
1747 return *data;
1748}
1749
1750static void igb_diag_test(struct net_device *netdev,
1751 struct ethtool_test *eth_test, u64 *data)
1752{
1753 struct igb_adapter *adapter = netdev_priv(netdev);
1754 u16 autoneg_advertised;
1755 u8 forced_speed_duplex, autoneg;
1756 bool if_running = netif_running(netdev);
1757
1758 set_bit(__IGB_TESTING, &adapter->state);
1759 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1760 /* Offline tests */
1761
1762 /* save speed, duplex, autoneg settings */
1763 autoneg_advertised = adapter->hw.phy.autoneg_advertised;
1764 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
1765 autoneg = adapter->hw.mac.autoneg;
1766
1767 dev_info(&adapter->pdev->dev, "offline testing starting\n");
1768
1769 /* power up link for link test */
1770 igb_power_up_link(adapter);
1771
1772 /* Link test is performed before the hardware reset so autoneg
1773 * doesn't interfere with the test result */
1774 if (igb_link_test(adapter, &data[4]))
1775 eth_test->flags |= ETH_TEST_FL_FAILED;
1776
1777 if (if_running)
1778 /* indicate we're in test mode */
1779 dev_close(netdev);
1780 else
1781 igb_reset(adapter);
1782
1783 if (igb_reg_test(adapter, &data[0]))
1784 eth_test->flags |= ETH_TEST_FL_FAILED;
1785
1786 igb_reset(adapter);
1787 if (igb_eeprom_test(adapter, &data[1]))
1788 eth_test->flags |= ETH_TEST_FL_FAILED;
1789
1790 igb_reset(adapter);
1791 if (igb_intr_test(adapter, &data[2]))
1792 eth_test->flags |= ETH_TEST_FL_FAILED;
1793
1794 igb_reset(adapter);
1795 /* power up link for loopback test */
1796 igb_power_up_link(adapter);
1797 if (igb_loopback_test(adapter, &data[3]))
1798 eth_test->flags |= ETH_TEST_FL_FAILED;
1799
1800 /* restore speed, duplex, autoneg settings */
1801 adapter->hw.phy.autoneg_advertised = autoneg_advertised;
1802 adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
1803 adapter->hw.mac.autoneg = autoneg;
1804
1805 /* force this routine to wait until autoneg complete/timeout */
1806 adapter->hw.phy.autoneg_wait_to_complete = true;
1807 igb_reset(adapter);
1808 adapter->hw.phy.autoneg_wait_to_complete = false;
1809
1810 clear_bit(__IGB_TESTING, &adapter->state);
1811 if (if_running)
1812 dev_open(netdev);
1813 } else {
1814 dev_info(&adapter->pdev->dev, "online testing starting\n");
1815
1816 /* PHY is powered down when interface is down */
1817 if (if_running && igb_link_test(adapter, &data[4]))
1818 eth_test->flags |= ETH_TEST_FL_FAILED;
1819 else
1820 data[4] = 0;
1821
1822 /* Online tests aren't run; pass by default */
1823 data[0] = 0;
1824 data[1] = 0;
1825 data[2] = 0;
1826 data[3] = 0;
1827
1828 clear_bit(__IGB_TESTING, &adapter->state);
1829 }
1830 msleep_interruptible(4 * 1000);
1831}
1832
1833static int igb_wol_exclusion(struct igb_adapter *adapter,
1834 struct ethtool_wolinfo *wol)
1835{
1836 struct e1000_hw *hw = &adapter->hw;
1837 int retval = 1; /* fail by default */
1838
1839 switch (hw->device_id) {
1840 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1841 /* WoL not supported */
1842 wol->supported = 0;
1843 break;
1844 case E1000_DEV_ID_82575EB_FIBER_SERDES:
1845 case E1000_DEV_ID_82576_FIBER:
1846 case E1000_DEV_ID_82576_SERDES:
1847 /* Wake events not supported on port B */
1848 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) {
1849 wol->supported = 0;
1850 break;
1851 }
1852 /* return success for non excluded adapter ports */
1853 retval = 0;
1854 break;
1855 case E1000_DEV_ID_82576_QUAD_COPPER:
1856 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
1857 /* quad port adapters only support WoL on port A */
1858 if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
1859 wol->supported = 0;
1860 break;
1861 }
1862 /* return success for non excluded adapter ports */
1863 retval = 0;
1864 break;
1865 default:
1866 /* dual port cards only support WoL on port A from now on,
1867 * unless it was enabled in the eeprom for port B,
1868 * so exclude FUNC_1 ports from having WoL enabled */
1869 if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
1870 !adapter->eeprom_wol) {
1871 wol->supported = 0;
1872 break;
1873 }
1874
1875 retval = 0;
1876 }
1877
1878 return retval;
1879}
1880
1881static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1882{
1883 struct igb_adapter *adapter = netdev_priv(netdev);
1884
1885 wol->supported = WAKE_UCAST | WAKE_MCAST |
1886 WAKE_BCAST | WAKE_MAGIC |
1887 WAKE_PHY;
1888 wol->wolopts = 0;
1889
1890 /* this function will set ->supported = 0 and return 1 if wol is not
1891 * supported by this hardware */
1892 if (igb_wol_exclusion(adapter, wol) ||
1893 !device_can_wakeup(&adapter->pdev->dev))
1894 return;
1895
1896 /* apply any specific unsupported masks here */
1897 switch (adapter->hw.device_id) {
1898 default:
1899 break;
1900 }
1901
1902 if (adapter->wol & E1000_WUFC_EX)
1903 wol->wolopts |= WAKE_UCAST;
1904 if (adapter->wol & E1000_WUFC_MC)
1905 wol->wolopts |= WAKE_MCAST;
1906 if (adapter->wol & E1000_WUFC_BC)
1907 wol->wolopts |= WAKE_BCAST;
1908 if (adapter->wol & E1000_WUFC_MAG)
1909 wol->wolopts |= WAKE_MAGIC;
1910 if (adapter->wol & E1000_WUFC_LNKC)
1911 wol->wolopts |= WAKE_PHY;
1912}
1913
1914static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1915{
1916 struct igb_adapter *adapter = netdev_priv(netdev);
1917
1918 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
1919 return -EOPNOTSUPP;
1920
1921 if (igb_wol_exclusion(adapter, wol) ||
1922 !device_can_wakeup(&adapter->pdev->dev))
1923 return wol->wolopts ? -EOPNOTSUPP : 0;
1924
1925 /* these settings will always override what we currently have */
1926 adapter->wol = 0;
1927
1928 if (wol->wolopts & WAKE_UCAST)
1929 adapter->wol |= E1000_WUFC_EX;
1930 if (wol->wolopts & WAKE_MCAST)
1931 adapter->wol |= E1000_WUFC_MC;
1932 if (wol->wolopts & WAKE_BCAST)
1933 adapter->wol |= E1000_WUFC_BC;
1934 if (wol->wolopts & WAKE_MAGIC)
1935 adapter->wol |= E1000_WUFC_MAG;
1936 if (wol->wolopts & WAKE_PHY)
1937 adapter->wol |= E1000_WUFC_LNKC;
1938 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1939
1940 return 0;
1941}
1942
1943/* bit defines for adapter->led_status */
1944#define IGB_LED_ON 0
1945
1946static int igb_set_phys_id(struct net_device *netdev,
1947 enum ethtool_phys_id_state state)
1948{
1949 struct igb_adapter *adapter = netdev_priv(netdev);
1950 struct e1000_hw *hw = &adapter->hw;
1951
1952 switch (state) {
1953 case ETHTOOL_ID_ACTIVE:
1954 igb_blink_led(hw);
1955 return 2;
1956 case ETHTOOL_ID_ON:
1957 igb_blink_led(hw);
1958 break;
1959 case ETHTOOL_ID_OFF:
1960 igb_led_off(hw);
1961 break;
1962 case ETHTOOL_ID_INACTIVE:
1963 igb_led_off(hw);
1964 clear_bit(IGB_LED_ON, &adapter->led_status);
1965 igb_cleanup_led(hw);
1966 break;
1967 }
1968
1969 return 0;
1970}
1971
1972static int igb_set_coalesce(struct net_device *netdev,
1973 struct ethtool_coalesce *ec)
1974{
1975 struct igb_adapter *adapter = netdev_priv(netdev);
1976 int i;
1977
1978 if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
1979 ((ec->rx_coalesce_usecs > 3) &&
1980 (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
1981 (ec->rx_coalesce_usecs == 2))
1982 return -EINVAL;
1983
1984 if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
1985 ((ec->tx_coalesce_usecs > 3) &&
1986 (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
1987 (ec->tx_coalesce_usecs == 2))
1988 return -EINVAL;
1989
1990 if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
1991 return -EINVAL;
1992
1993 /* If ITR is disabled, disable DMAC */
1994 if (ec->rx_coalesce_usecs == 0) {
1995 if (adapter->flags & IGB_FLAG_DMAC)
1996 adapter->flags &= ~IGB_FLAG_DMAC;
1997 }
1998
1999 /* 1-3 select adaptive ITR modes; otherwise store usecs as usecs << 2 */
2000 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
2001 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
2002 else
2003 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
2004
2005 /* same encoding for tx; with queue pairs, tx shares the rx setting */
2006 if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
2007 adapter->tx_itr_setting = adapter->rx_itr_setting;
2008 else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
2009 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
2010 else
2011 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
2012
2013 for (i = 0; i < adapter->num_q_vectors; i++) {
2014 struct igb_q_vector *q_vector = adapter->q_vector[i];
2015 if (q_vector->rx_ring)
2016 q_vector->itr_val = adapter->rx_itr_setting;
2017 else
2018 q_vector->itr_val = adapter->tx_itr_setting;
2019 if (q_vector->itr_val && q_vector->itr_val <= 3)
2020 q_vector->itr_val = IGB_START_ITR;
2021 q_vector->set_itr = 1;
2022 }
2023
2024 return 0;
2025}
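
Editor's note: a quick way to see the set/get symmetry above: values 1 and 3 pass through as adaptive-mode selectors (2 is rejected earlier with -EINVAL), and anything larger is stored shifted left by two, i.e. in quarter-microsecond units. A minimal sketch of the encode/decode pair:

#include <stdio.h>

/* Mirrors igb_set_coalesce(): 1-3 are mode selectors, else usecs << 2. */
static unsigned itr_store(unsigned usecs)
{
	return (usecs && usecs <= 3) ? usecs : usecs << 2;
}

/* Mirrors igb_get_coalesce(): settings <= 3 are returned as-is. */
static unsigned itr_load(unsigned setting)
{
	return (setting <= 3) ? setting : setting >> 2;
}

int main(void)
{
	unsigned usecs[] = { 0, 1, 3, 10, 100 };
	unsigned i;

	for (i = 0; i < sizeof(usecs) / sizeof(usecs[0]); i++)
		printf("usecs=%u -> setting=%u -> usecs=%u\n",
		       usecs[i], itr_store(usecs[i]),
		       itr_load(itr_store(usecs[i])));
	return 0;
}
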
2026
2027static int igb_get_coalesce(struct net_device *netdev,
2028 struct ethtool_coalesce *ec)
2029{
2030 struct igb_adapter *adapter = netdev_priv(netdev);
2031
2032 if (adapter->rx_itr_setting <= 3)
2033 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2034 else
2035 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2036
2037 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
2038 if (adapter->tx_itr_setting <= 3)
2039 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2040 else
2041 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2042 }
2043
2044 return 0;
2045}
2046
2047static int igb_nway_reset(struct net_device *netdev)
2048{
2049 struct igb_adapter *adapter = netdev_priv(netdev);
2050 if (netif_running(netdev))
2051 igb_reinit_locked(adapter);
2052 return 0;
2053}
2054
2055static int igb_get_sset_count(struct net_device *netdev, int sset)
2056{
2057 switch (sset) {
2058 case ETH_SS_STATS:
2059 return IGB_STATS_LEN;
2060 case ETH_SS_TEST:
2061 return IGB_TEST_LEN;
2062 default:
2063 return -ENOTSUPP;
2064 }
2065}
2066
2067static void igb_get_ethtool_stats(struct net_device *netdev,
2068 struct ethtool_stats *stats, u64 *data)
2069{
2070 struct igb_adapter *adapter = netdev_priv(netdev);
2071 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
2072 unsigned int start;
2073 struct igb_ring *ring;
2074 int i, j;
2075 char *p;
2076
2077 spin_lock(&adapter->stats64_lock);
2078 igb_update_stats(adapter, net_stats);
2079
2080 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
2081 p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
2082 data[i] = (igb_gstrings_stats[i].sizeof_stat ==
2083 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2084 }
2085 for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
2086 p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
2087 data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
2088 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2089 }
2090 for (j = 0; j < adapter->num_tx_queues; j++) {
2091 u64 restart2;
2092
2093 ring = adapter->tx_ring[j];
2094 do {
2095 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
2096 data[i] = ring->tx_stats.packets;
2097 data[i+1] = ring->tx_stats.bytes;
2098 data[i+2] = ring->tx_stats.restart_queue;
2099 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
2100 do {
2101 start = u64_stats_fetch_begin_bh(&ring->tx_syncp2);
2102 restart2 = ring->tx_stats.restart_queue2;
2103 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start));
2104 data[i+2] += restart2;
2105
2106 i += IGB_TX_QUEUE_STATS_LEN;
2107 }
2108 for (j = 0; j < adapter->num_rx_queues; j++) {
2109 ring = adapter->rx_ring[j];
2110 do {
2111 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
2112 data[i] = ring->rx_stats.packets;
2113 data[i+1] = ring->rx_stats.bytes;
2114 data[i+2] = ring->rx_stats.drops;
2115 data[i+3] = ring->rx_stats.csum_err;
2116 data[i+4] = ring->rx_stats.alloc_failed;
2117 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
2118 i += IGB_RX_QUEUE_STATS_LEN;
2119 }
2120 spin_unlock(&adapter->stats64_lock);
2121}
2122
2123static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2124{
2125 struct igb_adapter *adapter = netdev_priv(netdev);
2126 u8 *p = data;
2127 int i;
2128
2129 switch (stringset) {
2130 case ETH_SS_TEST:
2131 memcpy(data, *igb_gstrings_test,
2132 IGB_TEST_LEN * ETH_GSTRING_LEN);
2133 break;
2134 case ETH_SS_STATS:
2135 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
2136 memcpy(p, igb_gstrings_stats[i].stat_string,
2137 ETH_GSTRING_LEN);
2138 p += ETH_GSTRING_LEN;
2139 }
2140 for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
2141 memcpy(p, igb_gstrings_net_stats[i].stat_string,
2142 ETH_GSTRING_LEN);
2143 p += ETH_GSTRING_LEN;
2144 }
2145 for (i = 0; i < adapter->num_tx_queues; i++) {
2146 sprintf(p, "tx_queue_%u_packets", i);
2147 p += ETH_GSTRING_LEN;
2148 sprintf(p, "tx_queue_%u_bytes", i);
2149 p += ETH_GSTRING_LEN;
2150 sprintf(p, "tx_queue_%u_restart", i);
2151 p += ETH_GSTRING_LEN;
2152 }
2153 for (i = 0; i < adapter->num_rx_queues; i++) {
2154 sprintf(p, "rx_queue_%u_packets", i);
2155 p += ETH_GSTRING_LEN;
2156 sprintf(p, "rx_queue_%u_bytes", i);
2157 p += ETH_GSTRING_LEN;
2158 sprintf(p, "rx_queue_%u_drops", i);
2159 p += ETH_GSTRING_LEN;
2160 sprintf(p, "rx_queue_%u_csum_err", i);
2161 p += ETH_GSTRING_LEN;
2162 sprintf(p, "rx_queue_%u_alloc_failed", i);
2163 p += ETH_GSTRING_LEN;
2164 }
2165/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
2166 break;
2167 }
2168}
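
Editor's note: the string table filled in above is a flat array of fixed-width slots: each name occupies exactly ETH_GSTRING_LEN (32) bytes regardless of its length, which is why p advances by ETH_GSTRING_LEN rather than by strlen. A userspace sketch of the same layout:

#include <stdio.h>

#define ETH_GSTRING_LEN 32	/* fixed slot width, as in linux/ethtool.h */

int main(void)
{
	char strings[3 * ETH_GSTRING_LEN] = { 0 };
	char *p = strings;
	unsigned i;

	for (i = 0; i < 3; i++) {
		sprintf(p, "tx_queue_%u_packets", i);
		p += ETH_GSTRING_LEN;	/* jump to the next fixed-width slot */
	}
	for (i = 0; i < 3; i++)
		printf("slot %u: %s\n", i, strings + i * ETH_GSTRING_LEN);
	return 0;
}
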
2169
2170static const struct ethtool_ops igb_ethtool_ops = {
2171 .get_settings = igb_get_settings,
2172 .set_settings = igb_set_settings,
2173 .get_drvinfo = igb_get_drvinfo,
2174 .get_regs_len = igb_get_regs_len,
2175 .get_regs = igb_get_regs,
2176 .get_wol = igb_get_wol,
2177 .set_wol = igb_set_wol,
2178 .get_msglevel = igb_get_msglevel,
2179 .set_msglevel = igb_set_msglevel,
2180 .nway_reset = igb_nway_reset,
2181 .get_link = igb_get_link,
2182 .get_eeprom_len = igb_get_eeprom_len,
2183 .get_eeprom = igb_get_eeprom,
2184 .set_eeprom = igb_set_eeprom,
2185 .get_ringparam = igb_get_ringparam,
2186 .set_ringparam = igb_set_ringparam,
2187 .get_pauseparam = igb_get_pauseparam,
2188 .set_pauseparam = igb_set_pauseparam,
2189 .self_test = igb_diag_test,
2190 .get_strings = igb_get_strings,
2191 .set_phys_id = igb_set_phys_id,
2192 .get_sset_count = igb_get_sset_count,
2193 .get_ethtool_stats = igb_get_ethtool_stats,
2194 .get_coalesce = igb_get_coalesce,
2195 .set_coalesce = igb_set_coalesce,
2196};
2197
2198void igb_set_ethtool_ops(struct net_device *netdev)
2199{
2200 SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
2201}
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
new file mode 100644
index 00000000000..40d4c405fd7
--- /dev/null
+++ b/drivers/net/igb/igb_main.c
@@ -0,0 +1,6890 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/module.h>
29#include <linux/types.h>
30#include <linux/init.h>
31#include <linux/bitops.h>
32#include <linux/vmalloc.h>
33#include <linux/pagemap.h>
34#include <linux/netdevice.h>
35#include <linux/ipv6.h>
36#include <linux/slab.h>
37#include <net/checksum.h>
38#include <net/ip6_checksum.h>
39#include <linux/net_tstamp.h>
40#include <linux/mii.h>
41#include <linux/ethtool.h>
42#include <linux/if_vlan.h>
43#include <linux/pci.h>
44#include <linux/pci-aspm.h>
45#include <linux/delay.h>
46#include <linux/interrupt.h>
47#include <linux/if_ether.h>
48#include <linux/aer.h>
49#include <linux/prefetch.h>
50#ifdef CONFIG_IGB_DCA
51#include <linux/dca.h>
52#endif
53#include "igb.h"
54
55#define MAJ 3
56#define MIN 0
57#define BUILD 6
58#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
59__stringify(BUILD) "-k"
60char igb_driver_name[] = "igb";
61char igb_driver_version[] = DRV_VERSION;
62static const char igb_driver_string[] =
63 "Intel(R) Gigabit Ethernet Network Driver";
64static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";
65
66static const struct e1000_info *igb_info_tbl[] = {
67 [board_82575] = &e1000_82575_info,
68};
69
70static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
79 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
81 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
82 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
83 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
84 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
85 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
86 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
87 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
88 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
89 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
90 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
91 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
92 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
93 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
94 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
95 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
96 /* required last entry */
97 {0, }
98};
99
100MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
101
102void igb_reset(struct igb_adapter *);
103static int igb_setup_all_tx_resources(struct igb_adapter *);
104static int igb_setup_all_rx_resources(struct igb_adapter *);
105static void igb_free_all_tx_resources(struct igb_adapter *);
106static void igb_free_all_rx_resources(struct igb_adapter *);
107static void igb_setup_mrqc(struct igb_adapter *);
108static int igb_probe(struct pci_dev *, const struct pci_device_id *);
109static void __devexit igb_remove(struct pci_dev *pdev);
110static void igb_init_hw_timer(struct igb_adapter *adapter);
111static int igb_sw_init(struct igb_adapter *);
112static int igb_open(struct net_device *);
113static int igb_close(struct net_device *);
114static void igb_configure_tx(struct igb_adapter *);
115static void igb_configure_rx(struct igb_adapter *);
116static void igb_clean_all_tx_rings(struct igb_adapter *);
117static void igb_clean_all_rx_rings(struct igb_adapter *);
118static void igb_clean_tx_ring(struct igb_ring *);
119static void igb_clean_rx_ring(struct igb_ring *);
120static void igb_set_rx_mode(struct net_device *);
121static void igb_update_phy_info(unsigned long);
122static void igb_watchdog(unsigned long);
123static void igb_watchdog_task(struct work_struct *);
124static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
125static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
126 struct rtnl_link_stats64 *stats);
127static int igb_change_mtu(struct net_device *, int);
128static int igb_set_mac(struct net_device *, void *);
129static void igb_set_uta(struct igb_adapter *adapter);
130static irqreturn_t igb_intr(int irq, void *);
131static irqreturn_t igb_intr_msi(int irq, void *);
132static irqreturn_t igb_msix_other(int irq, void *);
133static irqreturn_t igb_msix_ring(int irq, void *);
134#ifdef CONFIG_IGB_DCA
135static void igb_update_dca(struct igb_q_vector *);
136static void igb_setup_dca(struct igb_adapter *);
137#endif /* CONFIG_IGB_DCA */
138static bool igb_clean_tx_irq(struct igb_q_vector *);
139static int igb_poll(struct napi_struct *, int);
140static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
141static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
142static void igb_tx_timeout(struct net_device *);
143static void igb_reset_task(struct work_struct *);
144static void igb_vlan_mode(struct net_device *netdev, u32 features);
145static void igb_vlan_rx_add_vid(struct net_device *, u16);
146static void igb_vlan_rx_kill_vid(struct net_device *, u16);
147static void igb_restore_vlan(struct igb_adapter *);
148static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
149static void igb_ping_all_vfs(struct igb_adapter *);
150static void igb_msg_task(struct igb_adapter *);
151static void igb_vmm_control(struct igb_adapter *);
152static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
153static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
154static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
155static int igb_ndo_set_vf_vlan(struct net_device *netdev,
156 int vf, u16 vlan, u8 qos);
157static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
158static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
159 struct ifla_vf_info *ivi);
160static void igb_check_vf_rate_limit(struct igb_adapter *);
161
162#ifdef CONFIG_PM
163static int igb_suspend(struct pci_dev *, pm_message_t);
164static int igb_resume(struct pci_dev *);
165#endif
166static void igb_shutdown(struct pci_dev *);
167#ifdef CONFIG_IGB_DCA
168static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
169static struct notifier_block dca_notifier = {
170 .notifier_call = igb_notify_dca,
171 .next = NULL,
172 .priority = 0
173};
174#endif
175#ifdef CONFIG_NET_POLL_CONTROLLER
176/* for netdump / net console */
177static void igb_netpoll(struct net_device *);
178#endif
179#ifdef CONFIG_PCI_IOV
180static unsigned int max_vfs;
181module_param(max_vfs, uint, 0);
182MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
183 "per physical function");
184#endif /* CONFIG_PCI_IOV */
185
186static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
187 pci_channel_state_t);
188static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
189static void igb_io_resume(struct pci_dev *);
190
191static struct pci_error_handlers igb_err_handler = {
192 .error_detected = igb_io_error_detected,
193 .slot_reset = igb_io_slot_reset,
194 .resume = igb_io_resume,
195};
196
197
198static struct pci_driver igb_driver = {
199 .name = igb_driver_name,
200 .id_table = igb_pci_tbl,
201 .probe = igb_probe,
202 .remove = __devexit_p(igb_remove),
203#ifdef CONFIG_PM
204 /* Power Management Hooks */
205 .suspend = igb_suspend,
206 .resume = igb_resume,
207#endif
208 .shutdown = igb_shutdown,
209 .err_handler = &igb_err_handler
210};
211
212MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
213MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
214MODULE_LICENSE("GPL");
215MODULE_VERSION(DRV_VERSION);
216
217struct igb_reg_info {
218 u32 ofs;
219 char *name;
220};
221
222static const struct igb_reg_info igb_reg_info_tbl[] = {
223
224 /* General Registers */
225 {E1000_CTRL, "CTRL"},
226 {E1000_STATUS, "STATUS"},
227 {E1000_CTRL_EXT, "CTRL_EXT"},
228
229 /* Interrupt Registers */
230 {E1000_ICR, "ICR"},
231
232 /* RX Registers */
233 {E1000_RCTL, "RCTL"},
234 {E1000_RDLEN(0), "RDLEN"},
235 {E1000_RDH(0), "RDH"},
236 {E1000_RDT(0), "RDT"},
237 {E1000_RXDCTL(0), "RXDCTL"},
238 {E1000_RDBAL(0), "RDBAL"},
239 {E1000_RDBAH(0), "RDBAH"},
240
241 /* TX Registers */
242 {E1000_TCTL, "TCTL"},
243 {E1000_TDBAL(0), "TDBAL"},
244 {E1000_TDBAH(0), "TDBAH"},
245 {E1000_TDLEN(0), "TDLEN"},
246 {E1000_TDH(0), "TDH"},
247 {E1000_TDT(0), "TDT"},
248 {E1000_TXDCTL(0), "TXDCTL"},
249 {E1000_TDFH, "TDFH"},
250 {E1000_TDFT, "TDFT"},
251 {E1000_TDFHS, "TDFHS"},
252 {E1000_TDFPC, "TDFPC"},
253
254 /* List Terminator */
255 {}
256};
257
258/*
259 * igb_regdump - register printout routine
260 */
261static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
262{
263 int n = 0;
264 char rname[16];
265 u32 regs[8];
266
267 switch (reginfo->ofs) {
268 case E1000_RDLEN(0):
269 for (n = 0; n < 4; n++)
270 regs[n] = rd32(E1000_RDLEN(n));
271 break;
272 case E1000_RDH(0):
273 for (n = 0; n < 4; n++)
274 regs[n] = rd32(E1000_RDH(n));
275 break;
276 case E1000_RDT(0):
277 for (n = 0; n < 4; n++)
278 regs[n] = rd32(E1000_RDT(n));
279 break;
280 case E1000_RXDCTL(0):
281 for (n = 0; n < 4; n++)
282 regs[n] = rd32(E1000_RXDCTL(n));
283 break;
284 case E1000_RDBAL(0):
285 for (n = 0; n < 4; n++)
286 regs[n] = rd32(E1000_RDBAL(n));
287 break;
288 case E1000_RDBAH(0):
289 for (n = 0; n < 4; n++)
290 regs[n] = rd32(E1000_RDBAH(n));
291 break;
292 case E1000_TDBAL(0):
293 for (n = 0; n < 4; n++)
294 regs[n] = rd32(E1000_TDBAL(n));
295 break;
296 case E1000_TDBAH(0):
297 for (n = 0; n < 4; n++)
298 regs[n] = rd32(E1000_TDBAH(n));
299 break;
300 case E1000_TDLEN(0):
301 for (n = 0; n < 4; n++)
302 regs[n] = rd32(E1000_TDLEN(n));
303 break;
304 case E1000_TDH(0):
305 for (n = 0; n < 4; n++)
306 regs[n] = rd32(E1000_TDH(n));
307 break;
308 case E1000_TDT(0):
309 for (n = 0; n < 4; n++)
310 regs[n] = rd32(E1000_TDT(n));
311 break;
312 case E1000_TXDCTL(0):
313 for (n = 0; n < 4; n++)
314 regs[n] = rd32(E1000_TXDCTL(n));
315 break;
316 default:
317 printk(KERN_INFO "%-15s %08x\n",
318 reginfo->name, rd32(reginfo->ofs));
319 return;
320 }
321
322 snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
323 printk(KERN_INFO "%-15s ", rname);
324 for (n = 0; n < 4; n++)
325 printk(KERN_CONT "%08x ", regs[n]);
326 printk(KERN_CONT "\n");
327}
328
329/*
330 * igb_dump - Print registers, tx-rings and rx-rings
331 */
332static void igb_dump(struct igb_adapter *adapter)
333{
334 struct net_device *netdev = adapter->netdev;
335 struct e1000_hw *hw = &adapter->hw;
336 struct igb_reg_info *reginfo;
337 int n = 0;
338 struct igb_ring *tx_ring;
339 union e1000_adv_tx_desc *tx_desc;
340 struct my_u0 { u64 a; u64 b; } *u0;
341 struct igb_buffer *buffer_info;
342 struct igb_ring *rx_ring;
343 union e1000_adv_rx_desc *rx_desc;
344 u32 staterr;
345 int i = 0;
346
347 if (!netif_msg_hw(adapter))
348 return;
349
350 /* Print netdevice Info */
351 if (netdev) {
352 dev_info(&adapter->pdev->dev, "Net device Info\n");
353 printk(KERN_INFO "Device Name state "
354 "trans_start last_rx\n");
355 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
356 netdev->name,
357 netdev->state,
358 netdev->trans_start,
359 netdev->last_rx);
360 }
361
362 /* Print Registers */
363 dev_info(&adapter->pdev->dev, "Register Dump\n");
364 printk(KERN_INFO " Register Name Value\n");
365 for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
366 reginfo->name; reginfo++) {
367 igb_regdump(hw, reginfo);
368 }
369
370 /* Print TX Ring Summary */
371 if (!netdev || !netif_running(netdev))
372 goto exit;
373
374 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
375 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
376 " leng ntw timestamp\n");
377 for (n = 0; n < adapter->num_tx_queues; n++) {
378 tx_ring = adapter->tx_ring[n];
379 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
380 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
381 n, tx_ring->next_to_use, tx_ring->next_to_clean,
382 (u64)buffer_info->dma,
383 buffer_info->length,
384 buffer_info->next_to_watch,
385 (u64)buffer_info->time_stamp);
386 }
387
388 /* Print TX Rings */
389 if (!netif_msg_tx_done(adapter))
390 goto rx_ring_summary;
391
392 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
393
394 /* Transmit Descriptor Formats
395 *
396 * Advanced Transmit Descriptor
397 * +--------------------------------------------------------------+
398 * 0 | Buffer Address [63:0] |
399 * +--------------------------------------------------------------+
400 * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
401 * +--------------------------------------------------------------+
402 * 63 46 45 40 39 38 36 35 32 31 24 15 0
403 */
404
405 for (n = 0; n < adapter->num_tx_queues; n++) {
406 tx_ring = adapter->tx_ring[n];
407 printk(KERN_INFO "------------------------------------\n");
408 printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
409 printk(KERN_INFO "------------------------------------\n");
410 printk(KERN_INFO "T [desc] [address 63:0 ] "
411 "[PlPOCIStDDM Ln] [bi->dma ] "
412 "leng ntw timestamp bi->skb\n");
413
414 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
415 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
416 buffer_info = &tx_ring->buffer_info[i];
417 u0 = (struct my_u0 *)tx_desc;
418 printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
419 " %04X %3X %016llX %p", i,
420 le64_to_cpu(u0->a),
421 le64_to_cpu(u0->b),
422 (u64)buffer_info->dma,
423 buffer_info->length,
424 buffer_info->next_to_watch,
425 (u64)buffer_info->time_stamp,
426 buffer_info->skb);
427 if (i == tx_ring->next_to_use &&
428 i == tx_ring->next_to_clean)
429 printk(KERN_CONT " NTC/U\n");
430 else if (i == tx_ring->next_to_use)
431 printk(KERN_CONT " NTU\n");
432 else if (i == tx_ring->next_to_clean)
433 printk(KERN_CONT " NTC\n");
434 else
435 printk(KERN_CONT "\n");
436
437 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
438 print_hex_dump(KERN_INFO, "",
439 DUMP_PREFIX_ADDRESS,
440 16, 1, phys_to_virt(buffer_info->dma),
441 buffer_info->length, true);
442 }
443 }
444
445 /* Print RX Rings Summary */
446rx_ring_summary:
447 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
448 printk(KERN_INFO "Queue [NTU] [NTC]\n");
449 for (n = 0; n < adapter->num_rx_queues; n++) {
450 rx_ring = adapter->rx_ring[n];
451 printk(KERN_INFO " %5d %5X %5X\n", n,
452 rx_ring->next_to_use, rx_ring->next_to_clean);
453 }
454
455 /* Print RX Rings */
456 if (!netif_msg_rx_status(adapter))
457 goto exit;
458
459 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
460
461 /* Advanced Receive Descriptor (Read) Format
462 * 63 1 0
463 * +-----------------------------------------------------+
464 * 0 | Packet Buffer Address [63:1] |A0/NSE|
465 * +----------------------------------------------+------+
466 * 8 | Header Buffer Address [63:1] | DD |
467 * +-----------------------------------------------------+
468 *
469 *
470 * Advanced Receive Descriptor (Write-Back) Format
471 *
472 * 63 48 47 32 31 30 21 20 17 16 4 3 0
473 * +------------------------------------------------------+
474 * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
475 * | Checksum Ident | | | | Type | Type |
476 * +------------------------------------------------------+
477 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
478 * +------------------------------------------------------+
479 * 63 48 47 32 31 20 19 0
480 */
481
482 for (n = 0; n < adapter->num_rx_queues; n++) {
483 rx_ring = adapter->rx_ring[n];
484 printk(KERN_INFO "------------------------------------\n");
485 printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
486 printk(KERN_INFO "------------------------------------\n");
487 printk(KERN_INFO "R [desc] [ PktBuf A0] "
488 "[ HeadBuf DD] [bi->dma ] [bi->skb] "
489 "<-- Adv Rx Read format\n");
490 printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
491 "[vl er S cks ln] ---------------- [bi->skb] "
492 "<-- Adv Rx Write-Back format\n");
493
494 for (i = 0; i < rx_ring->count; i++) {
495 buffer_info = &rx_ring->buffer_info[i];
496 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
497 u0 = (struct my_u0 *)rx_desc;
498 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
499 if (staterr & E1000_RXD_STAT_DD) {
500 /* Descriptor Done */
501 printk(KERN_INFO "RWB[0x%03X] %016llX "
502 "%016llX ---------------- %p", i,
503 le64_to_cpu(u0->a),
504 le64_to_cpu(u0->b),
505 buffer_info->skb);
506 } else {
507 printk(KERN_INFO "R [0x%03X] %016llX "
508 "%016llX %016llX %p", i,
509 le64_to_cpu(u0->a),
510 le64_to_cpu(u0->b),
511 (u64)buffer_info->dma,
512 buffer_info->skb);
513
514 if (netif_msg_pktdata(adapter)) {
515 print_hex_dump(KERN_INFO, "",
516 DUMP_PREFIX_ADDRESS,
517 16, 1,
518 phys_to_virt(buffer_info->dma),
519 rx_ring->rx_buffer_len, true);
520 if (rx_ring->rx_buffer_len
521 < IGB_RXBUFFER_1024)
522 print_hex_dump(KERN_INFO, "",
523 DUMP_PREFIX_ADDRESS,
524 16, 1,
525 phys_to_virt(
526 buffer_info->page_dma +
527 buffer_info->page_offset),
528 PAGE_SIZE/2, true);
529 }
530 }
531
532 if (i == rx_ring->next_to_use)
533 printk(KERN_CONT " NTU\n");
534 else if (i == rx_ring->next_to_clean)
535 printk(KERN_CONT " NTC\n");
536 else
537 printk(KERN_CONT "\n");
538
539 }
540 }
541
542exit:
543 return;
544}
545
546
547/**
548 * igb_read_clock - read raw cycle counter (to be used by time counter)
549 */
550static cycle_t igb_read_clock(const struct cyclecounter *tc)
551{
552 struct igb_adapter *adapter =
553 container_of(tc, struct igb_adapter, cycles);
554 struct e1000_hw *hw = &adapter->hw;
555 u64 stamp = 0;
556 int shift = 0;
557
558 /*
559 * The timestamp latches on lowest register read. For the 82580
560	 * the lowest register is SYSTIMR instead of SYSTIML. However, we never
561	 * adjusted TIMINCA, so SYSTIMR reads as all 0s and can safely be ignored.
562 */
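	/*
	 * Sketch of the resulting composition, restating the reads below:
	 *   stamp = (SYSTIMH << (shift + 32)) | (SYSTIML << shift)
	 *           | (SYSTIMR >> 8 on 82580, else 0)
	 * with shift = IGB_82580_TSYNC_SHIFT on 82580 and 0 otherwise.
	 */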
563 if (hw->mac.type == e1000_82580) {
564 stamp = rd32(E1000_SYSTIMR) >> 8;
565 shift = IGB_82580_TSYNC_SHIFT;
566 }
567
568 stamp |= (u64)rd32(E1000_SYSTIML) << shift;
569 stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
570 return stamp;
571}
572
573/**
574 * igb_get_hw_dev - return device
575 * used by hardware layer to print debugging information
576 **/
577struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
578{
579 struct igb_adapter *adapter = hw->back;
580 return adapter->netdev;
581}
582
583/**
584 * igb_init_module - Driver Registration Routine
585 *
586 * igb_init_module is the first routine called when the driver is
587 * loaded. All it does is register with the PCI subsystem.
588 **/
589static int __init igb_init_module(void)
590{
591 int ret;
592 printk(KERN_INFO "%s - version %s\n",
593 igb_driver_string, igb_driver_version);
594
595 printk(KERN_INFO "%s\n", igb_copyright);
596
597#ifdef CONFIG_IGB_DCA
598 dca_register_notify(&dca_notifier);
599#endif
600 ret = pci_register_driver(&igb_driver);
601 return ret;
602}
603
604module_init(igb_init_module);
605
606/**
607 * igb_exit_module - Driver Exit Cleanup Routine
608 *
609 * igb_exit_module is called just before the driver is removed
610 * from memory.
611 **/
612static void __exit igb_exit_module(void)
613{
614#ifdef CONFIG_IGB_DCA
615 dca_unregister_notify(&dca_notifier);
616#endif
617 pci_unregister_driver(&igb_driver);
618}
619
620module_exit(igb_exit_module);
621
622#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
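/*
 * Worked example, derived from the macro above (not from the datasheet):
 *   i     : 0  1  2  3  4  5 ...
 *   Q_IDX : 0  8  1  9  2 10 ...
 * i.e. successive indices alternate between the low and high halves of
 * the queue register space, giving each VF a low/high queue pair.
 */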
623/**
624 * igb_cache_ring_register - Descriptor ring to register mapping
625 * @adapter: board private structure to initialize
626 *
627 * Once we know the feature-set enabled for the device, we'll cache
628 * the register offset the descriptor ring is assigned to.
629 **/
630static void igb_cache_ring_register(struct igb_adapter *adapter)
631{
632 int i = 0, j = 0;
633 u32 rbase_offset = adapter->vfs_allocated_count;
634
635 switch (adapter->hw.mac.type) {
636 case e1000_82576:
637 /* The queues are allocated for virtualization such that VF 0
638 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
639 * In order to avoid collision we start at the first free queue
640 * and continue consuming queues in the same sequence
641 */
642 if (adapter->vfs_allocated_count) {
643 for (; i < adapter->rss_queues; i++)
644 adapter->rx_ring[i]->reg_idx = rbase_offset +
645 Q_IDX_82576(i);
646 }
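		/* Fall through - remaining queues use the contiguous
		 * mapping below */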
647 case e1000_82575:
648 case e1000_82580:
649 case e1000_i350:
650 default:
651 for (; i < adapter->num_rx_queues; i++)
652 adapter->rx_ring[i]->reg_idx = rbase_offset + i;
653 for (; j < adapter->num_tx_queues; j++)
654 adapter->tx_ring[j]->reg_idx = rbase_offset + j;
655 break;
656 }
657}
658
659static void igb_free_queues(struct igb_adapter *adapter)
660{
661 int i;
662
663 for (i = 0; i < adapter->num_tx_queues; i++) {
664 kfree(adapter->tx_ring[i]);
665 adapter->tx_ring[i] = NULL;
666 }
667 for (i = 0; i < adapter->num_rx_queues; i++) {
668 kfree(adapter->rx_ring[i]);
669 adapter->rx_ring[i] = NULL;
670 }
671 adapter->num_rx_queues = 0;
672 adapter->num_tx_queues = 0;
673}
674
675/**
676 * igb_alloc_queues - Allocate memory for all rings
677 * @adapter: board private structure to initialize
678 *
679 * We allocate one ring per queue at run-time since we don't know the
680 * number of queues at compile-time.
681 **/
682static int igb_alloc_queues(struct igb_adapter *adapter)
683{
684 struct igb_ring *ring;
685 int i;
686
687 for (i = 0; i < adapter->num_tx_queues; i++) {
688 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
689 if (!ring)
690 goto err;
691 ring->count = adapter->tx_ring_count;
692 ring->queue_index = i;
693 ring->dev = &adapter->pdev->dev;
694 ring->netdev = adapter->netdev;
695 /* For 82575, context index must be unique per ring. */
696 if (adapter->hw.mac.type == e1000_82575)
697 ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
698 adapter->tx_ring[i] = ring;
699 }
700
701 for (i = 0; i < adapter->num_rx_queues; i++) {
702 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
703 if (!ring)
704 goto err;
705 ring->count = adapter->rx_ring_count;
706 ring->queue_index = i;
707 ring->dev = &adapter->pdev->dev;
708 ring->netdev = adapter->netdev;
709 ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
710 ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
711 /* set flag indicating ring supports SCTP checksum offload */
712 if (adapter->hw.mac.type >= e1000_82576)
713 ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
714 adapter->rx_ring[i] = ring;
715 }
716
717 igb_cache_ring_register(adapter);
718
719 return 0;
720
721err:
722 igb_free_queues(adapter);
723
724 return -ENOMEM;
725}
726
727#define IGB_N0_QUEUE -1
728static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
729{
730 u32 msixbm = 0;
731 struct igb_adapter *adapter = q_vector->adapter;
732 struct e1000_hw *hw = &adapter->hw;
733 u32 ivar, index;
734 int rx_queue = IGB_N0_QUEUE;
735 int tx_queue = IGB_N0_QUEUE;
736
737 if (q_vector->rx_ring)
738 rx_queue = q_vector->rx_ring->reg_idx;
739 if (q_vector->tx_ring)
740 tx_queue = q_vector->tx_ring->reg_idx;
741
742 switch (hw->mac.type) {
743 case e1000_82575:
744 /* The 82575 assigns vectors using a bitmask, which matches the
745 bitmask for the EICR/EIMS/EIMC registers. To assign one
746 or more queues to a vector, we write the appropriate bits
747 into the MSIXBM register for that vector. */
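		/*
		 * Worked example, derived from the shifts below: a vector
		 * serving rx queue 2 and tx queue 0 gets
		 *   msixbm = (E1000_EICR_RX_QUEUE0 << 2) |
		 *            E1000_EICR_TX_QUEUE0;
		 */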
748 if (rx_queue > IGB_N0_QUEUE)
749 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
750 if (tx_queue > IGB_N0_QUEUE)
751 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
752 if (!adapter->msix_entries && msix_vector == 0)
753 msixbm |= E1000_EIMS_OTHER;
754 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
755 q_vector->eims_value = msixbm;
756 break;
757 case e1000_82576:
758 /* 82576 uses a table-based method for assigning vectors.
759 Each queue has a single entry in the table to which we write
760 a vector number along with a "valid" bit. Sadly, the layout
761 of the table is somewhat counterintuitive. */
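		/*
		 * Illustrative layout, inferred from the masks below: each
		 * 32-bit IVAR0 entry n packs four mappings, one per byte:
		 *   byte 0: rx queue n      byte 1: tx queue n
		 *   byte 2: rx queue n + 8  byte 3: tx queue n + 8
		 * e.g. pointing rx queue 9 at vector 3 updates byte 2 of
		 * entry 1:
		 *   ivar = (ivar & 0xFF00FFFF) |
		 *          ((3 | E1000_IVAR_VALID) << 16);
		 */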
762 if (rx_queue > IGB_N0_QUEUE) {
763 index = (rx_queue & 0x7);
764 ivar = array_rd32(E1000_IVAR0, index);
765 if (rx_queue < 8) {
766 /* vector goes into low byte of register */
767 ivar = ivar & 0xFFFFFF00;
768 ivar |= msix_vector | E1000_IVAR_VALID;
769 } else {
770 /* vector goes into third byte of register */
771 ivar = ivar & 0xFF00FFFF;
772 ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
773 }
774 array_wr32(E1000_IVAR0, index, ivar);
775 }
776 if (tx_queue > IGB_N0_QUEUE) {
777 index = (tx_queue & 0x7);
778 ivar = array_rd32(E1000_IVAR0, index);
779 if (tx_queue < 8) {
780 /* vector goes into second byte of register */
781 ivar = ivar & 0xFFFF00FF;
782 ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
783 } else {
784 /* vector goes into high byte of register */
785 ivar = ivar & 0x00FFFFFF;
786 ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
787 }
788 array_wr32(E1000_IVAR0, index, ivar);
789 }
790 q_vector->eims_value = 1 << msix_vector;
791 break;
792 case e1000_82580:
793 case e1000_i350:
794		/* 82580 uses the same table-based approach as 82576 but has
795		   fewer entries; as a result entries carry over for queues
		   greater than 4. */
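		/*
		 * Inferred from the shifts below: each IVAR entry covers an
		 * even/odd queue pair, e.g. rx queue 3 lands in entry 1
		 * (3 >> 1) and, being odd, in the third byte of that entry.
		 */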
796 if (rx_queue > IGB_N0_QUEUE) {
797 index = (rx_queue >> 1);
798 ivar = array_rd32(E1000_IVAR0, index);
799 if (rx_queue & 0x1) {
800 /* vector goes into third byte of register */
801 ivar = ivar & 0xFF00FFFF;
802 ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
803 } else {
804 /* vector goes into low byte of register */
805 ivar = ivar & 0xFFFFFF00;
806 ivar |= msix_vector | E1000_IVAR_VALID;
807 }
808 array_wr32(E1000_IVAR0, index, ivar);
809 }
810 if (tx_queue > IGB_N0_QUEUE) {
811 index = (tx_queue >> 1);
812 ivar = array_rd32(E1000_IVAR0, index);
813 if (tx_queue & 0x1) {
814 /* vector goes into high byte of register */
815 ivar = ivar & 0x00FFFFFF;
816 ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
817 } else {
818 /* vector goes into second byte of register */
819 ivar = ivar & 0xFFFF00FF;
820 ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
821 }
822 array_wr32(E1000_IVAR0, index, ivar);
823 }
824 q_vector->eims_value = 1 << msix_vector;
825 break;
826 default:
827 BUG();
828 break;
829 }
830
831 /* add q_vector eims value to global eims_enable_mask */
832 adapter->eims_enable_mask |= q_vector->eims_value;
833
834 /* configure q_vector to set itr on first interrupt */
835 q_vector->set_itr = 1;
836}
837
838/**
839 * igb_configure_msix - Configure MSI-X hardware
840 *
841 * igb_configure_msix sets up the hardware to properly
842 * generate MSI-X interrupts.
843 **/
844static void igb_configure_msix(struct igb_adapter *adapter)
845{
846 u32 tmp;
847 int i, vector = 0;
848 struct e1000_hw *hw = &adapter->hw;
849
850 adapter->eims_enable_mask = 0;
851
852 /* set vector for other causes, i.e. link changes */
853 switch (hw->mac.type) {
854 case e1000_82575:
855 tmp = rd32(E1000_CTRL_EXT);
856 /* enable MSI-X PBA support*/
857 tmp |= E1000_CTRL_EXT_PBA_CLR;
858
859 /* Auto-Mask interrupts upon ICR read. */
860 tmp |= E1000_CTRL_EXT_EIAME;
861 tmp |= E1000_CTRL_EXT_IRCA;
862
863 wr32(E1000_CTRL_EXT, tmp);
864
865 /* enable msix_other interrupt */
866 array_wr32(E1000_MSIXBM(0), vector++,
867 E1000_EIMS_OTHER);
868 adapter->eims_other = E1000_EIMS_OTHER;
869
870 break;
871
872 case e1000_82576:
873 case e1000_82580:
874 case e1000_i350:
875 /* Turn on MSI-X capability first, or our settings
876 * won't stick. And it will take days to debug. */
877 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
878 E1000_GPIE_PBA | E1000_GPIE_EIAME |
879 E1000_GPIE_NSICR);
880
881 /* enable msix_other interrupt */
882 adapter->eims_other = 1 << vector;
883 tmp = (vector++ | E1000_IVAR_VALID) << 8;
884
885 wr32(E1000_IVAR_MISC, tmp);
886 break;
887 default:
888 /* do nothing, since nothing else supports MSI-X */
889 break;
890 } /* switch (hw->mac.type) */
891
892 adapter->eims_enable_mask |= adapter->eims_other;
893
894 for (i = 0; i < adapter->num_q_vectors; i++)
895 igb_assign_vector(adapter->q_vector[i], vector++);
896
897 wrfl();
898}
899
900/**
901 * igb_request_msix - Initialize MSI-X interrupts
902 *
903 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
904 * kernel.
905 **/
906static int igb_request_msix(struct igb_adapter *adapter)
907{
908 struct net_device *netdev = adapter->netdev;
909 struct e1000_hw *hw = &adapter->hw;
910 int i, err = 0, vector = 0;
911
912 err = request_irq(adapter->msix_entries[vector].vector,
913 igb_msix_other, 0, netdev->name, adapter);
914 if (err)
915 goto out;
916 vector++;
917
918 for (i = 0; i < adapter->num_q_vectors; i++) {
919 struct igb_q_vector *q_vector = adapter->q_vector[i];
920
921 q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
922
923 if (q_vector->rx_ring && q_vector->tx_ring)
924 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
925 q_vector->rx_ring->queue_index);
926 else if (q_vector->tx_ring)
927 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
928 q_vector->tx_ring->queue_index);
929 else if (q_vector->rx_ring)
930 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
931 q_vector->rx_ring->queue_index);
932 else
933 sprintf(q_vector->name, "%s-unused", netdev->name);
934
935 err = request_irq(adapter->msix_entries[vector].vector,
936 igb_msix_ring, 0, q_vector->name,
937 q_vector);
938 if (err)
939 goto out;
940 vector++;
941 }
942
943 igb_configure_msix(adapter);
944 return 0;
945out:
946 return err;
947}
948
949static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
950{
951 if (adapter->msix_entries) {
952 pci_disable_msix(adapter->pdev);
953 kfree(adapter->msix_entries);
954 adapter->msix_entries = NULL;
955 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
956 pci_disable_msi(adapter->pdev);
957 }
958}
959
960/**
961 * igb_free_q_vectors - Free memory allocated for interrupt vectors
962 * @adapter: board private structure to initialize
963 *
964 * This function frees the memory allocated to the q_vectors. In addition if
965 * NAPI is enabled it will delete any references to the NAPI struct prior
966 * to freeing the q_vector.
967 **/
968static void igb_free_q_vectors(struct igb_adapter *adapter)
969{
970 int v_idx;
971
972 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
973 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
974 adapter->q_vector[v_idx] = NULL;
975 if (!q_vector)
976 continue;
977 netif_napi_del(&q_vector->napi);
978 kfree(q_vector);
979 }
980 adapter->num_q_vectors = 0;
981}
982
983/**
984 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
985 *
986 * This function resets the device so that it has 0 rx queues, tx queues, and
987 * MSI-X interrupts allocated.
988 */
989static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
990{
991 igb_free_queues(adapter);
992 igb_free_q_vectors(adapter);
993 igb_reset_interrupt_capability(adapter);
994}
995
996/**
997 * igb_set_interrupt_capability - set MSI or MSI-X if supported
998 *
999 * Attempt to configure interrupts using the best available
1000 * capabilities of the hardware and kernel.
1001 **/
1002static int igb_set_interrupt_capability(struct igb_adapter *adapter)
1003{
1004 int err;
1005 int numvecs, i;
1006
1007 /* Number of supported queues. */
1008 adapter->num_rx_queues = adapter->rss_queues;
1009 if (adapter->vfs_allocated_count)
1010 adapter->num_tx_queues = 1;
1011 else
1012 adapter->num_tx_queues = adapter->rss_queues;
1013
1014 /* start with one vector for every rx queue */
1015 numvecs = adapter->num_rx_queues;
1016
1017 /* if tx handler is separate add 1 for every tx queue */
1018 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1019 numvecs += adapter->num_tx_queues;
1020
1021 /* store the number of vectors reserved for queues */
1022 adapter->num_q_vectors = numvecs;
1023
1024 /* add 1 vector for link status interrupts */
1025 numvecs++;
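	/*
	 * Worked example (assuming no VFs): with 4 RSS queues and queue
	 * pairing disabled this requests 4 rx + 4 tx + 1 link = 9 vectors;
	 * with IGB_FLAG_QUEUE_PAIRS set it would be 4 + 1 = 5.
	 */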
1026 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
1027 GFP_KERNEL);
1028 if (!adapter->msix_entries)
1029 goto msi_only;
1030
1031 for (i = 0; i < numvecs; i++)
1032 adapter->msix_entries[i].entry = i;
1033
1034 err = pci_enable_msix(adapter->pdev,
1035 adapter->msix_entries,
1036 numvecs);
1037 if (err == 0)
1038 goto out;
1039
1040 igb_reset_interrupt_capability(adapter);
1041
1042 /* If we can't do MSI-X, try MSI */
1043msi_only:
1044#ifdef CONFIG_PCI_IOV
1045 /* disable SR-IOV for non MSI-X configurations */
1046 if (adapter->vf_data) {
1047 struct e1000_hw *hw = &adapter->hw;
1048 /* disable iov and allow time for transactions to clear */
1049 pci_disable_sriov(adapter->pdev);
1050 msleep(500);
1051
1052 kfree(adapter->vf_data);
1053 adapter->vf_data = NULL;
1054 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1055 wrfl();
1056 msleep(100);
1057 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1058 }
1059#endif
1060 adapter->vfs_allocated_count = 0;
1061 adapter->rss_queues = 1;
1062 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1063 adapter->num_rx_queues = 1;
1064 adapter->num_tx_queues = 1;
1065 adapter->num_q_vectors = 1;
1066 if (!pci_enable_msi(adapter->pdev))
1067 adapter->flags |= IGB_FLAG_HAS_MSI;
1068out:
1069 /* Notify the stack of the (possibly) reduced queue counts. */
1070 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
1071 return netif_set_real_num_rx_queues(adapter->netdev,
1072 adapter->num_rx_queues);
1073}
1074
1075/**
1076 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
1077 * @adapter: board private structure to initialize
1078 *
1079 * We allocate one q_vector per queue interrupt. If allocation fails we
1080 * return -ENOMEM.
1081 **/
1082static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1083{
1084 struct igb_q_vector *q_vector;
1085 struct e1000_hw *hw = &adapter->hw;
1086 int v_idx;
1087
1088 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
1089 q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
1090 if (!q_vector)
1091 goto err_out;
1092 q_vector->adapter = adapter;
1093 q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
1094 q_vector->itr_val = IGB_START_ITR;
1095 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
1096 adapter->q_vector[v_idx] = q_vector;
1097 }
1098 return 0;
1099
1100err_out:
1101 igb_free_q_vectors(adapter);
1102 return -ENOMEM;
1103}
1104
1105static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
1106 int ring_idx, int v_idx)
1107{
1108 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1109
1110 q_vector->rx_ring = adapter->rx_ring[ring_idx];
1111 q_vector->rx_ring->q_vector = q_vector;
1112 q_vector->itr_val = adapter->rx_itr_setting;
1113 if (q_vector->itr_val && q_vector->itr_val <= 3)
1114 q_vector->itr_val = IGB_START_ITR;
1115}
1116
1117static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
1118 int ring_idx, int v_idx)
1119{
1120 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1121
1122 q_vector->tx_ring = adapter->tx_ring[ring_idx];
1123 q_vector->tx_ring->q_vector = q_vector;
1124 q_vector->itr_val = adapter->tx_itr_setting;
1125 if (q_vector->itr_val && q_vector->itr_val <= 3)
1126 q_vector->itr_val = IGB_START_ITR;
1127}
1128
1129/**
1130 * igb_map_ring_to_vector - maps allocated queues to vectors
1131 *
1132 * This function maps the recently allocated queues to vectors.
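 *
 * If enough vectors are available each ring gets its own vector;
 * otherwise rx ring i and tx ring i share vector i (e.g. 4 rx and
 * 4 tx rings on 4 vectors yields one rx/tx pair per vector).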
1133 **/
1134static int igb_map_ring_to_vector(struct igb_adapter *adapter)
1135{
1136 int i;
1137 int v_idx = 0;
1138
1139 if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
1140 (adapter->num_q_vectors < adapter->num_tx_queues))
1141 return -ENOMEM;
1142
1143 if (adapter->num_q_vectors >=
1144 (adapter->num_rx_queues + adapter->num_tx_queues)) {
1145 for (i = 0; i < adapter->num_rx_queues; i++)
1146 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1147 for (i = 0; i < adapter->num_tx_queues; i++)
1148 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1149 } else {
1150 for (i = 0; i < adapter->num_rx_queues; i++) {
1151 if (i < adapter->num_tx_queues)
1152 igb_map_tx_ring_to_vector(adapter, i, v_idx);
1153 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1154 }
1155 for (; i < adapter->num_tx_queues; i++)
1156 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1157 }
1158 return 0;
1159}
1160
1161/**
1162 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1163 *
1164 * This function initializes the interrupts and allocates all of the queues.
1165 **/
1166static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
1167{
1168 struct pci_dev *pdev = adapter->pdev;
1169 int err;
1170
1171 err = igb_set_interrupt_capability(adapter);
1172 if (err)
1173 return err;
1174
1175 err = igb_alloc_q_vectors(adapter);
1176 if (err) {
1177 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1178 goto err_alloc_q_vectors;
1179 }
1180
1181 err = igb_alloc_queues(adapter);
1182 if (err) {
1183 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1184 goto err_alloc_queues;
1185 }
1186
1187 err = igb_map_ring_to_vector(adapter);
1188 if (err) {
1189 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
1190 goto err_map_queues;
1191 }
1192
1193
1194 return 0;
1195err_map_queues:
1196 igb_free_queues(adapter);
1197err_alloc_queues:
1198 igb_free_q_vectors(adapter);
1199err_alloc_q_vectors:
1200 igb_reset_interrupt_capability(adapter);
1201 return err;
1202}
1203
1204/**
1205 * igb_request_irq - initialize interrupts
1206 *
1207 * Attempts to configure interrupts using the best available
1208 * capabilities of the hardware and kernel.
1209 **/
1210static int igb_request_irq(struct igb_adapter *adapter)
1211{
1212 struct net_device *netdev = adapter->netdev;
1213 struct pci_dev *pdev = adapter->pdev;
1214 int err = 0;
1215
1216 if (adapter->msix_entries) {
1217 err = igb_request_msix(adapter);
1218 if (!err)
1219 goto request_done;
1220 /* fall back to MSI */
1221 igb_clear_interrupt_scheme(adapter);
1222 if (!pci_enable_msi(adapter->pdev))
1223 adapter->flags |= IGB_FLAG_HAS_MSI;
1224 igb_free_all_tx_resources(adapter);
1225 igb_free_all_rx_resources(adapter);
1226 adapter->num_tx_queues = 1;
1227 adapter->num_rx_queues = 1;
1228 adapter->num_q_vectors = 1;
1229 err = igb_alloc_q_vectors(adapter);
1230 if (err) {
1231 dev_err(&pdev->dev,
1232 "Unable to allocate memory for vectors\n");
1233 goto request_done;
1234 }
1235 err = igb_alloc_queues(adapter);
1236 if (err) {
1237 dev_err(&pdev->dev,
1238 "Unable to allocate memory for queues\n");
1239 igb_free_q_vectors(adapter);
1240 goto request_done;
1241 }
1242 igb_setup_all_tx_resources(adapter);
1243 igb_setup_all_rx_resources(adapter);
1244 } else {
1245 igb_assign_vector(adapter->q_vector[0], 0);
1246 }
1247
1248 if (adapter->flags & IGB_FLAG_HAS_MSI) {
1249 err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
1250 netdev->name, adapter);
1251 if (!err)
1252 goto request_done;
1253
1254 /* fall back to legacy interrupts */
1255 igb_reset_interrupt_capability(adapter);
1256 adapter->flags &= ~IGB_FLAG_HAS_MSI;
1257 }
1258
1259 err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
1260 netdev->name, adapter);
1261
1262 if (err)
1263 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
1264 err);
1265
1266request_done:
1267 return err;
1268}
1269
1270static void igb_free_irq(struct igb_adapter *adapter)
1271{
1272 if (adapter->msix_entries) {
1273 int vector = 0, i;
1274
1275 free_irq(adapter->msix_entries[vector++].vector, adapter);
1276
1277 for (i = 0; i < adapter->num_q_vectors; i++) {
1278 struct igb_q_vector *q_vector = adapter->q_vector[i];
1279 free_irq(adapter->msix_entries[vector++].vector,
1280 q_vector);
1281 }
1282 } else {
1283 free_irq(adapter->pdev->irq, adapter);
1284 }
1285}
1286
1287/**
1288 * igb_irq_disable - Mask off interrupt generation on the NIC
1289 * @adapter: board private structure
1290 **/
1291static void igb_irq_disable(struct igb_adapter *adapter)
1292{
1293 struct e1000_hw *hw = &adapter->hw;
1294
1295 /*
1296 * we need to be careful when disabling interrupts. The VFs are also
1297 * mapped into these registers and so clearing the bits can cause
1298	 * issues for the VF drivers, so we only clear the bits we set
1299 */
1300 if (adapter->msix_entries) {
1301 u32 regval = rd32(E1000_EIAM);
1302 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1303 wr32(E1000_EIMC, adapter->eims_enable_mask);
1304 regval = rd32(E1000_EIAC);
1305 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
1306 }
1307
1308 wr32(E1000_IAM, 0);
1309 wr32(E1000_IMC, ~0);
1310 wrfl();
1311 if (adapter->msix_entries) {
1312 int i;
1313 for (i = 0; i < adapter->num_q_vectors; i++)
1314 synchronize_irq(adapter->msix_entries[i].vector);
1315 } else {
1316 synchronize_irq(adapter->pdev->irq);
1317 }
1318}
1319
1320/**
1321 * igb_irq_enable - Enable default interrupt generation settings
1322 * @adapter: board private structure
1323 **/
1324static void igb_irq_enable(struct igb_adapter *adapter)
1325{
1326 struct e1000_hw *hw = &adapter->hw;
1327
1328 if (adapter->msix_entries) {
1329 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
1330 u32 regval = rd32(E1000_EIAC);
1331 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1332 regval = rd32(E1000_EIAM);
1333 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
1334 wr32(E1000_EIMS, adapter->eims_enable_mask);
1335 if (adapter->vfs_allocated_count) {
1336 wr32(E1000_MBVFIMR, 0xFF);
1337 ims |= E1000_IMS_VMMB;
1338 }
1339 if (adapter->hw.mac.type == e1000_82580)
1340 ims |= E1000_IMS_DRSTA;
1341
1342 wr32(E1000_IMS, ims);
1343 } else {
1344 wr32(E1000_IMS, IMS_ENABLE_MASK |
1345 E1000_IMS_DRSTA);
1346 wr32(E1000_IAM, IMS_ENABLE_MASK |
1347 E1000_IMS_DRSTA);
1348 }
1349}
1350
1351static void igb_update_mng_vlan(struct igb_adapter *adapter)
1352{
1353 struct e1000_hw *hw = &adapter->hw;
1354 u16 vid = adapter->hw.mng_cookie.vlan_id;
1355 u16 old_vid = adapter->mng_vlan_id;
1356
1357 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1358 /* add VID to filter table */
1359 igb_vfta_set(hw, vid, true);
1360 adapter->mng_vlan_id = vid;
1361 } else {
1362 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1363 }
1364
1365 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1366 (vid != old_vid) &&
1367 !test_bit(old_vid, adapter->active_vlans)) {
1368 /* remove VID from filter table */
1369 igb_vfta_set(hw, old_vid, false);
1370 }
1371}
1372
1373/**
1374 * igb_release_hw_control - release control of the h/w to f/w
1375 * @adapter: address of board private structure
1376 *
1377 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1378 * For ASF and Pass Through versions of f/w this means that the
1379 * driver is no longer loaded.
1380 *
1381 **/
1382static void igb_release_hw_control(struct igb_adapter *adapter)
1383{
1384 struct e1000_hw *hw = &adapter->hw;
1385 u32 ctrl_ext;
1386
1387 /* Let firmware take over control of h/w */
1388 ctrl_ext = rd32(E1000_CTRL_EXT);
1389 wr32(E1000_CTRL_EXT,
1390 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1391}
1392
1393/**
1394 * igb_get_hw_control - get control of the h/w from f/w
1395 * @adapter: address of board private structure
1396 *
1397 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1398 * For ASF and Pass Through versions of f/w this means that
1399 * the driver is loaded.
1400 *
1401 **/
1402static void igb_get_hw_control(struct igb_adapter *adapter)
1403{
1404 struct e1000_hw *hw = &adapter->hw;
1405 u32 ctrl_ext;
1406
1407 /* Let firmware know the driver has taken over */
1408 ctrl_ext = rd32(E1000_CTRL_EXT);
1409 wr32(E1000_CTRL_EXT,
1410 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1411}
1412
1413/**
1414 * igb_configure - configure the hardware for RX and TX
1415 * @adapter: private board structure
1416 **/
1417static void igb_configure(struct igb_adapter *adapter)
1418{
1419 struct net_device *netdev = adapter->netdev;
1420 int i;
1421
1422 igb_get_hw_control(adapter);
1423 igb_set_rx_mode(netdev);
1424
1425 igb_restore_vlan(adapter);
1426
1427 igb_setup_tctl(adapter);
1428 igb_setup_mrqc(adapter);
1429 igb_setup_rctl(adapter);
1430
1431 igb_configure_tx(adapter);
1432 igb_configure_rx(adapter);
1433
1434 igb_rx_fifo_flush_82575(&adapter->hw);
1435
1436 /* call igb_desc_unused which always leaves
1437 * at least 1 descriptor unused to make sure
1438 * next_to_use != next_to_clean */
1439 for (i = 0; i < adapter->num_rx_queues; i++) {
1440 struct igb_ring *ring = adapter->rx_ring[i];
1441 igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
1442 }
1443}
1444
1445/**
1446 * igb_power_up_link - Power up the phy/serdes link
1447 * @adapter: address of board private structure
1448 **/
1449void igb_power_up_link(struct igb_adapter *adapter)
1450{
1451 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1452 igb_power_up_phy_copper(&adapter->hw);
1453 else
1454 igb_power_up_serdes_link_82575(&adapter->hw);
1455}
1456
1457/**
1458 * igb_power_down_link - Power down the phy/serdes link
1459 * @adapter: address of board private structure
1460 */
1461static void igb_power_down_link(struct igb_adapter *adapter)
1462{
1463 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1464 igb_power_down_phy_copper_82575(&adapter->hw);
1465 else
1466 igb_shutdown_serdes_link_82575(&adapter->hw);
1467}
1468
1469/**
1470 * igb_up - Open the interface and prepare it to handle traffic
1471 * @adapter: board private structure
1472 **/
1473int igb_up(struct igb_adapter *adapter)
1474{
1475 struct e1000_hw *hw = &adapter->hw;
1476 int i;
1477
1478 /* hardware has been reset, we need to reload some things */
1479 igb_configure(adapter);
1480
1481 clear_bit(__IGB_DOWN, &adapter->state);
1482
1483 for (i = 0; i < adapter->num_q_vectors; i++) {
1484 struct igb_q_vector *q_vector = adapter->q_vector[i];
1485 napi_enable(&q_vector->napi);
1486 }
1487 if (adapter->msix_entries)
1488 igb_configure_msix(adapter);
1489 else
1490 igb_assign_vector(adapter->q_vector[0], 0);
1491
1492 /* Clear any pending interrupts. */
1493 rd32(E1000_ICR);
1494 igb_irq_enable(adapter);
1495
1496 /* notify VFs that reset has been completed */
1497 if (adapter->vfs_allocated_count) {
1498 u32 reg_data = rd32(E1000_CTRL_EXT);
1499 reg_data |= E1000_CTRL_EXT_PFRSTD;
1500 wr32(E1000_CTRL_EXT, reg_data);
1501 }
1502
1503 netif_tx_start_all_queues(adapter->netdev);
1504
1505 /* start the watchdog. */
1506 hw->mac.get_link_status = 1;
1507 schedule_work(&adapter->watchdog_task);
1508
1509 return 0;
1510}
1511
1512void igb_down(struct igb_adapter *adapter)
1513{
1514 struct net_device *netdev = adapter->netdev;
1515 struct e1000_hw *hw = &adapter->hw;
1516 u32 tctl, rctl;
1517 int i;
1518
1519 /* signal that we're down so the interrupt handler does not
1520 * reschedule our watchdog timer */
1521 set_bit(__IGB_DOWN, &adapter->state);
1522
1523 /* disable receives in the hardware */
1524 rctl = rd32(E1000_RCTL);
1525 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1526 /* flush and sleep below */
1527
1528 netif_tx_stop_all_queues(netdev);
1529
1530 /* disable transmits in the hardware */
1531 tctl = rd32(E1000_TCTL);
1532 tctl &= ~E1000_TCTL_EN;
1533 wr32(E1000_TCTL, tctl);
1534 /* flush both disables and wait for them to finish */
1535 wrfl();
1536 msleep(10);
1537
1538 for (i = 0; i < adapter->num_q_vectors; i++) {
1539 struct igb_q_vector *q_vector = adapter->q_vector[i];
1540 napi_disable(&q_vector->napi);
1541 }
1542
1543 igb_irq_disable(adapter);
1544
1545 del_timer_sync(&adapter->watchdog_timer);
1546 del_timer_sync(&adapter->phy_info_timer);
1547
1548 netif_carrier_off(netdev);
1549
1550	/* record the stats before reset */
1551 spin_lock(&adapter->stats64_lock);
1552 igb_update_stats(adapter, &adapter->stats64);
1553 spin_unlock(&adapter->stats64_lock);
1554
1555 adapter->link_speed = 0;
1556 adapter->link_duplex = 0;
1557
1558 if (!pci_channel_offline(adapter->pdev))
1559 igb_reset(adapter);
1560 igb_clean_all_tx_rings(adapter);
1561 igb_clean_all_rx_rings(adapter);
1562#ifdef CONFIG_IGB_DCA
1563
1564 /* since we reset the hardware DCA settings were cleared */
1565 igb_setup_dca(adapter);
1566#endif
1567}
1568
1569void igb_reinit_locked(struct igb_adapter *adapter)
1570{
1571 WARN_ON(in_interrupt());
1572 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1573 msleep(1);
1574 igb_down(adapter);
1575 igb_up(adapter);
1576 clear_bit(__IGB_RESETTING, &adapter->state);
1577}
1578
1579void igb_reset(struct igb_adapter *adapter)
1580{
1581 struct pci_dev *pdev = adapter->pdev;
1582 struct e1000_hw *hw = &adapter->hw;
1583 struct e1000_mac_info *mac = &hw->mac;
1584 struct e1000_fc_info *fc = &hw->fc;
1585 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1586 u16 hwm;
1587
1588	/* Repartition PBA for greater than 9k MTU.
1589	 * To take effect, CTRL.RST is required.
1590 */
1591 switch (mac->type) {
1592 case e1000_i350:
1593 case e1000_82580:
1594 pba = rd32(E1000_RXPBS);
1595 pba = igb_rxpbs_adjust_82580(pba);
1596 break;
1597 case e1000_82576:
1598 pba = rd32(E1000_RXPBS);
1599 pba &= E1000_RXPBS_SIZE_MASK_82576;
1600 break;
1601 case e1000_82575:
1602 default:
1603 pba = E1000_PBA_34K;
1604 break;
1605 }
1606
1607 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1608 (mac->type < e1000_82576)) {
1609 /* adjust PBA for jumbo frames */
1610 wr32(E1000_PBA, pba);
1611
1612 /* To maintain wire speed transmits, the Tx FIFO should be
1613 * large enough to accommodate two full transmit packets,
1614 * rounded up to the next 1KB and expressed in KB. Likewise,
1615 * the Rx FIFO should be large enough to accommodate at least
1616 * one full receive packet and is similarly rounded up and
1617 * expressed in KB. */
1618 pba = rd32(E1000_PBA);
1619 /* upper 16 bits has Tx packet buffer allocation size in KB */
1620 tx_space = pba >> 16;
1621 /* lower 16 bits has Rx packet buffer allocation size in KB */
1622 pba &= 0xffff;
1623		/* the tx fifo also stores 16 bytes of information about the tx
1624		 * packet, but don't include the ethernet FCS because hardware
		 * appends it */
1625 min_tx_space = (adapter->max_frame_size +
1626 sizeof(union e1000_adv_tx_desc) -
1627 ETH_FCS_LEN) * 2;
1628 min_tx_space = ALIGN(min_tx_space, 1024);
1629 min_tx_space >>= 10;
1630 /* software strips receive CRC, so leave room for it */
1631 min_rx_space = adapter->max_frame_size;
1632 min_rx_space = ALIGN(min_rx_space, 1024);
1633 min_rx_space >>= 10;
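		/*
		 * Worked example, assuming a 9000-byte MTU (max_frame_size
		 * of 9018) and a 16-byte advanced Tx descriptor:
		 *   min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10
		 *                = 18 KB
		 *   min_rx_space = ALIGN(9018, 1024) >> 10 = 9 KB
		 */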
1634
1635 /* If current Tx allocation is less than the min Tx FIFO size,
1636 * and the min Tx FIFO size is less than the current Rx FIFO
1637 * allocation, take space away from current Rx allocation */
1638 if (tx_space < min_tx_space &&
1639 ((min_tx_space - tx_space) < pba)) {
1640 pba = pba - (min_tx_space - tx_space);
1641
1642 /* if short on rx space, rx wins and must trump tx
1643 * adjustment */
1644 if (pba < min_rx_space)
1645 pba = min_rx_space;
1646 }
1647 wr32(E1000_PBA, pba);
1648 }
1649
1650 /* flow control settings */
1651 /* The high water mark must be low enough to fit one full frame
1652 * (or the size used for early receive) above it in the Rx FIFO.
1653 * Set it to the lower of:
1654 * - 90% of the Rx FIFO size, or
1655	 *  - the full Rx FIFO size minus two full frames */
1656 hwm = min(((pba << 10) * 9 / 10),
1657 ((pba << 10) - 2 * adapter->max_frame_size));
1658
1659 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1660 fc->low_water = fc->high_water - 16;
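	/*
	 * Worked example, assuming the 82575 default of E1000_PBA_34K
	 * (pba = 34 KB) and max_frame_size = 1522:
	 *   hwm = min(34816 * 9 / 10, 34816 - 2 * 1522) = 31334
	 * so high_water = 31334 & 0xFFF0 = 31328 and low_water = 31312.
	 */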
1661 fc->pause_time = 0xFFFF;
1662 fc->send_xon = 1;
1663 fc->current_mode = fc->requested_mode;
1664
1665 /* disable receive for all VFs and wait one second */
1666 if (adapter->vfs_allocated_count) {
1667 int i;
1668 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
1669 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
1670
1671 /* ping all the active vfs to let them know we are going down */
1672 igb_ping_all_vfs(adapter);
1673
1674 /* disable transmits and receives */
1675 wr32(E1000_VFRE, 0);
1676 wr32(E1000_VFTE, 0);
1677 }
1678
1679 /* Allow time for pending master requests to run */
1680 hw->mac.ops.reset_hw(hw);
1681 wr32(E1000_WUC, 0);
1682
1683 if (hw->mac.ops.init_hw(hw))
1684 dev_err(&pdev->dev, "Hardware Error\n");
1685 if (hw->mac.type > e1000_82580) {
1686 if (adapter->flags & IGB_FLAG_DMAC) {
1687 u32 reg;
1688
1689			/*
1690			 * DMA Coalescing high water mark needs to be higher
1691			 * than the Rx threshold. The Rx threshold is
1692			 * currently pba - 6, so we should use a high water
1693			 * mark of pba - 4. */
1694 hwm = (pba - 4) << 10;
1695
1696 reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
1697 & E1000_DMACR_DMACTHR_MASK);
1698
1699 /* transition to L0x or L1 if available..*/
1700 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
1701
1702			/* watchdog timer = ~1000 usec, in 32 usec intervals
			 * (1000 >> 5 == 31) */
1703 reg |= (1000 >> 5);
1704 wr32(E1000_DMACR, reg);
1705
1706			/* no lower threshold to disable coalescing (smart fifo)
1707			 * - UTRESH=0 */
1708 wr32(E1000_DMCRTRH, 0);
1709
1710 /* set hwm to PBA - 2 * max frame size */
1711 wr32(E1000_FCRTC, hwm);
1712
1713 /*
1714			 * This sets the time to wait before requesting transition to
1715			 * a low power state to the number of usecs needed to receive
1716			 * 1 512-byte frame at gigabit line rate
1717 */
1718 reg = rd32(E1000_DMCTLX);
1719 reg |= IGB_DMCTLX_DCFLUSH_DIS;
1720
1721 /* Delay 255 usec before entering Lx state. */
1722 reg |= 0xFF;
1723 wr32(E1000_DMCTLX, reg);
1724
1725 /* free space in Tx packet buffer to wake from DMAC */
1726 wr32(E1000_DMCTXTH,
1727 (IGB_MIN_TXPBSIZE -
1728 (IGB_TX_BUF_4096 + adapter->max_frame_size))
1729 >> 6);
1730
1731 /* make low power state decision controlled by DMAC */
1732 reg = rd32(E1000_PCIEMISC);
1733 reg |= E1000_PCIEMISC_LX_DECISION;
1734 wr32(E1000_PCIEMISC, reg);
1735 } /* end if IGB_FLAG_DMAC set */
1736 }
1737 if (hw->mac.type == e1000_82580) {
1738 u32 reg = rd32(E1000_PCIEMISC);
1739 wr32(E1000_PCIEMISC,
1740 reg & ~E1000_PCIEMISC_LX_DECISION);
1741 }
1742 if (!netif_running(adapter->netdev))
1743 igb_power_down_link(adapter);
1744
1745 igb_update_mng_vlan(adapter);
1746
1747 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1748 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1749
1750 igb_get_phy_info(hw);
1751}
1752
1753static u32 igb_fix_features(struct net_device *netdev, u32 features)
1754{
1755 /*
1756 * Since there is no support for separate rx/tx vlan accel
1757 * enable/disable make sure tx flag is always in same state as rx.
1758 */
1759 if (features & NETIF_F_HW_VLAN_RX)
1760 features |= NETIF_F_HW_VLAN_TX;
1761 else
1762 features &= ~NETIF_F_HW_VLAN_TX;
1763
1764 return features;
1765}
1766
1767static int igb_set_features(struct net_device *netdev, u32 features)
1768{
1769 struct igb_adapter *adapter = netdev_priv(netdev);
1770 int i;
1771 u32 changed = netdev->features ^ features;
1772
1773 for (i = 0; i < adapter->num_rx_queues; i++) {
1774 if (features & NETIF_F_RXCSUM)
1775 adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
1776 else
1777 adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
1778 }
1779
1780 if (changed & NETIF_F_HW_VLAN_RX)
1781 igb_vlan_mode(netdev, features);
1782
1783 return 0;
1784}
1785
1786static const struct net_device_ops igb_netdev_ops = {
1787 .ndo_open = igb_open,
1788 .ndo_stop = igb_close,
1789 .ndo_start_xmit = igb_xmit_frame_adv,
1790 .ndo_get_stats64 = igb_get_stats64,
1791 .ndo_set_rx_mode = igb_set_rx_mode,
1792 .ndo_set_multicast_list = igb_set_rx_mode,
1793 .ndo_set_mac_address = igb_set_mac,
1794 .ndo_change_mtu = igb_change_mtu,
1795 .ndo_do_ioctl = igb_ioctl,
1796 .ndo_tx_timeout = igb_tx_timeout,
1797 .ndo_validate_addr = eth_validate_addr,
1798 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1799 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
1800 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1801 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1802 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1803 .ndo_get_vf_config = igb_ndo_get_vf_config,
1804#ifdef CONFIG_NET_POLL_CONTROLLER
1805 .ndo_poll_controller = igb_netpoll,
1806#endif
1807 .ndo_fix_features = igb_fix_features,
1808 .ndo_set_features = igb_set_features,
1809};
1810
1811/**
1812 * igb_probe - Device Initialization Routine
1813 * @pdev: PCI device information struct
1814 * @ent: entry in igb_pci_tbl
1815 *
1816 * Returns 0 on success, negative on failure
1817 *
1818 * igb_probe initializes an adapter identified by a pci_dev structure.
1819 * The OS initialization, configuring of the adapter private structure,
1820 * and a hardware reset occur.
1821 **/
1822static int __devinit igb_probe(struct pci_dev *pdev,
1823 const struct pci_device_id *ent)
1824{
1825 struct net_device *netdev;
1826 struct igb_adapter *adapter;
1827 struct e1000_hw *hw;
1828 u16 eeprom_data = 0;
1829 s32 ret_val;
1830 static int global_quad_port_a; /* global quad port a indication */
1831 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1832 unsigned long mmio_start, mmio_len;
1833 int err, pci_using_dac;
1834 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1835 u8 part_str[E1000_PBANUM_LENGTH];
1836
1837 /* Catch broken hardware that put the wrong VF device ID in
1838 * the PCIe SR-IOV capability.
1839 */
1840 if (pdev->is_virtfn) {
1841 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
1842 pci_name(pdev), pdev->vendor, pdev->device);
1843 return -EINVAL;
1844 }
1845
1846 err = pci_enable_device_mem(pdev);
1847 if (err)
1848 return err;
1849
1850 pci_using_dac = 0;
1851 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
1852 if (!err) {
1853 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1854 if (!err)
1855 pci_using_dac = 1;
1856 } else {
1857 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1858 if (err) {
1859 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1860 if (err) {
1861 dev_err(&pdev->dev, "No usable DMA "
1862 "configuration, aborting\n");
1863 goto err_dma;
1864 }
1865 }
1866 }
1867
1868 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1869 IORESOURCE_MEM),
1870 igb_driver_name);
1871 if (err)
1872 goto err_pci_reg;
1873
1874 pci_enable_pcie_error_reporting(pdev);
1875
1876 pci_set_master(pdev);
1877 pci_save_state(pdev);
1878
1879 err = -ENOMEM;
1880 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
1881 IGB_ABS_MAX_TX_QUEUES);
1882 if (!netdev)
1883 goto err_alloc_etherdev;
1884
1885 SET_NETDEV_DEV(netdev, &pdev->dev);
1886
1887 pci_set_drvdata(pdev, netdev);
1888 adapter = netdev_priv(netdev);
1889 adapter->netdev = netdev;
1890 adapter->pdev = pdev;
1891 hw = &adapter->hw;
1892 hw->back = adapter;
1893 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1894
1895 mmio_start = pci_resource_start(pdev, 0);
1896 mmio_len = pci_resource_len(pdev, 0);
1897
1898 err = -EIO;
1899 hw->hw_addr = ioremap(mmio_start, mmio_len);
1900 if (!hw->hw_addr)
1901 goto err_ioremap;
1902
1903 netdev->netdev_ops = &igb_netdev_ops;
1904 igb_set_ethtool_ops(netdev);
1905 netdev->watchdog_timeo = 5 * HZ;
1906
1907 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1908
1909 netdev->mem_start = mmio_start;
1910 netdev->mem_end = mmio_start + mmio_len;
1911
1912 /* PCI config space info */
1913 hw->vendor_id = pdev->vendor;
1914 hw->device_id = pdev->device;
1915 hw->revision_id = pdev->revision;
1916 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1917 hw->subsystem_device_id = pdev->subsystem_device;
1918
1919 /* Copy the default MAC, PHY and NVM function pointers */
1920 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1921 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1922 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1923 /* Initialize skew-specific constants */
1924 err = ei->get_invariants(hw);
1925 if (err)
1926 goto err_sw_init;
1927
1928 /* setup the private structure */
1929 err = igb_sw_init(adapter);
1930 if (err)
1931 goto err_sw_init;
1932
1933 igb_get_bus_info_pcie(hw);
1934
1935 hw->phy.autoneg_wait_to_complete = false;
1936
1937 /* Copper options */
1938 if (hw->phy.media_type == e1000_media_type_copper) {
1939 hw->phy.mdix = AUTO_ALL_MODES;
1940 hw->phy.disable_polarity_correction = false;
1941 hw->phy.ms_type = e1000_ms_hw_default;
1942 }
1943
1944 if (igb_check_reset_block(hw))
1945 dev_info(&pdev->dev,
1946 "PHY reset is blocked due to SOL/IDER session.\n");
1947
1948 netdev->hw_features = NETIF_F_SG |
1949 NETIF_F_IP_CSUM |
1950 NETIF_F_IPV6_CSUM |
1951 NETIF_F_TSO |
1952 NETIF_F_TSO6 |
1953 NETIF_F_RXCSUM |
1954 NETIF_F_HW_VLAN_RX;
1955
1956 netdev->features = netdev->hw_features |
1957 NETIF_F_HW_VLAN_TX |
1958 NETIF_F_HW_VLAN_FILTER;
1959
1960 netdev->vlan_features |= NETIF_F_TSO;
1961 netdev->vlan_features |= NETIF_F_TSO6;
1962 netdev->vlan_features |= NETIF_F_IP_CSUM;
1963 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
1964 netdev->vlan_features |= NETIF_F_SG;
1965
1966 if (pci_using_dac) {
1967 netdev->features |= NETIF_F_HIGHDMA;
1968 netdev->vlan_features |= NETIF_F_HIGHDMA;
1969 }
1970
1971 if (hw->mac.type >= e1000_82576) {
1972 netdev->hw_features |= NETIF_F_SCTP_CSUM;
1973 netdev->features |= NETIF_F_SCTP_CSUM;
1974 }
1975
1976 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
1977
1978 /* before reading the NVM, reset the controller to put the device in a
1979 * known good starting state */
1980 hw->mac.ops.reset_hw(hw);
1981
1982 /* make sure the NVM is good */
1983 if (hw->nvm.ops.validate(hw) < 0) {
1984 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1985 err = -EIO;
1986 goto err_eeprom;
1987 }
1988
1989 /* copy the MAC address out of the NVM */
1990 if (hw->mac.ops.read_mac_addr(hw))
1991 dev_err(&pdev->dev, "NVM Read Error\n");
1992
1993 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1994 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1995
1996 if (!is_valid_ether_addr(netdev->perm_addr)) {
1997 dev_err(&pdev->dev, "Invalid MAC Address\n");
1998 err = -EIO;
1999 goto err_eeprom;
2000 }
2001
2002 setup_timer(&adapter->watchdog_timer, igb_watchdog,
2003 (unsigned long) adapter);
2004 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
2005 (unsigned long) adapter);
2006
2007 INIT_WORK(&adapter->reset_task, igb_reset_task);
2008 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2009
2010 /* Initialize link properties that are user-changeable */
2011 adapter->fc_autoneg = true;
2012 hw->mac.autoneg = true;
2013 hw->phy.autoneg_advertised = 0x2f;
2014
2015 hw->fc.requested_mode = e1000_fc_default;
2016 hw->fc.current_mode = e1000_fc_default;
2017
2018 igb_validate_mdi_setting(hw);
2019
2020	/* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
2021 * enable the ACPI Magic Packet filter
2022 */
2023
2024 if (hw->bus.func == 0)
2025 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
2026 else if (hw->mac.type >= e1000_82580)
2027 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2028 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2029 &eeprom_data);
2030 else if (hw->bus.func == 1)
2031 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
2032
2033 if (eeprom_data & eeprom_apme_mask)
2034 adapter->eeprom_wol |= E1000_WUFC_MAG;
2035
2036 /* now that we have the eeprom settings, apply the special cases where
2037 * the eeprom may be wrong or the board simply won't support wake on
2038 * lan on a particular port */
2039 switch (pdev->device) {
2040 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2041 adapter->eeprom_wol = 0;
2042 break;
2043 case E1000_DEV_ID_82575EB_FIBER_SERDES:
2044 case E1000_DEV_ID_82576_FIBER:
2045 case E1000_DEV_ID_82576_SERDES:
2046 /* Wake events only supported on port A for dual fiber
2047 * regardless of eeprom setting */
2048 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2049 adapter->eeprom_wol = 0;
2050 break;
2051 case E1000_DEV_ID_82576_QUAD_COPPER:
2052 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
2053 /* if quad port adapter, disable WoL on all but port A */
2054 if (global_quad_port_a != 0)
2055 adapter->eeprom_wol = 0;
2056 else
2057 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2058 /* Reset for multiple quad port adapters */
2059 if (++global_quad_port_a == 4)
2060 global_quad_port_a = 0;
2061 break;
2062 }
2063
2064 /* initialize the wol settings based on the eeprom settings */
2065 adapter->wol = adapter->eeprom_wol;
2066 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2067
2068 /* reset the hardware with the new settings */
2069 igb_reset(adapter);
2070
2071 /* let the f/w know that the h/w is now under the control of the
2072 * driver. */
2073 igb_get_hw_control(adapter);
2074
2075 strcpy(netdev->name, "eth%d");
2076 err = register_netdev(netdev);
2077 if (err)
2078 goto err_register;
2079
2080 igb_vlan_mode(netdev, netdev->features);
2081
2082 /* carrier off reporting is important to ethtool even BEFORE open */
2083 netif_carrier_off(netdev);
2084
2085#ifdef CONFIG_IGB_DCA
2086 if (dca_add_requester(&pdev->dev) == 0) {
2087 adapter->flags |= IGB_FLAG_DCA_ENABLED;
2088 dev_info(&pdev->dev, "DCA enabled\n");
2089 igb_setup_dca(adapter);
2090 }
2091
2092#endif
2093 /* do hw tstamp init after resetting */
2094 igb_init_hw_timer(adapter);
2095
2096 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2097 /* print bus type/speed/width info */
2098 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
2099 netdev->name,
2100 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
2101 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
2102 "unknown"),
2103 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2104 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2105 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2106 "unknown"),
2107 netdev->dev_addr);
2108
2109 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2110 if (ret_val)
2111 strcpy(part_str, "Unknown");
2112 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
2113 dev_info(&pdev->dev,
2114 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2115 adapter->msix_entries ? "MSI-X" :
2116 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
2117 adapter->num_rx_queues, adapter->num_tx_queues);
2118 switch (hw->mac.type) {
2119 case e1000_i350:
2120 igb_set_eee_i350(hw);
2121 break;
2122 default:
2123 break;
2124 }
2125 return 0;
2126
2127err_register:
2128 igb_release_hw_control(adapter);
2129err_eeprom:
2130 if (!igb_check_reset_block(hw))
2131 igb_reset_phy(hw);
2132
2133 if (hw->flash_address)
2134 iounmap(hw->flash_address);
2135err_sw_init:
2136 igb_clear_interrupt_scheme(adapter);
2137 iounmap(hw->hw_addr);
2138err_ioremap:
2139 free_netdev(netdev);
2140err_alloc_etherdev:
2141 pci_release_selected_regions(pdev,
2142 pci_select_bars(pdev, IORESOURCE_MEM));
2143err_pci_reg:
2144err_dma:
2145 pci_disable_device(pdev);
2146 return err;
2147}
2148
2149/**
2150 * igb_remove - Device Removal Routine
2151 * @pdev: PCI device information struct
2152 *
2153 * igb_remove is called by the PCI subsystem to alert the driver
2154 * that it should release a PCI device. This could be caused by a
2155 * Hot-Plug event, or because the driver is going to be removed from
2156 * memory.
2157 **/
2158static void __devexit igb_remove(struct pci_dev *pdev)
2159{
2160 struct net_device *netdev = pci_get_drvdata(pdev);
2161 struct igb_adapter *adapter = netdev_priv(netdev);
2162 struct e1000_hw *hw = &adapter->hw;
2163
2164 /*
2165 * The watchdog timer may be rescheduled, so explicitly
2166 * disable watchdog from being rescheduled.
2167 */
2168 set_bit(__IGB_DOWN, &adapter->state);
2169 del_timer_sync(&adapter->watchdog_timer);
2170 del_timer_sync(&adapter->phy_info_timer);
2171
2172 cancel_work_sync(&adapter->reset_task);
2173 cancel_work_sync(&adapter->watchdog_task);
2174
2175#ifdef CONFIG_IGB_DCA
2176 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
2177 dev_info(&pdev->dev, "DCA disabled\n");
2178 dca_remove_requester(&pdev->dev);
2179 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
2180 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
2181 }
2182#endif
2183
2184 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2185 * would have already happened in close and is redundant. */
2186 igb_release_hw_control(adapter);
2187
2188 unregister_netdev(netdev);
2189
2190 igb_clear_interrupt_scheme(adapter);
2191
2192#ifdef CONFIG_PCI_IOV
2193 /* reclaim resources allocated to VFs */
2194 if (adapter->vf_data) {
2195 /* disable iov and allow time for transactions to clear */
2196 pci_disable_sriov(pdev);
2197 msleep(500);
2198
2199 kfree(adapter->vf_data);
2200 adapter->vf_data = NULL;
2201 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
2202 wrfl();
2203 msleep(100);
2204 dev_info(&pdev->dev, "IOV Disabled\n");
2205 }
2206#endif
2207
2208 iounmap(hw->hw_addr);
2209 if (hw->flash_address)
2210 iounmap(hw->flash_address);
2211 pci_release_selected_regions(pdev,
2212 pci_select_bars(pdev, IORESOURCE_MEM));
2213
2214 free_netdev(netdev);
2215
2216 pci_disable_pcie_error_reporting(pdev);
2217
2218 pci_disable_device(pdev);
2219}
2220
2221/**
2222 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2223 * @adapter: board private structure to initialize
2224 *
2225 * This function initializes the VF-specific data storage and then attempts to
2226 * allocate the VFs. The ordering matters because it is much more
2227 * expensive time-wise to disable SR-IOV than it is to allocate and free
2228 * the memory for the VFs.
2229 **/
2230static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
2231{
2232#ifdef CONFIG_PCI_IOV
2233 struct pci_dev *pdev = adapter->pdev;
2234
2235 if (adapter->vfs_allocated_count) {
2236 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2237 sizeof(struct vf_data_storage),
2238 GFP_KERNEL);
2239 /* if allocation failed then we do not support SR-IOV */
2240 if (!adapter->vf_data) {
2241 adapter->vfs_allocated_count = 0;
2242 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2243 "Data Storage\n");
2244 }
2245 }
2246
2247 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
2248 kfree(adapter->vf_data);
2249 adapter->vf_data = NULL;
2250#endif /* CONFIG_PCI_IOV */
2251 adapter->vfs_allocated_count = 0;
2252#ifdef CONFIG_PCI_IOV
2253 } else {
2254 unsigned char mac_addr[ETH_ALEN];
2255 int i;
2256 dev_info(&pdev->dev, "%d vfs allocated\n",
2257 adapter->vfs_allocated_count);
2258 for (i = 0; i < adapter->vfs_allocated_count; i++) {
2259 random_ether_addr(mac_addr);
2260 igb_set_vf_mac(adapter, i, mac_addr);
2261 }
2262 /* DMA Coalescing is not supported in IOV mode. */
2263 if (adapter->flags & IGB_FLAG_DMAC)
2264 adapter->flags &= ~IGB_FLAG_DMAC;
2265 }
2266#endif /* CONFIG_PCI_IOV */
2267}
2268
2269
2270/**
2271 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
2272 * @adapter: board private structure to initialize
2273 *
2274 * igb_init_hw_timer initializes the function pointer and values for the hw
2275 * timer found in hardware.
2276 **/
2277static void igb_init_hw_timer(struct igb_adapter *adapter)
2278{
2279 struct e1000_hw *hw = &adapter->hw;
2280
2281 switch (hw->mac.type) {
2282 case e1000_i350:
2283 case e1000_82580:
2284 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2285 adapter->cycles.read = igb_read_clock;
2286 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2287 adapter->cycles.mult = 1;
2288 /*
2289		 * The 82580 timesync updates the system timer in 8 ns increments,
2290 * and the value cannot be shifted. Instead we need to shift
2291 * the registers to generate a 64bit timer value. As a result
2292 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
2293 * 24 in order to generate a larger value for synchronization.
2294 */
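		/*
		 * A sketch (not the exact igb_read_clock body, which lives
		 * elsewhere in this file) of how the shifted 64-bit value
		 * is assembled with IGB_82580_TSYNC_SHIFT == 24:
		 *
		 *   stamp  = rd32(E1000_SYSTIMR) >> 8;
		 *   stamp |= (u64)rd32(E1000_SYSTIML) << 24;
		 *   stamp |= (u64)rd32(E1000_SYSTIMH) << (24 + 32);
		 */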
2295 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
2296 /* disable system timer temporarily by setting bit 31 */
2297 wr32(E1000_TSAUXC, 0x80000000);
2298 wrfl();
2299
2300 /* Set registers so that rollover occurs soon to test this. */
2301 wr32(E1000_SYSTIMR, 0x00000000);
2302 wr32(E1000_SYSTIML, 0x80000000);
2303 wr32(E1000_SYSTIMH, 0x000000FF);
2304 wrfl();
2305
2306 /* enable system timer by clearing bit 31 */
2307 wr32(E1000_TSAUXC, 0x0);
2308 wrfl();
2309
2310 timecounter_init(&adapter->clock,
2311 &adapter->cycles,
2312 ktime_to_ns(ktime_get_real()));
2313 /*
2314 * Synchronize our NIC clock against system wall clock. NIC
2315 * time stamp reading requires ~3us per sample, each sample
2316 * was pretty stable even under load => only require 10
2317 * samples for each offset comparison.
2318 */
2319 memset(&adapter->compare, 0, sizeof(adapter->compare));
2320 adapter->compare.source = &adapter->clock;
2321 adapter->compare.target = ktime_get_real;
2322 adapter->compare.num_samples = 10;
2323 timecompare_update(&adapter->compare, 0);
2324 break;
2325 case e1000_82576:
2326 /*
2327 * Initialize hardware timer: we keep it running just in case
2328		 * some program needs it later on.
2329 */
2330 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2331 adapter->cycles.read = igb_read_clock;
2332 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2333 adapter->cycles.mult = 1;
2334		/*
2335 * Scale the NIC clock cycle by a large factor so that
2336 * relatively small clock corrections can be added or
2337 * subtracted at each clock tick. The drawbacks of a large
2338 * factor are a) that the clock register overflows more quickly
2339 * (not such a big deal) and b) that the increment per tick has
2340 * to fit into 24 bits. As a result we need to use a shift of
2341 * 19 so we can fit a value of 16 into the TIMINCA register.
2342 */
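		/*
		 * Worked example for the TIMINCA value written below: the
		 * increment field is 16 << 19, so the counter grows by
		 * 16 * 2^19 every 16 ns update period.  With cycles.mult == 1
		 * and cycles.shift == 19 the timecounter computes
		 * ns = counter >> 19, recovering the expected 16 ns per tick.
		 */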
2343 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
2344 wr32(E1000_TIMINCA,
2345 (1 << E1000_TIMINCA_16NS_SHIFT) |
2346 (16 << IGB_82576_TSYNC_SHIFT));
2347
2348 /* Set registers so that rollover occurs soon to test this. */
2349 wr32(E1000_SYSTIML, 0x00000000);
2350 wr32(E1000_SYSTIMH, 0xFF800000);
2351 wrfl();
2352
2353 timecounter_init(&adapter->clock,
2354 &adapter->cycles,
2355 ktime_to_ns(ktime_get_real()));
2356 /*
2357 * Synchronize our NIC clock against system wall clock. NIC
2358 * time stamp reading requires ~3us per sample, each sample
2359 * was pretty stable even under load => only require 10
2360 * samples for each offset comparison.
2361 */
2362 memset(&adapter->compare, 0, sizeof(adapter->compare));
2363 adapter->compare.source = &adapter->clock;
2364 adapter->compare.target = ktime_get_real;
2365 adapter->compare.num_samples = 10;
2366 timecompare_update(&adapter->compare, 0);
2367 break;
2368 case e1000_82575:
2369 /* 82575 does not support timesync */
2370 default:
2371 break;
2372 }
2373
2374}
2375
2376/**
2377 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2378 * @adapter: board private structure to initialize
2379 *
2380 * igb_sw_init initializes the Adapter private data structure.
2381 * Fields are initialized based on PCI device information and
2382 * OS network device settings (MTU size).
2383 **/
2384static int __devinit igb_sw_init(struct igb_adapter *adapter)
2385{
2386 struct e1000_hw *hw = &adapter->hw;
2387 struct net_device *netdev = adapter->netdev;
2388 struct pci_dev *pdev = adapter->pdev;
2389
2390 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2391
2392 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2393 adapter->rx_ring_count = IGB_DEFAULT_RXD;
2394 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2395 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2396
2397 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2398 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
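	/* e.g. with the default 1500 byte MTU: 1500 + 14 (ETH_HLEN) +
	 * 4 (ETH_FCS_LEN) = 1518 byte max frame; min is 60 + 4 = 64 */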
2399
2400 spin_lock_init(&adapter->stats64_lock);
2401#ifdef CONFIG_PCI_IOV
2402 switch (hw->mac.type) {
2403 case e1000_82576:
2404 case e1000_i350:
2405 if (max_vfs > 7) {
2406 dev_warn(&pdev->dev,
2407 "Maximum of 7 VFs per PF, using max\n");
2408 adapter->vfs_allocated_count = 7;
2409 } else
2410 adapter->vfs_allocated_count = max_vfs;
2411 break;
2412 default:
2413 break;
2414 }
2415#endif /* CONFIG_PCI_IOV */
2416 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
2417 /* i350 cannot do RSS and SR-IOV at the same time */
2418 if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
2419 adapter->rss_queues = 1;
2420
2421 /*
2422	 * if rss_queues > 4, or VFs are going to be allocated alongside
2423	 * multiple rss_queues, combine each rx/tx queue into a queue pair
2424	 * in order to conserve interrupts due to the limited supply
2425 */
2426 if ((adapter->rss_queues > 4) ||
2427 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
2428 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
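	/*
	 * Rough arithmetic (assuming the MAX_Q_VECTORS limit of 8 in
	 * igb.h): 8 rx + 8 tx queues left unpaired would want 16 queue
	 * vectors, while pairing fits them into 8, one vector per
	 * tx/rx ring pair.
	 */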
2429
2430 /* This call may decrease the number of queues */
2431 if (igb_init_interrupt_scheme(adapter)) {
2432 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2433 return -ENOMEM;
2434 }
2435
2436 igb_probe_vfs(adapter);
2437
2438 /* Explicitly disable IRQ since the NIC can be in any state. */
2439 igb_irq_disable(adapter);
2440
2441 if (hw->mac.type == e1000_i350)
2442 adapter->flags &= ~IGB_FLAG_DMAC;
2443
2444 set_bit(__IGB_DOWN, &adapter->state);
2445 return 0;
2446}
2447
2448/**
2449 * igb_open - Called when a network interface is made active
2450 * @netdev: network interface device structure
2451 *
2452 * Returns 0 on success, negative value on failure
2453 *
2454 * The open entry point is called when a network interface is made
2455 * active by the system (IFF_UP). At this point all resources needed
2456 * for transmit and receive operations are allocated, the interrupt
2457 * handler is registered with the OS, the watchdog timer is started,
2458 * and the stack is notified that the interface is ready.
2459 **/
2460static int igb_open(struct net_device *netdev)
2461{
2462 struct igb_adapter *adapter = netdev_priv(netdev);
2463 struct e1000_hw *hw = &adapter->hw;
2464 int err;
2465 int i;
2466
2467 /* disallow open during test */
2468 if (test_bit(__IGB_TESTING, &adapter->state))
2469 return -EBUSY;
2470
2471 netif_carrier_off(netdev);
2472
2473 /* allocate transmit descriptors */
2474 err = igb_setup_all_tx_resources(adapter);
2475 if (err)
2476 goto err_setup_tx;
2477
2478 /* allocate receive descriptors */
2479 err = igb_setup_all_rx_resources(adapter);
2480 if (err)
2481 goto err_setup_rx;
2482
2483 igb_power_up_link(adapter);
2484
2485 /* before we allocate an interrupt, we must be ready to handle it.
2486 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2487 * as soon as we call pci_request_irq, so we have to setup our
2488 * clean_rx handler before we do so. */
2489 igb_configure(adapter);
2490
2491 err = igb_request_irq(adapter);
2492 if (err)
2493 goto err_req_irq;
2494
2495 /* From here on the code is the same as igb_up() */
2496 clear_bit(__IGB_DOWN, &adapter->state);
2497
2498 for (i = 0; i < adapter->num_q_vectors; i++) {
2499 struct igb_q_vector *q_vector = adapter->q_vector[i];
2500 napi_enable(&q_vector->napi);
2501 }
2502
2503 /* Clear any pending interrupts. */
2504 rd32(E1000_ICR);
2505
2506 igb_irq_enable(adapter);
2507
2508 /* notify VFs that reset has been completed */
2509 if (adapter->vfs_allocated_count) {
2510 u32 reg_data = rd32(E1000_CTRL_EXT);
2511 reg_data |= E1000_CTRL_EXT_PFRSTD;
2512 wr32(E1000_CTRL_EXT, reg_data);
2513 }
2514
2515 netif_tx_start_all_queues(netdev);
2516
2517 /* start the watchdog. */
2518 hw->mac.get_link_status = 1;
2519 schedule_work(&adapter->watchdog_task);
2520
2521 return 0;
2522
2523err_req_irq:
2524 igb_release_hw_control(adapter);
2525 igb_power_down_link(adapter);
2526 igb_free_all_rx_resources(adapter);
2527err_setup_rx:
2528 igb_free_all_tx_resources(adapter);
2529err_setup_tx:
2530 igb_reset(adapter);
2531
2532 return err;
2533}
2534
2535/**
2536 * igb_close - Disables a network interface
2537 * @netdev: network interface device structure
2538 *
2539 * Returns 0, this is not allowed to fail
2540 *
2541 * The close entry point is called when an interface is de-activated
2542 * by the OS. The hardware is still under the driver's control, but
2543 * needs to be disabled. A global MAC reset is issued to stop the
2544 * hardware, and all transmit and receive resources are freed.
2545 **/
2546static int igb_close(struct net_device *netdev)
2547{
2548 struct igb_adapter *adapter = netdev_priv(netdev);
2549
2550 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2551 igb_down(adapter);
2552
2553 igb_free_irq(adapter);
2554
2555 igb_free_all_tx_resources(adapter);
2556 igb_free_all_rx_resources(adapter);
2557
2558 return 0;
2559}
2560
2561/**
2562 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
2563 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2564 *
2565 * Return 0 on success, negative on failure
2566 **/
2567int igb_setup_tx_resources(struct igb_ring *tx_ring)
2568{
2569 struct device *dev = tx_ring->dev;
2570 int size;
2571
2572 size = sizeof(struct igb_buffer) * tx_ring->count;
2573 tx_ring->buffer_info = vzalloc(size);
2574 if (!tx_ring->buffer_info)
2575 goto err;
2576
2577 /* round up to nearest 4K */
2578 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
2579 tx_ring->size = ALIGN(tx_ring->size, 4096);
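	/* e.g. the default 256 descriptors * 16 bytes per advanced tx
	 * descriptor = 4096 bytes, already a whole 4K page */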
2580
2581 tx_ring->desc = dma_alloc_coherent(dev,
2582 tx_ring->size,
2583 &tx_ring->dma,
2584 GFP_KERNEL);
2585
2586 if (!tx_ring->desc)
2587 goto err;
2588
2589 tx_ring->next_to_use = 0;
2590 tx_ring->next_to_clean = 0;
2591 return 0;
2592
2593err:
2594 vfree(tx_ring->buffer_info);
2595 dev_err(dev,
2596 "Unable to allocate memory for the transmit descriptor ring\n");
2597 return -ENOMEM;
2598}
2599
2600/**
2601 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2602 * (Descriptors) for all queues
2603 * @adapter: board private structure
2604 *
2605 * Return 0 on success, negative on failure
2606 **/
2607static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2608{
2609 struct pci_dev *pdev = adapter->pdev;
2610 int i, err = 0;
2611
2612 for (i = 0; i < adapter->num_tx_queues; i++) {
2613 err = igb_setup_tx_resources(adapter->tx_ring[i]);
2614 if (err) {
2615 dev_err(&pdev->dev,
2616 "Allocation for Tx Queue %u failed\n", i);
2617 for (i--; i >= 0; i--)
2618 igb_free_tx_resources(adapter->tx_ring[i]);
2619 break;
2620 }
2621 }
2622
2623 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
2624 int r_idx = i % adapter->num_tx_queues;
2625 adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
2626 }
2627 return err;
2628}
2629
2630/**
2631 * igb_setup_tctl - configure the transmit control registers
2632 * @adapter: Board private structure
2633 **/
2634void igb_setup_tctl(struct igb_adapter *adapter)
2635{
2636 struct e1000_hw *hw = &adapter->hw;
2637 u32 tctl;
2638
2639 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2640 wr32(E1000_TXDCTL(0), 0);
2641
2642 /* Program the Transmit Control Register */
2643 tctl = rd32(E1000_TCTL);
2644 tctl &= ~E1000_TCTL_CT;
2645 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2646 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2647
2648 igb_config_collision_dist(hw);
2649
2650 /* Enable transmits */
2651 tctl |= E1000_TCTL_EN;
2652
2653 wr32(E1000_TCTL, tctl);
2654}
2655
2656/**
2657 * igb_configure_tx_ring - Configure transmit ring after Reset
2658 * @adapter: board private structure
2659 * @ring: tx ring to configure
2660 *
2661 * Configure a transmit ring after a reset.
2662 **/
2663void igb_configure_tx_ring(struct igb_adapter *adapter,
2664 struct igb_ring *ring)
2665{
2666 struct e1000_hw *hw = &adapter->hw;
2667 u32 txdctl;
2668 u64 tdba = ring->dma;
2669 int reg_idx = ring->reg_idx;
2670
2671 /* disable the queue */
2672 txdctl = rd32(E1000_TXDCTL(reg_idx));
2673 wr32(E1000_TXDCTL(reg_idx),
2674 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2675 wrfl();
2676 mdelay(10);
2677
2678 wr32(E1000_TDLEN(reg_idx),
2679 ring->count * sizeof(union e1000_adv_tx_desc));
2680 wr32(E1000_TDBAL(reg_idx),
2681 tdba & 0x00000000ffffffffULL);
2682 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2683
2684 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2685 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2686 writel(0, ring->head);
2687 writel(0, ring->tail);
2688
2689 txdctl |= IGB_TX_PTHRESH;
2690 txdctl |= IGB_TX_HTHRESH << 8;
2691 txdctl |= IGB_TX_WTHRESH << 16;
2692
2693 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2694 wr32(E1000_TXDCTL(reg_idx), txdctl);
2695}
2696
2697/**
2698 * igb_configure_tx - Configure transmit Unit after Reset
2699 * @adapter: board private structure
2700 *
2701 * Configure the Tx unit of the MAC after a reset.
2702 **/
2703static void igb_configure_tx(struct igb_adapter *adapter)
2704{
2705 int i;
2706
2707 for (i = 0; i < adapter->num_tx_queues; i++)
2708 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
2709}
2710
2711/**
2712 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
2713 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2714 *
2715 * Returns 0 on success, negative on failure
2716 **/
2717int igb_setup_rx_resources(struct igb_ring *rx_ring)
2718{
2719 struct device *dev = rx_ring->dev;
2720 int size, desc_len;
2721
2722 size = sizeof(struct igb_buffer) * rx_ring->count;
2723 rx_ring->buffer_info = vzalloc(size);
2724 if (!rx_ring->buffer_info)
2725 goto err;
2726
2727 desc_len = sizeof(union e1000_adv_rx_desc);
2728
2729 /* Round up to nearest 4K */
2730 rx_ring->size = rx_ring->count * desc_len;
2731 rx_ring->size = ALIGN(rx_ring->size, 4096);
2732
2733 rx_ring->desc = dma_alloc_coherent(dev,
2734 rx_ring->size,
2735 &rx_ring->dma,
2736 GFP_KERNEL);
2737
2738 if (!rx_ring->desc)
2739 goto err;
2740
2741 rx_ring->next_to_clean = 0;
2742 rx_ring->next_to_use = 0;
2743
2744 return 0;
2745
2746err:
2747 vfree(rx_ring->buffer_info);
2748 rx_ring->buffer_info = NULL;
2749 dev_err(dev, "Unable to allocate memory for the receive descriptor"
2750 " ring\n");
2751 return -ENOMEM;
2752}
2753
2754/**
2755 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2756 * (Descriptors) for all queues
2757 * @adapter: board private structure
2758 *
2759 * Return 0 on success, negative on failure
2760 **/
2761static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2762{
2763 struct pci_dev *pdev = adapter->pdev;
2764 int i, err = 0;
2765
2766 for (i = 0; i < adapter->num_rx_queues; i++) {
2767 err = igb_setup_rx_resources(adapter->rx_ring[i]);
2768 if (err) {
2769 dev_err(&pdev->dev,
2770 "Allocation for Rx Queue %u failed\n", i);
2771 for (i--; i >= 0; i--)
2772 igb_free_rx_resources(adapter->rx_ring[i]);
2773 break;
2774 }
2775 }
2776
2777 return err;
2778}
2779
2780/**
2781 * igb_setup_mrqc - configure the multiple receive queue control registers
2782 * @adapter: Board private structure
2783 **/
2784static void igb_setup_mrqc(struct igb_adapter *adapter)
2785{
2786 struct e1000_hw *hw = &adapter->hw;
2787 u32 mrqc, rxcsum;
2788 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2789 union e1000_reta {
2790 u32 dword;
2791 u8 bytes[4];
2792 } reta;
2793 static const u8 rsshash[40] = {
2794 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2795 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2796 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2797 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2798
2799 /* Fill out hash function seeds */
2800 for (j = 0; j < 10; j++) {
2801 u32 rsskey = rsshash[(j * 4)];
2802 rsskey |= rsshash[(j * 4) + 1] << 8;
2803 rsskey |= rsshash[(j * 4) + 2] << 16;
2804 rsskey |= rsshash[(j * 4) + 3] << 24;
2805 array_wr32(E1000_RSSRK(0), j, rsskey);
2806 }
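	/* e.g. the first word written above is 0xda565a6d: the first
	 * four key bytes packed little-endian */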
2807
2808 num_rx_queues = adapter->rss_queues;
2809
2810 if (adapter->vfs_allocated_count) {
2811		/* 82575 and 82576 support 2 RSS queues for VMDq */
2812 switch (hw->mac.type) {
2813 case e1000_i350:
2814 case e1000_82580:
2815 num_rx_queues = 1;
2816 shift = 0;
2817 break;
2818 case e1000_82576:
2819 shift = 3;
2820 num_rx_queues = 2;
2821 break;
2822 case e1000_82575:
2823 shift = 2;
2824 shift2 = 6;
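			/* fall through */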
2825 default:
2826 break;
2827 }
2828 } else {
2829 if (hw->mac.type == e1000_82575)
2830 shift = 6;
2831 }
2832
2833 for (j = 0; j < (32 * 4); j++) {
2834 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2835 if (shift2)
2836 reta.bytes[j & 3] |= num_rx_queues << shift2;
2837 if ((j & 3) == 3)
2838 wr32(E1000_RETA(j >> 2), reta.dword);
2839 }
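	/* e.g. on 82576 with VFs and 2 RSS queues (shift = 3) the
	 * redirection bytes simply alternate 0x00 and 0x08 */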
2840
2841 /*
2842 * Disable raw packet checksumming so that RSS hash is placed in
2843 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2844 * offloads as they are enabled by default
2845 */
2846 rxcsum = rd32(E1000_RXCSUM);
2847 rxcsum |= E1000_RXCSUM_PCSD;
2848
2849 if (adapter->hw.mac.type >= e1000_82576)
2850 /* Enable Receive Checksum Offload for SCTP */
2851 rxcsum |= E1000_RXCSUM_CRCOFL;
2852
2853 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2854 wr32(E1000_RXCSUM, rxcsum);
2855
2856 /* If VMDq is enabled then we set the appropriate mode for that, else
2857 * we default to RSS so that an RSS hash is calculated per packet even
2858 * if we are only using one queue */
2859 if (adapter->vfs_allocated_count) {
2860 if (hw->mac.type > e1000_82575) {
2861 /* Set the default pool for the PF's first queue */
2862 u32 vtctl = rd32(E1000_VT_CTL);
2863 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2864 E1000_VT_CTL_DISABLE_DEF_POOL);
2865 vtctl |= adapter->vfs_allocated_count <<
2866 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2867 wr32(E1000_VT_CTL, vtctl);
2868 }
2869 if (adapter->rss_queues > 1)
2870 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2871 else
2872 mrqc = E1000_MRQC_ENABLE_VMDQ;
2873 } else {
2874 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2875 }
2876 igb_vmm_control(adapter);
2877
2878 /*
2879 * Generate RSS hash based on TCP port numbers and/or
2880 * IPv4/v6 src and dst addresses since UDP cannot be
2881 * hashed reliably due to IP fragmentation
2882 */
2883 mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
2884 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2885 E1000_MRQC_RSS_FIELD_IPV6 |
2886 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2887 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
2888
2889 wr32(E1000_MRQC, mrqc);
2890}
2891
2892/**
2893 * igb_setup_rctl - configure the receive control registers
2894 * @adapter: Board private structure
2895 **/
2896void igb_setup_rctl(struct igb_adapter *adapter)
2897{
2898 struct e1000_hw *hw = &adapter->hw;
2899 u32 rctl;
2900
2901 rctl = rd32(E1000_RCTL);
2902
2903 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2904 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
2905
2906 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
2907 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2908
2909 /*
2910 * enable stripping of CRC. It's unlikely this will break BMC
2911 * redirection as it did with e1000. Newer features require
2912 * that the HW strips the CRC.
2913 */
2914 rctl |= E1000_RCTL_SECRC;
2915
2916 /* disable store bad packets and clear size bits. */
2917 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
2918
2919 /* enable LPE to prevent packets larger than max_frame_size */
2920 rctl |= E1000_RCTL_LPE;
2921
2922 /* disable queue 0 to prevent tail write w/o re-config */
2923 wr32(E1000_RXDCTL(0), 0);
2924
2925 /* Attention!!! For SR-IOV PF driver operations you must enable
2926 * queue drop for all VF and PF queues to prevent head of line blocking
2927	 * if an untrusted VF does not provide descriptors to hardware.
2928 */
2929 if (adapter->vfs_allocated_count) {
2930 /* set all queue drop enable bits */
2931 wr32(E1000_QDE, ALL_QUEUES);
2932 }
2933
2934 wr32(E1000_RCTL, rctl);
2935}
2936
2937static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2938 int vfn)
2939{
2940 struct e1000_hw *hw = &adapter->hw;
2941 u32 vmolr;
2942
2943	/* if this is a VF rather than the PF, check whether it has VLANs
2944	 * enabled and increase the size to allow for a VLAN tag */
2945 if (vfn < adapter->vfs_allocated_count &&
2946 adapter->vf_data[vfn].vlans_enabled)
2947 size += VLAN_TAG_SIZE;
2948
2949 vmolr = rd32(E1000_VMOLR(vfn));
2950 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2951 vmolr |= size | E1000_VMOLR_LPE;
2952 wr32(E1000_VMOLR(vfn), vmolr);
2953
2954 return 0;
2955}
2956
2957/**
2958 * igb_rlpml_set - set maximum receive packet size
2959 * @adapter: board private structure
2960 *
2961 * Configure maximum receivable packet size.
2962 **/
2963static void igb_rlpml_set(struct igb_adapter *adapter)
2964{
2965 u32 max_frame_size;
2966 struct e1000_hw *hw = &adapter->hw;
2967 u16 pf_id = adapter->vfs_allocated_count;
2968
2969 max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
2970
2971 /* if vfs are enabled we set RLPML to the largest possible request
2972 * size and set the VMOLR RLPML to the size we need */
2973 if (pf_id) {
2974 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2975 max_frame_size = MAX_JUMBO_FRAME_SIZE;
2976 }
2977
2978 wr32(E1000_RLPML, max_frame_size);
2979}
2980
2981static inline void igb_set_vmolr(struct igb_adapter *adapter,
2982 int vfn, bool aupe)
2983{
2984 struct e1000_hw *hw = &adapter->hw;
2985 u32 vmolr;
2986
2987 /*
2988	 * This register exists only on 82576 and newer, so on older
2989	 * devices exit and do nothing
2990 */
2991 if (hw->mac.type < e1000_82576)
2992 return;
2993
2994 vmolr = rd32(E1000_VMOLR(vfn));
2995 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2996 if (aupe)
2997 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
2998 else
2999 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
3000
3001 /* clear all bits that might not be set */
3002 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3003
3004 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
3005 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3006 /*
3007 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3008 * multicast packets
3009 */
3010 if (vfn <= adapter->vfs_allocated_count)
3011 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3012
3013 wr32(E1000_VMOLR(vfn), vmolr);
3014}
3015
3016/**
3017 * igb_configure_rx_ring - Configure a receive ring after Reset
3018 * @adapter: board private structure
3019 * @ring: receive ring to be configured
3020 *
3021 * Configure the Rx unit of the MAC after a reset.
3022 **/
3023void igb_configure_rx_ring(struct igb_adapter *adapter,
3024 struct igb_ring *ring)
3025{
3026 struct e1000_hw *hw = &adapter->hw;
3027 u64 rdba = ring->dma;
3028 int reg_idx = ring->reg_idx;
3029 u32 srrctl, rxdctl;
3030
3031 /* disable the queue */
3032 rxdctl = rd32(E1000_RXDCTL(reg_idx));
3033 wr32(E1000_RXDCTL(reg_idx),
3034 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
3035
3036 /* Set DMA base address registers */
3037 wr32(E1000_RDBAL(reg_idx),
3038 rdba & 0x00000000ffffffffULL);
3039 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3040 wr32(E1000_RDLEN(reg_idx),
3041 ring->count * sizeof(union e1000_adv_rx_desc));
3042
3043 /* initialize head and tail */
3044 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
3045 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
3046 writel(0, ring->head);
3047 writel(0, ring->tail);
3048
3049 /* set descriptor configuration */
3050 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
3051 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
3052 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
3053#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
3054 srrctl |= IGB_RXBUFFER_16384 >>
3055 E1000_SRRCTL_BSIZEPKT_SHIFT;
3056#else
3057 srrctl |= (PAGE_SIZE / 2) >>
3058 E1000_SRRCTL_BSIZEPKT_SHIFT;
3059#endif
3060 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3061 } else {
3062 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
3063 E1000_SRRCTL_BSIZEPKT_SHIFT;
3064 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
3065 }
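	/* e.g. a 2048 byte rx_buffer_len in one-buffer mode (assuming
	 * BSIZEPKT is in 1KB units): ALIGN(2048, 1024) >> 10 = 2 */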
3066 if (hw->mac.type == e1000_82580)
3067 srrctl |= E1000_SRRCTL_TIMESTAMP;
3068 /* Only set Drop Enable if we are supporting multiple queues */
3069 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3070 srrctl |= E1000_SRRCTL_DROP_EN;
3071
3072 wr32(E1000_SRRCTL(reg_idx), srrctl);
3073
3074 /* set filtering for VMDQ pools */
3075 igb_set_vmolr(adapter, reg_idx & 0x7, true);
3076
3077 /* enable receive descriptor fetching */
3078 rxdctl = rd32(E1000_RXDCTL(reg_idx));
3079 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
3080 rxdctl &= 0xFFF00000;
3081 rxdctl |= IGB_RX_PTHRESH;
3082 rxdctl |= IGB_RX_HTHRESH << 8;
3083 rxdctl |= IGB_RX_WTHRESH << 16;
3084 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3085}
3086
3087/**
3088 * igb_configure_rx - Configure receive Unit after Reset
3089 * @adapter: board private structure
3090 *
3091 * Configure the Rx unit of the MAC after a reset.
3092 **/
3093static void igb_configure_rx(struct igb_adapter *adapter)
3094{
3095 int i;
3096
3097 /* set UTA to appropriate mode */
3098 igb_set_uta(adapter);
3099
3100 /* set the correct pool for the PF default MAC address in entry 0 */
3101 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3102 adapter->vfs_allocated_count);
3103
3104 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3105 * the Base and Length of the Rx Descriptor Ring */
3106 for (i = 0; i < adapter->num_rx_queues; i++)
3107 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
3108}
3109
3110/**
3111 * igb_free_tx_resources - Free Tx Resources per Queue
3112 * @tx_ring: Tx descriptor ring for a specific queue
3113 *
3114 * Free all transmit software resources
3115 **/
3116void igb_free_tx_resources(struct igb_ring *tx_ring)
3117{
3118 igb_clean_tx_ring(tx_ring);
3119
3120 vfree(tx_ring->buffer_info);
3121 tx_ring->buffer_info = NULL;
3122
3123 /* if not set, then don't free */
3124 if (!tx_ring->desc)
3125 return;
3126
3127 dma_free_coherent(tx_ring->dev, tx_ring->size,
3128 tx_ring->desc, tx_ring->dma);
3129
3130 tx_ring->desc = NULL;
3131}
3132
3133/**
3134 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3135 * @adapter: board private structure
3136 *
3137 * Free all transmit software resources
3138 **/
3139static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3140{
3141 int i;
3142
3143 for (i = 0; i < adapter->num_tx_queues; i++)
3144 igb_free_tx_resources(adapter->tx_ring[i]);
3145}
3146
3147void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
3148 struct igb_buffer *buffer_info)
3149{
3150 if (buffer_info->dma) {
3151 if (buffer_info->mapped_as_page)
3152 dma_unmap_page(tx_ring->dev,
3153 buffer_info->dma,
3154 buffer_info->length,
3155 DMA_TO_DEVICE);
3156 else
3157 dma_unmap_single(tx_ring->dev,
3158 buffer_info->dma,
3159 buffer_info->length,
3160 DMA_TO_DEVICE);
3161 buffer_info->dma = 0;
3162 }
3163 if (buffer_info->skb) {
3164 dev_kfree_skb_any(buffer_info->skb);
3165 buffer_info->skb = NULL;
3166 }
3167 buffer_info->time_stamp = 0;
3168 buffer_info->length = 0;
3169 buffer_info->next_to_watch = 0;
3170 buffer_info->mapped_as_page = false;
3171}
3172
3173/**
3174 * igb_clean_tx_ring - Free Tx Buffers
3175 * @tx_ring: ring to be cleaned
3176 **/
3177static void igb_clean_tx_ring(struct igb_ring *tx_ring)
3178{
3179 struct igb_buffer *buffer_info;
3180 unsigned long size;
3181 unsigned int i;
3182
3183 if (!tx_ring->buffer_info)
3184 return;
3185 /* Free all the Tx ring sk_buffs */
3186
3187 for (i = 0; i < tx_ring->count; i++) {
3188 buffer_info = &tx_ring->buffer_info[i];
3189 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3190 }
3191
3192 size = sizeof(struct igb_buffer) * tx_ring->count;
3193 memset(tx_ring->buffer_info, 0, size);
3194
3195 /* Zero out the descriptor ring */
3196 memset(tx_ring->desc, 0, tx_ring->size);
3197
3198 tx_ring->next_to_use = 0;
3199 tx_ring->next_to_clean = 0;
3200}
3201
3202/**
3203 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3204 * @adapter: board private structure
3205 **/
3206static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3207{
3208 int i;
3209
3210 for (i = 0; i < adapter->num_tx_queues; i++)
3211 igb_clean_tx_ring(adapter->tx_ring[i]);
3212}
3213
3214/**
3215 * igb_free_rx_resources - Free Rx Resources
3216 * @rx_ring: ring to clean the resources from
3217 *
3218 * Free all receive software resources
3219 **/
3220void igb_free_rx_resources(struct igb_ring *rx_ring)
3221{
3222 igb_clean_rx_ring(rx_ring);
3223
3224 vfree(rx_ring->buffer_info);
3225 rx_ring->buffer_info = NULL;
3226
3227 /* if not set, then don't free */
3228 if (!rx_ring->desc)
3229 return;
3230
3231 dma_free_coherent(rx_ring->dev, rx_ring->size,
3232 rx_ring->desc, rx_ring->dma);
3233
3234 rx_ring->desc = NULL;
3235}
3236
3237/**
3238 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3239 * @adapter: board private structure
3240 *
3241 * Free all receive software resources
3242 **/
3243static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3244{
3245 int i;
3246
3247 for (i = 0; i < adapter->num_rx_queues; i++)
3248 igb_free_rx_resources(adapter->rx_ring[i]);
3249}
3250
3251/**
3252 * igb_clean_rx_ring - Free Rx Buffers per Queue
3253 * @rx_ring: ring to free buffers from
3254 **/
3255static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3256{
3257 struct igb_buffer *buffer_info;
3258 unsigned long size;
3259 unsigned int i;
3260
3261 if (!rx_ring->buffer_info)
3262 return;
3263
3264 /* Free all the Rx ring sk_buffs */
3265 for (i = 0; i < rx_ring->count; i++) {
3266 buffer_info = &rx_ring->buffer_info[i];
3267 if (buffer_info->dma) {
3268 dma_unmap_single(rx_ring->dev,
3269 buffer_info->dma,
3270 rx_ring->rx_buffer_len,
3271 DMA_FROM_DEVICE);
3272 buffer_info->dma = 0;
3273 }
3274
3275 if (buffer_info->skb) {
3276 dev_kfree_skb(buffer_info->skb);
3277 buffer_info->skb = NULL;
3278 }
3279 if (buffer_info->page_dma) {
3280 dma_unmap_page(rx_ring->dev,
3281 buffer_info->page_dma,
3282 PAGE_SIZE / 2,
3283 DMA_FROM_DEVICE);
3284 buffer_info->page_dma = 0;
3285 }
3286 if (buffer_info->page) {
3287 put_page(buffer_info->page);
3288 buffer_info->page = NULL;
3289 buffer_info->page_offset = 0;
3290 }
3291 }
3292
3293 size = sizeof(struct igb_buffer) * rx_ring->count;
3294 memset(rx_ring->buffer_info, 0, size);
3295
3296 /* Zero out the descriptor ring */
3297 memset(rx_ring->desc, 0, rx_ring->size);
3298
3299 rx_ring->next_to_clean = 0;
3300 rx_ring->next_to_use = 0;
3301}
3302
3303/**
3304 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3305 * @adapter: board private structure
3306 **/
3307static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3308{
3309 int i;
3310
3311 for (i = 0; i < adapter->num_rx_queues; i++)
3312 igb_clean_rx_ring(adapter->rx_ring[i]);
3313}
3314
3315/**
3316 * igb_set_mac - Change the Ethernet Address of the NIC
3317 * @netdev: network interface device structure
3318 * @p: pointer to an address structure
3319 *
3320 * Returns 0 on success, negative on failure
3321 **/
3322static int igb_set_mac(struct net_device *netdev, void *p)
3323{
3324 struct igb_adapter *adapter = netdev_priv(netdev);
3325 struct e1000_hw *hw = &adapter->hw;
3326 struct sockaddr *addr = p;
3327
3328 if (!is_valid_ether_addr(addr->sa_data))
3329 return -EADDRNOTAVAIL;
3330
3331 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3332 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3333
3334 /* set the correct pool for the new PF MAC address in entry 0 */
3335 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3336 adapter->vfs_allocated_count);
3337
3338 return 0;
3339}
3340
3341/**
3342 * igb_write_mc_addr_list - write multicast addresses to MTA
3343 * @netdev: network interface device structure
3344 *
3345 * Writes multicast address list to the MTA hash table.
3346 * Returns: -ENOMEM on failure
3347 * 0 on no addresses written
3348 * X on writing X addresses to MTA
3349 **/
3350static int igb_write_mc_addr_list(struct net_device *netdev)
3351{
3352 struct igb_adapter *adapter = netdev_priv(netdev);
3353 struct e1000_hw *hw = &adapter->hw;
3354 struct netdev_hw_addr *ha;
3355 u8 *mta_list;
3356 int i;
3357
3358 if (netdev_mc_empty(netdev)) {
3359 /* nothing to program, so clear mc list */
3360 igb_update_mc_addr_list(hw, NULL, 0);
3361 igb_restore_vf_multicasts(adapter);
3362 return 0;
3363 }
3364
3365	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
3366 if (!mta_list)
3367 return -ENOMEM;
3368
3369 /* The shared function expects a packed array of only addresses. */
3370 i = 0;
3371 netdev_for_each_mc_addr(ha, netdev)
3372 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3373
3374 igb_update_mc_addr_list(hw, mta_list, i);
3375 kfree(mta_list);
3376
3377 return netdev_mc_count(netdev);
3378}
3379
3380/**
3381 * igb_write_uc_addr_list - write unicast addresses to RAR table
3382 * @netdev: network interface device structure
3383 *
3384 * Writes unicast address list to the RAR table.
3385 * Returns: -ENOMEM on failure/insufficient address space
3386 * 0 on no addresses written
3387 * X on writing X addresses to the RAR table
3388 **/
3389static int igb_write_uc_addr_list(struct net_device *netdev)
3390{
3391 struct igb_adapter *adapter = netdev_priv(netdev);
3392 struct e1000_hw *hw = &adapter->hw;
3393 unsigned int vfn = adapter->vfs_allocated_count;
3394 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
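	/* e.g. assuming a 24 entry RAR table and 7 VFs: the top seven
	 * entries hold the VF MACs and entry 0 the PF MAC, leaving 16
	 * entries for this list */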
3395 int count = 0;
3396
3397 /* return ENOMEM indicating insufficient memory for addresses */
3398 if (netdev_uc_count(netdev) > rar_entries)
3399 return -ENOMEM;
3400
3401 if (!netdev_uc_empty(netdev) && rar_entries) {
3402 struct netdev_hw_addr *ha;
3403
3404 netdev_for_each_uc_addr(ha, netdev) {
3405 if (!rar_entries)
3406 break;
3407 igb_rar_set_qsel(adapter, ha->addr,
3408 rar_entries--,
3409 vfn);
3410 count++;
3411 }
3412 }
3413 /* write the addresses in reverse order to avoid write combining */
3414 for (; rar_entries > 0 ; rar_entries--) {
3415 wr32(E1000_RAH(rar_entries), 0);
3416 wr32(E1000_RAL(rar_entries), 0);
3417 }
3418 wrfl();
3419
3420 return count;
3421}
3422
3423/**
3424 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3425 * @netdev: network interface device structure
3426 *
3427 * The set_rx_mode entry point is called whenever the unicast or multicast
3428 * address lists or the network interface flags are updated. This routine is
3429 * responsible for configuring the hardware for proper unicast, multicast,
3430 * promiscuous mode, and all-multi behavior.
3431 **/
3432static void igb_set_rx_mode(struct net_device *netdev)
3433{
3434 struct igb_adapter *adapter = netdev_priv(netdev);
3435 struct e1000_hw *hw = &adapter->hw;
3436 unsigned int vfn = adapter->vfs_allocated_count;
3437 u32 rctl, vmolr = 0;
3438 int count;
3439
3440 /* Check for Promiscuous and All Multicast modes */
3441 rctl = rd32(E1000_RCTL);
3442
3443	/* clear the affected bits */
3444 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3445
3446 if (netdev->flags & IFF_PROMISC) {
3447 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3448 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
3449 } else {
3450 if (netdev->flags & IFF_ALLMULTI) {
3451 rctl |= E1000_RCTL_MPE;
3452 vmolr |= E1000_VMOLR_MPME;
3453 } else {
3454 /*
3455 * Write addresses to the MTA, if the attempt fails
3456 * then we should just turn on promiscuous mode so
3457 * that we can at least receive multicast traffic
3458 */
3459 count = igb_write_mc_addr_list(netdev);
3460 if (count < 0) {
3461 rctl |= E1000_RCTL_MPE;
3462 vmolr |= E1000_VMOLR_MPME;
3463 } else if (count) {
3464 vmolr |= E1000_VMOLR_ROMPE;
3465 }
3466 }
3467 /*
3468 * Write addresses to available RAR registers, if there is not
3469 * sufficient space to store all the addresses then enable
3470 * unicast promiscuous mode
3471 */
3472 count = igb_write_uc_addr_list(netdev);
3473 if (count < 0) {
3474 rctl |= E1000_RCTL_UPE;
3475 vmolr |= E1000_VMOLR_ROPE;
3476 }
3477 rctl |= E1000_RCTL_VFE;
3478 }
3479 wr32(E1000_RCTL, rctl);
3480
3481 /*
3482 * In order to support SR-IOV and eventually VMDq it is necessary to set
3483 * the VMOLR to enable the appropriate modes. Without this workaround
3484 * we will have issues with VLAN tag stripping not being done for frames
3485 * that are only arriving because we are the default pool
3486 */
3487 if (hw->mac.type < e1000_82576)
3488 return;
3489
3490 vmolr |= rd32(E1000_VMOLR(vfn)) &
3491 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3492 wr32(E1000_VMOLR(vfn), vmolr);
3493 igb_restore_vf_multicasts(adapter);
3494}
3495
3496static void igb_check_wvbr(struct igb_adapter *adapter)
3497{
3498 struct e1000_hw *hw = &adapter->hw;
3499 u32 wvbr = 0;
3500
3501 switch (hw->mac.type) {
3502 case e1000_82576:
3503 case e1000_i350:
3504 if (!(wvbr = rd32(E1000_WVBR)))
3505 return;
3506 break;
3507 default:
3508 break;
3509 }
3510
3511 adapter->wvbr |= wvbr;
3512}
3513
3514#define IGB_STAGGERED_QUEUE_OFFSET 8
3515
3516static void igb_spoof_check(struct igb_adapter *adapter)
3517{
3518 int j;
3519
3520 if (!adapter->wvbr)
3521 return;
3522
3523	for (j = 0; j < adapter->vfs_allocated_count; j++) {
3524 if (adapter->wvbr & (1 << j) ||
3525 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3526 dev_warn(&adapter->pdev->dev,
3527 "Spoof event(s) detected on VF %d\n", j);
3528 adapter->wvbr &=
3529 ~((1 << j) |
3530 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3531 }
3532 }
3533}
3534
3535/* Need to wait a few seconds after link up to get diagnostic information from
3536 * the phy */
3537static void igb_update_phy_info(unsigned long data)
3538{
3539 struct igb_adapter *adapter = (struct igb_adapter *) data;
3540 igb_get_phy_info(&adapter->hw);
3541}
3542
3543/**
3544 * igb_has_link - check shared code for link and determine up/down
3545 * @adapter: pointer to driver private info
3546 **/
3547bool igb_has_link(struct igb_adapter *adapter)
3548{
3549 struct e1000_hw *hw = &adapter->hw;
3550 bool link_active = false;
3551 s32 ret_val = 0;
3552
3553 /* get_link_status is set on LSC (link status) interrupt or
3554 * rx sequence error interrupt. get_link_status will stay
3555	 * set until e1000_check_for_link establishes link
3556 * for copper adapters ONLY
3557 */
3558 switch (hw->phy.media_type) {
3559 case e1000_media_type_copper:
3560 if (hw->mac.get_link_status) {
3561 ret_val = hw->mac.ops.check_for_link(hw);
3562 link_active = !hw->mac.get_link_status;
3563 } else {
3564 link_active = true;
3565 }
3566 break;
3567 case e1000_media_type_internal_serdes:
3568 ret_val = hw->mac.ops.check_for_link(hw);
3569 link_active = hw->mac.serdes_has_link;
3570 break;
3571 default:
3572 case e1000_media_type_unknown:
3573 break;
3574 }
3575
3576 return link_active;
3577}
3578
3579static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3580{
3581 bool ret = false;
3582 u32 ctrl_ext, thstat;
3583
3584 /* check for thermal sensor event on i350, copper only */
3585 if (hw->mac.type == e1000_i350) {
3586 thstat = rd32(E1000_THSTAT);
3587 ctrl_ext = rd32(E1000_CTRL_EXT);
3588
3589 if ((hw->phy.media_type == e1000_media_type_copper) &&
3590 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3591 ret = !!(thstat & event);
3592 }
3593 }
3594
3595 return ret;
3596}
3597
3598/**
3599 * igb_watchdog - Timer Call-back
3600 * @data: pointer to adapter cast into an unsigned long
3601 **/
3602static void igb_watchdog(unsigned long data)
3603{
3604 struct igb_adapter *adapter = (struct igb_adapter *)data;
3605 /* Do the rest outside of interrupt context */
3606 schedule_work(&adapter->watchdog_task);
3607}
3608
3609static void igb_watchdog_task(struct work_struct *work)
3610{
3611 struct igb_adapter *adapter = container_of(work,
3612 struct igb_adapter,
3613 watchdog_task);
3614 struct e1000_hw *hw = &adapter->hw;
3615 struct net_device *netdev = adapter->netdev;
3616 u32 link;
3617 int i;
3618
3619 link = igb_has_link(adapter);
3620 if (link) {
3621 if (!netif_carrier_ok(netdev)) {
3622 u32 ctrl;
3623 hw->mac.ops.get_speed_and_duplex(hw,
3624 &adapter->link_speed,
3625 &adapter->link_duplex);
3626
3627 ctrl = rd32(E1000_CTRL);
3628			/* Link status message must follow this format */
3629 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
3630 "Flow Control: %s\n",
3631 netdev->name,
3632 adapter->link_speed,
3633 adapter->link_duplex == FULL_DUPLEX ?
3634 "Full Duplex" : "Half Duplex",
3635 ((ctrl & E1000_CTRL_TFCE) &&
3636 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3637 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3638 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
3639
3640 /* check for thermal sensor event */
3641 if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
3642 printk(KERN_INFO "igb: %s The network adapter "
3643 "link speed was downshifted "
3644 "because it overheated.\n",
3645 netdev->name);
3646 }
3647
3648 /* adjust timeout factor according to speed/duplex */
3649 adapter->tx_timeout_factor = 1;
3650 switch (adapter->link_speed) {
3651 case SPEED_10:
3652 adapter->tx_timeout_factor = 14;
3653 break;
3654 case SPEED_100:
3655 /* maybe add some timeout factor ? */
3656 break;
3657 }
3658
3659 netif_carrier_on(netdev);
3660
3661 igb_ping_all_vfs(adapter);
3662 igb_check_vf_rate_limit(adapter);
3663
3664 /* link state has changed, schedule phy info update */
3665 if (!test_bit(__IGB_DOWN, &adapter->state))
3666 mod_timer(&adapter->phy_info_timer,
3667 round_jiffies(jiffies + 2 * HZ));
3668 }
3669 } else {
3670 if (netif_carrier_ok(netdev)) {
3671 adapter->link_speed = 0;
3672 adapter->link_duplex = 0;
3673
3674 /* check for thermal sensor event */
3675 if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
3676 printk(KERN_ERR "igb: %s The network adapter "
3677 "was stopped because it "
3678 "overheated.\n",
3679 netdev->name);
3680 }
3681
3682			/* Link status message must follow this format */
3683 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3684 netdev->name);
3685 netif_carrier_off(netdev);
3686
3687 igb_ping_all_vfs(adapter);
3688
3689 /* link state has changed, schedule phy info update */
3690 if (!test_bit(__IGB_DOWN, &adapter->state))
3691 mod_timer(&adapter->phy_info_timer,
3692 round_jiffies(jiffies + 2 * HZ));
3693 }
3694 }
3695
3696 spin_lock(&adapter->stats64_lock);
3697 igb_update_stats(adapter, &adapter->stats64);
3698 spin_unlock(&adapter->stats64_lock);
3699
3700 for (i = 0; i < adapter->num_tx_queues; i++) {
3701 struct igb_ring *tx_ring = adapter->tx_ring[i];
3702 if (!netif_carrier_ok(netdev)) {
3703 /* We've lost link, so the controller stops DMA,
3704 * but we've got queued Tx work that's never going
3705 * to get done, so reset controller to flush Tx.
3706 * (Do the reset outside of interrupt context). */
3707 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3708 adapter->tx_timeout_count++;
3709 schedule_work(&adapter->reset_task);
3710 /* return immediately since reset is imminent */
3711 return;
3712 }
3713 }
3714
3715 /* Force detection of hung controller every watchdog period */
3716 tx_ring->detect_tx_hung = true;
3717 }
3718
3719 /* Cause software interrupt to ensure rx ring is cleaned */
3720 if (adapter->msix_entries) {
3721 u32 eics = 0;
3722 for (i = 0; i < adapter->num_q_vectors; i++) {
3723 struct igb_q_vector *q_vector = adapter->q_vector[i];
3724 eics |= q_vector->eims_value;
3725 }
3726 wr32(E1000_EICS, eics);
3727 } else {
3728 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3729 }
3730
3731 igb_spoof_check(adapter);
3732
3733 /* Reset the timer */
3734 if (!test_bit(__IGB_DOWN, &adapter->state))
3735 mod_timer(&adapter->watchdog_timer,
3736 round_jiffies(jiffies + 2 * HZ));
3737}
3738
3739enum latency_range {
3740 lowest_latency = 0,
3741 low_latency = 1,
3742 bulk_latency = 2,
3743 latency_invalid = 255
3744};
3745
3746/**
3747 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3748 *
3749 * Stores a new ITR value based strictly on packet size. This
3750 * algorithm is less sophisticated than that used in igb_update_itr,
3751 * due to the difficulty of synchronizing statistics across multiple
3752 * receive rings. The divisors and thresholds used by this function
3753 * were determined based on theoretical maximum wire speed and testing
3754 * data, in order to minimize response time while increasing bulk
3755 * throughput.
3756 * This functionality is controlled by the InterruptThrottleRate module
3757 * parameter (see igb_param.c)
3758 * NOTE: This function is called only when operating in a multiqueue
3759 * receive environment.
3760 * @q_vector: pointer to q_vector
3761 **/
3762static void igb_update_ring_itr(struct igb_q_vector *q_vector)
3763{
3764 int new_val = q_vector->itr_val;
3765 int avg_wire_size = 0;
3766 struct igb_adapter *adapter = q_vector->adapter;
3767 struct igb_ring *ring;
3768 unsigned int packets;
3769
3770 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3771	 * ints/sec - an ITR value of 976 counts (~250 usec).
3772 */
3773 if (adapter->link_speed != SPEED_1000) {
3774 new_val = 976;
3775 goto set_itr_val;
3776 }
3777
3778 ring = q_vector->rx_ring;
3779 if (ring) {
3780 packets = ACCESS_ONCE(ring->total_packets);
3781
3782 if (packets)
3783 avg_wire_size = ring->total_bytes / packets;
3784 }
3785
3786 ring = q_vector->tx_ring;
3787 if (ring) {
3788 packets = ACCESS_ONCE(ring->total_packets);
3789
3790 if (packets)
3791 avg_wire_size = max_t(u32, avg_wire_size,
3792 ring->total_bytes / packets);
3793 }
3794
3795 /* if avg_wire_size isn't set no work was done */
3796 if (!avg_wire_size)
3797 goto clear_counts;
3798
3799 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3800 avg_wire_size += 24;
3801
3802 /* Don't starve jumbo frames */
3803 avg_wire_size = min(avg_wire_size, 3000);
3804
3805 /* Give a little boost to mid-size frames */
3806 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3807 new_val = avg_wire_size / 3;
3808 else
3809 new_val = avg_wire_size / 2;
3810
3811 /* when in itr mode 3 do not exceed 20K ints/sec */
3812 if (adapter->rx_itr_setting == 3 && new_val < 196)
3813 new_val = 196;
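	/*
	 * e.g. avg_wire_size = 900 takes the boost branch: 900 / 3 =
	 * 300 ITR counts, and at roughly 0.25 usec per count that is
	 * ~75 usec between interrupts, i.e. ~13000 ints/sec.
	 */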
3814
3815set_itr_val:
3816 if (new_val != q_vector->itr_val) {
3817 q_vector->itr_val = new_val;
3818 q_vector->set_itr = 1;
3819 }
3820clear_counts:
3821 if (q_vector->rx_ring) {
3822 q_vector->rx_ring->total_bytes = 0;
3823 q_vector->rx_ring->total_packets = 0;
3824 }
3825 if (q_vector->tx_ring) {
3826 q_vector->tx_ring->total_bytes = 0;
3827 q_vector->tx_ring->total_packets = 0;
3828 }
3829}
3830
3831/**
3832 * igb_update_itr - update the dynamic ITR value based on statistics
3833 * Stores a new ITR value based on packets and byte
3834 * counts during the last interrupt. The advantage of per interrupt
3835 * computation is faster updates and more accurate ITR for the current
3836 * traffic pattern. Constants in this function were computed
3837 * based on theoretical maximum wire speed and thresholds were set based
3838 * on testing data as well as attempting to minimize response time
3839 * while increasing bulk throughput.
3840 * This functionality is controlled by the InterruptThrottleRate module
3841 * parameter (see igb_param.c)
3842 * NOTE: These calculations are only valid when operating in a single-
3843 * queue environment.
3844 * @adapter: pointer to adapter
3845 * @itr_setting: current q_vector->itr_val
3846 * @packets: the number of packets during this measurement interval
3847 * @bytes: the number of bytes during this measurement interval
3848 **/
3849static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3850 int packets, int bytes)
3851{
3852 unsigned int retval = itr_setting;
3853
3854 if (packets == 0)
3855 goto update_itr_done;
3856
3857 switch (itr_setting) {
3858 case lowest_latency:
3859 /* handle TSO and jumbo frames */
3860 if (bytes/packets > 8000)
3861 retval = bulk_latency;
3862 else if ((packets < 5) && (bytes > 512))
3863 retval = low_latency;
3864 break;
3865 case low_latency: /* 50 usec aka 20000 ints/s */
3866 if (bytes > 10000) {
3867 /* this if handles the TSO accounting */
3868 if (bytes/packets > 8000) {
3869 retval = bulk_latency;
3870 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3871 retval = bulk_latency;
3872 } else if ((packets > 35)) {
3873 retval = lowest_latency;
3874 }
3875 } else if (bytes/packets > 2000) {
3876 retval = bulk_latency;
3877 } else if (packets <= 2 && bytes < 512) {
3878 retval = lowest_latency;
3879 }
3880 break;
3881 case bulk_latency: /* 250 usec aka 4000 ints/s */
3882 if (bytes > 25000) {
3883 if (packets > 35)
3884 retval = low_latency;
3885 } else if (bytes < 1500) {
3886 retval = low_latency;
3887 }
3888 break;
3889 }
3890
3891update_itr_done:
3892 return retval;
3893}
3894
3895static void igb_set_itr(struct igb_adapter *adapter)
3896{
3897 struct igb_q_vector *q_vector = adapter->q_vector[0];
3898 u16 current_itr;
3899 u32 new_itr = q_vector->itr_val;
3900
3901 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3902 if (adapter->link_speed != SPEED_1000) {
3903 current_itr = 0;
3904 new_itr = 4000;
3905 goto set_itr_now;
3906 }
3907
3908 adapter->rx_itr = igb_update_itr(adapter,
3909 adapter->rx_itr,
3910 q_vector->rx_ring->total_packets,
3911 q_vector->rx_ring->total_bytes);
3912
3913 adapter->tx_itr = igb_update_itr(adapter,
3914 adapter->tx_itr,
3915 q_vector->tx_ring->total_packets,
3916 q_vector->tx_ring->total_bytes);
3917 current_itr = max(adapter->rx_itr, adapter->tx_itr);
3918
3919 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3920 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
3921 current_itr = low_latency;
3922
3923 switch (current_itr) {
3924 /* counts and packets in update_itr are dependent on these numbers */
3925 case lowest_latency:
3926 new_itr = 56; /* aka 70,000 ints/sec */
3927 break;
3928 case low_latency:
3929 new_itr = 196; /* aka 20,000 ints/sec */
3930 break;
3931 case bulk_latency:
3932 new_itr = 980; /* aka 4,000 ints/sec */
3933 break;
3934 default:
3935 break;
3936 }
3937
3938set_itr_now:
3939 q_vector->rx_ring->total_bytes = 0;
3940 q_vector->rx_ring->total_packets = 0;
3941 q_vector->tx_ring->total_bytes = 0;
3942 q_vector->tx_ring->total_packets = 0;
3943
3944 if (new_itr != q_vector->itr_val) {
3945 /* this attempts to bias the interrupt rate towards Bulk
3946 * by adding intermediate steps when interrupt rate is
3947 * increasing */
3948 new_itr = new_itr > q_vector->itr_val ?
3949 max((new_itr * q_vector->itr_val) /
3950 (new_itr + (q_vector->itr_val >> 2)),
3951 new_itr) :
3952 new_itr;
3953 /* Don't write the value here; it resets the adapter's
3954 * internal timer, and causes us to delay far longer than
3955 * we should between interrupts. Instead, we write the ITR
3956 * value at the beginning of the next interrupt so the timing
3957 * ends up being correct.
3958 */
3959 q_vector->itr_val = new_itr;
3960 q_vector->set_itr = 1;
3961 }
3962}
3963
3964#define IGB_TX_FLAGS_CSUM 0x00000001
3965#define IGB_TX_FLAGS_VLAN 0x00000002
3966#define IGB_TX_FLAGS_TSO 0x00000004
3967#define IGB_TX_FLAGS_IPV4 0x00000008
3968#define IGB_TX_FLAGS_TSTAMP 0x00000010
3969#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3970#define IGB_TX_FLAGS_VLAN_SHIFT 16
3971
3972static inline int igb_tso_adv(struct igb_ring *tx_ring,
3973 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3974{
3975 struct e1000_adv_tx_context_desc *context_desc;
3976 unsigned int i;
3977 int err;
3978 struct igb_buffer *buffer_info;
3979 u32 info = 0, tu_cmd = 0;
3980 u32 mss_l4len_idx;
3981 u8 l4len;
3982
3983 if (skb_header_cloned(skb)) {
3984 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3985 if (err)
3986 return err;
3987 }
3988
3989 l4len = tcp_hdrlen(skb);
3990 *hdr_len += l4len;
3991
3992 if (skb->protocol == htons(ETH_P_IP)) {
3993 struct iphdr *iph = ip_hdr(skb);
3994 iph->tot_len = 0;
3995 iph->check = 0;
3996 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3997 iph->daddr, 0,
3998 IPPROTO_TCP,
3999 0);
4000 } else if (skb_is_gso_v6(skb)) {
4001 ipv6_hdr(skb)->payload_len = 0;
4002 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4003 &ipv6_hdr(skb)->daddr,
4004 0, IPPROTO_TCP, 0);
4005 }
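	/*
	 * In both cases the pseudo-header checksum is seeded with a zero
	 * length field: the hardware adds the per-segment length itself
	 * as it splits the TSO super-frame.
	 */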
4006
4007 i = tx_ring->next_to_use;
4008
4009 buffer_info = &tx_ring->buffer_info[i];
4010 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
4011 /* VLAN MACLEN IPLEN */
4012 if (tx_flags & IGB_TX_FLAGS_VLAN)
4013 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
4014 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
4015 *hdr_len += skb_network_offset(skb);
4016 info |= skb_network_header_len(skb);
4017 *hdr_len += skb_network_header_len(skb);
4018 context_desc->vlan_macip_lens = cpu_to_le32(info);
4019
4020 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4021 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
4022
4023 if (skb->protocol == htons(ETH_P_IP))
4024 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
4025 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4026
4027 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
4028
4029 /* MSS L4LEN IDX */
4030 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
4031 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
4032
4033 /* For 82575, context index must be unique per ring. */
4034 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
4035 mss_l4len_idx |= tx_ring->reg_idx << 4;
4036
4037 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4038 context_desc->seqnum_seed = 0;
4039
4040 buffer_info->time_stamp = jiffies;
4041 buffer_info->next_to_watch = i;
4042 buffer_info->dma = 0;
4043 i++;
4044 if (i == tx_ring->count)
4045 i = 0;
4046
4047 tx_ring->next_to_use = i;
4048
4049 return true;
4050}
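/*
 * Worked example (hypothetical TSO frame): with gso_size 1448, a 20-byte
 * TCP header (l4len) and reg_idx 1 on a ring that sets
 * IGB_RING_FLAG_TX_CTX_IDX, the context descriptor field built above is
 *
 *	mss_l4len_idx = (1448 << E1000_ADVTXD_MSS_SHIFT) |
 *	                (20 << E1000_ADVTXD_L4LEN_SHIFT) |
 *	                (1 << 4);
 */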
4051
4052static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
4053 struct sk_buff *skb, u32 tx_flags)
4054{
4055 struct e1000_adv_tx_context_desc *context_desc;
4056 struct device *dev = tx_ring->dev;
4057 struct igb_buffer *buffer_info;
4058 u32 info = 0, tu_cmd = 0;
4059 unsigned int i;
4060
4061 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
4062 (tx_flags & IGB_TX_FLAGS_VLAN)) {
4063 i = tx_ring->next_to_use;
4064 buffer_info = &tx_ring->buffer_info[i];
4065 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
4066
4067 if (tx_flags & IGB_TX_FLAGS_VLAN)
4068 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
4069
4070 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
4071 if (skb->ip_summed == CHECKSUM_PARTIAL)
4072 info |= skb_network_header_len(skb);
4073
4074 context_desc->vlan_macip_lens = cpu_to_le32(info);
4075
4076 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
4077
4078 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4079 __be16 protocol;
4080
4081 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
4082 const struct vlan_ethhdr *vhdr =
4083 (const struct vlan_ethhdr*)skb->data;
4084
4085 protocol = vhdr->h_vlan_encapsulated_proto;
4086 } else {
4087 protocol = skb->protocol;
4088 }
4089
4090 switch (protocol) {
4091 case cpu_to_be16(ETH_P_IP):
4092 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
4093 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4094 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4095 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
4096 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4097 break;
4098 case cpu_to_be16(ETH_P_IPV6):
4099 /* XXX what about other V6 headers?? */
4100 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
4101 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4102 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
4103 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4104 break;
4105 default:
4106 if (unlikely(net_ratelimit()))
4107 dev_warn(dev,
4108 "partial checksum but proto=%x!\n",
4109 skb->protocol);
4110 break;
4111 }
4112 }
4113
4114 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
4115 context_desc->seqnum_seed = 0;
4116 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
4117 context_desc->mss_l4len_idx =
4118 cpu_to_le32(tx_ring->reg_idx << 4);
4119
4120 buffer_info->time_stamp = jiffies;
4121 buffer_info->next_to_watch = i;
4122 buffer_info->dma = 0;
4123
4124 i++;
4125 if (i == tx_ring->count)
4126 i = 0;
4127 tx_ring->next_to_use = i;
4128
4129 return true;
4130 }
4131 return false;
4132}
4133
4134#define IGB_MAX_TXD_PWR 16
4135#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
4136
4137static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
4138 unsigned int first)
4139{
4140 struct igb_buffer *buffer_info;
4141 struct device *dev = tx_ring->dev;
4142 unsigned int hlen = skb_headlen(skb);
4143 unsigned int count = 0, i;
4144 unsigned int f;
4145 u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
4146
4147 i = tx_ring->next_to_use;
4148
4149 buffer_info = &tx_ring->buffer_info[i];
4150 BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
4151 buffer_info->length = hlen;
4152 /* set time_stamp *before* dma to help avoid a possible race */
4153 buffer_info->time_stamp = jiffies;
4154 buffer_info->next_to_watch = i;
4155 buffer_info->dma = dma_map_single(dev, skb->data, hlen,
4156 DMA_TO_DEVICE);
4157 if (dma_mapping_error(dev, buffer_info->dma))
4158 goto dma_error;
4159
4160 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
4161 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
4162 unsigned int len = frag->size;
4163
4164 count++;
4165 i++;
4166 if (i == tx_ring->count)
4167 i = 0;
4168
4169 buffer_info = &tx_ring->buffer_info[i];
4170 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
4171 buffer_info->length = len;
4172 buffer_info->time_stamp = jiffies;
4173 buffer_info->next_to_watch = i;
4174 buffer_info->mapped_as_page = true;
4175 buffer_info->dma = dma_map_page(dev,
4176 frag->page,
4177 frag->page_offset,
4178 len,
4179 DMA_TO_DEVICE);
4180 if (dma_mapping_error(dev, buffer_info->dma))
4181 goto dma_error;
4182
4183 }
4184
4185 tx_ring->buffer_info[i].skb = skb;
4186 tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
4187 /* multiply data chunks by size of headers */
4188 tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
4189 tx_ring->buffer_info[i].gso_segs = gso_segs;
4190 tx_ring->buffer_info[first].next_to_watch = i;
4191
4192 return ++count;
4193
4194dma_error:
4195 dev_err(dev, "TX DMA map failed\n");
4196
4197 /* clear timestamp and dma mappings for failed buffer_info mapping */
4198 buffer_info->dma = 0;
4199 buffer_info->time_stamp = 0;
4200 buffer_info->length = 0;
4201 buffer_info->next_to_watch = 0;
4202 buffer_info->mapped_as_page = false;
4203
4204 /* clear timestamp and dma mappings for remaining portion of packet */
4205 while (count--) {
4206 if (i == 0)
4207 i = tx_ring->count;
4208 i--;
4209 buffer_info = &tx_ring->buffer_info[i];
4210 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
4211 }
4212
4213 return 0;
4214}
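/*
 * Worked example for the bytecount formula above (hypothetical TSO skb):
 * with gso_segs = 3, header length hlen = 66 and skb->len = 4410
 * (66 bytes of headers plus 3 * 1448 bytes of payload),
 *
 *	bytecount = (3 - 1) * 66 + 4410 = 4542
 *
 * which equals the 3 * (66 + 1448) bytes that actually hit the wire,
 * since the headers are replicated into every segment.
 */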
4215
4216static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
4217 u32 tx_flags, int count, u32 paylen,
4218 u8 hdr_len)
4219{
4220 union e1000_adv_tx_desc *tx_desc;
4221 struct igb_buffer *buffer_info;
4222 u32 olinfo_status = 0, cmd_type_len;
4223 unsigned int i = tx_ring->next_to_use;
4224
4225 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
4226 E1000_ADVTXD_DCMD_DEXT);
4227
4228 if (tx_flags & IGB_TX_FLAGS_VLAN)
4229 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
4230
4231 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
4232 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
4233
4234 if (tx_flags & IGB_TX_FLAGS_TSO) {
4235 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
4236
4237 /* insert tcp checksum */
4238 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4239
4240 /* insert ip checksum */
4241 if (tx_flags & IGB_TX_FLAGS_IPV4)
4242 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
4243
4244 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
4245 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4246 }
4247
4248 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
4249 (tx_flags & (IGB_TX_FLAGS_CSUM |
4250 IGB_TX_FLAGS_TSO |
4251 IGB_TX_FLAGS_VLAN)))
4252 olinfo_status |= tx_ring->reg_idx << 4;
4253
4254 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
4255
4256 do {
4257 buffer_info = &tx_ring->buffer_info[i];
4258 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
4259 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
4260 tx_desc->read.cmd_type_len =
4261 cpu_to_le32(cmd_type_len | buffer_info->length);
4262 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
4263 count--;
4264 i++;
4265 if (i == tx_ring->count)
4266 i = 0;
4267 } while (count > 0);
4268
4269 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
4270 /* Force memory writes to complete before letting h/w
4271 * know there are new descriptors to fetch. (Only
4272 * applicable for weak-ordered memory model archs,
4273 * such as IA-64). */
4274 wmb();
4275
4276 tx_ring->next_to_use = i;
4277 writel(i, tx_ring->tail);
4278 /* We need this if more than one processor can write to our tail
4279 * at a time; it synchronizes IO on IA64/Altix systems. */
4280 mmiowb();
4281}
4282
4283static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
4284{
4285 struct net_device *netdev = tx_ring->netdev;
4286
4287 netif_stop_subqueue(netdev, tx_ring->queue_index);
4288
4289 /* Herbert's original patch had:
4290 * smp_mb__after_netif_stop_queue();
4291 * but since that doesn't exist yet, just open code it. */
4292 smp_mb();
4293
4294 /* We need to check again in case another CPU has just
4295 * made room available. */
4296 if (igb_desc_unused(tx_ring) < size)
4297 return -EBUSY;
4298
4299 /* A reprieve! */
4300 netif_wake_subqueue(netdev, tx_ring->queue_index);
4301
4302 u64_stats_update_begin(&tx_ring->tx_syncp2);
4303 tx_ring->tx_stats.restart_queue2++;
4304 u64_stats_update_end(&tx_ring->tx_syncp2);
4305
4306 return 0;
4307}
4308
4309static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
4310{
4311 if (igb_desc_unused(tx_ring) >= size)
4312 return 0;
4313 return __igb_maybe_stop_tx(tx_ring, size);
4314}
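/*
 * The smp_mb() in __igb_maybe_stop_tx() pairs with the one in
 * igb_clean_tx_irq(): the cleanup path advances next_to_clean, issues
 * smp_mb(), then re-wakes the queue if it observes it stopped, while the
 * stop path stops the queue, issues smp_mb(), then re-checks for free
 * descriptors. One side is therefore guaranteed to see the other's
 * update, so the queue cannot stay stopped with room available. A sketch
 * of the wake side (see igb_clean_tx_irq() below):
 *
 *	smp_mb();
 *	if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 *	    !test_bit(__IGB_DOWN, &adapter->state))
 *		netif_wake_subqueue(netdev, tx_ring->queue_index);
 */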
4315
4316netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
4317 struct igb_ring *tx_ring)
4318{
4319 int tso = 0, count;
4320 u32 tx_flags = 0;
4321 u16 first;
4322 u8 hdr_len = 0;
4323
4324 /* need: 1 descriptor per page,
4325 * + 2 desc gap to keep tail from touching head,
4326 * + 1 desc for skb->data,
4327 * + 1 desc for context descriptor,
4328 * otherwise try next time */
4329 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
4330 /* this is a hard error */
4331 return NETDEV_TX_BUSY;
4332 }
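/*
 * Worked example for the reservation above: a linear skb with 3 page
 * fragments reserves 3 (frags) + 1 (skb->data) + 1 (context) + 2 (gap)
 * = 7 descriptors, i.e. nr_frags + 4.
 */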
4333
4334 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4335 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4336 tx_flags |= IGB_TX_FLAGS_TSTAMP;
4337 }
4338
4339 if (vlan_tx_tag_present(skb)) {
4340 tx_flags |= IGB_TX_FLAGS_VLAN;
4341 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4342 }
4343
4344 if (skb->protocol == htons(ETH_P_IP))
4345 tx_flags |= IGB_TX_FLAGS_IPV4;
4346
4347 first = tx_ring->next_to_use;
4348 if (skb_is_gso(skb)) {
4349 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
4350
4351 if (tso < 0) {
4352 dev_kfree_skb_any(skb);
4353 return NETDEV_TX_OK;
4354 }
4355 }
4356
4357 if (tso)
4358 tx_flags |= IGB_TX_FLAGS_TSO;
4359 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
4360 (skb->ip_summed == CHECKSUM_PARTIAL))
4361 tx_flags |= IGB_TX_FLAGS_CSUM;
4362
4363 /*
4364 * count reflects descriptors mapped, if 0 or less then mapping error
4365 * has occurred and we need to rewind the descriptor queue
4366 */
4367 count = igb_tx_map_adv(tx_ring, skb, first);
4368 if (!count) {
4369 dev_kfree_skb_any(skb);
4370 tx_ring->buffer_info[first].time_stamp = 0;
4371 tx_ring->next_to_use = first;
4372 return NETDEV_TX_OK;
4373 }
4374
4375 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
4376
4377 /* Make sure there is space in the ring for the next send. */
4378 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
4379
4380 return NETDEV_TX_OK;
4381}
4382
4383static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
4384 struct net_device *netdev)
4385{
4386 struct igb_adapter *adapter = netdev_priv(netdev);
4387 struct igb_ring *tx_ring;
4388 int r_idx = 0;
4389
4390 if (test_bit(__IGB_DOWN, &adapter->state)) {
4391 dev_kfree_skb_any(skb);
4392 return NETDEV_TX_OK;
4393 }
4394
4395 if (skb->len <= 0) {
4396 dev_kfree_skb_any(skb);
4397 return NETDEV_TX_OK;
4398 }
4399
4400 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
4401 tx_ring = adapter->multi_tx_table[r_idx];
4402
4403 /* This goes back to the question of how to logically map a tx queue
4404 * to a flow. Right now, performance is slightly degraded when using
4405 * multiple tx queues. If the stack breaks away from a
4406 * single qdisc implementation, we can look at this again. */
4407 return igb_xmit_frame_ring_adv(skb, tx_ring);
4408}
4409
4410/**
4411 * igb_tx_timeout - Respond to a Tx Hang
4412 * @netdev: network interface device structure
4413 **/
4414static void igb_tx_timeout(struct net_device *netdev)
4415{
4416 struct igb_adapter *adapter = netdev_priv(netdev);
4417 struct e1000_hw *hw = &adapter->hw;
4418
4419 /* Do the reset outside of interrupt context */
4420 adapter->tx_timeout_count++;
4421
4422 if (hw->mac.type == e1000_82580)
4423 hw->dev_spec._82575.global_device_reset = true;
4424
4425 schedule_work(&adapter->reset_task);
4426 wr32(E1000_EICS,
4427 (adapter->eims_enable_mask & ~adapter->eims_other));
4428}
4429
4430static void igb_reset_task(struct work_struct *work)
4431{
4432 struct igb_adapter *adapter;
4433 adapter = container_of(work, struct igb_adapter, reset_task);
4434
4435 igb_dump(adapter);
4436 netdev_err(adapter->netdev, "Reset adapter\n");
4437 igb_reinit_locked(adapter);
4438}
4439
4440/**
4441 * igb_get_stats64 - Get System Network Statistics
4442 * @netdev: network interface device structure
4443 * @stats: rtnl_link_stats64 pointer
4444 *
4445 **/
4446static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4447 struct rtnl_link_stats64 *stats)
4448{
4449 struct igb_adapter *adapter = netdev_priv(netdev);
4450
4451 spin_lock(&adapter->stats64_lock);
4452 igb_update_stats(adapter, &adapter->stats64);
4453 memcpy(stats, &adapter->stats64, sizeof(*stats));
4454 spin_unlock(&adapter->stats64_lock);
4455
4456 return stats;
4457}
4458
4459/**
4460 * igb_change_mtu - Change the Maximum Transfer Unit
4461 * @netdev: network interface device structure
4462 * @new_mtu: new value for maximum frame size
4463 *
4464 * Returns 0 on success, negative on failure
4465 **/
4466static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4467{
4468 struct igb_adapter *adapter = netdev_priv(netdev);
4469 struct pci_dev *pdev = adapter->pdev;
4470 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4471 u32 rx_buffer_len, i;
4472
4473 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
4474 dev_err(&pdev->dev, "Invalid MTU setting\n");
4475 return -EINVAL;
4476 }
4477
4478 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
4479 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
4480 return -EINVAL;
4481 }
4482
4483 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4484 msleep(1);
4485
4486 /* igb_down has a dependency on max_frame_size */
4487 adapter->max_frame_size = max_frame;
4488
4489 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
4490 * means we reserve 2 more; this pushes us to allocate from the next
4491 * larger slab size.
4492 * i.e. RXBUFFER_2048 --> size-4096 slab
4493 */
4494
4495 if (adapter->hw.mac.type == e1000_82580)
4496 max_frame += IGB_TS_HDR_LEN;
4497
4498 if (max_frame <= IGB_RXBUFFER_1024)
4499 rx_buffer_len = IGB_RXBUFFER_1024;
4500 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
4501 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
4502 else
4503 rx_buffer_len = IGB_RXBUFFER_128;
4504
4505 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
4506 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
4507 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;
4508
4509 if ((adapter->hw.mac.type == e1000_82580) &&
4510 (rx_buffer_len == IGB_RXBUFFER_128))
4511 rx_buffer_len += IGB_RXBUFFER_64;
4512
4513 if (netif_running(netdev))
4514 igb_down(adapter);
4515
4516 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
4517 netdev->mtu, new_mtu);
4518 netdev->mtu = new_mtu;
4519
4520 for (i = 0; i < adapter->num_rx_queues; i++)
4521 adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
4522
4523 if (netif_running(netdev))
4524 igb_up(adapter);
4525 else
4526 igb_reset(adapter);
4527
4528 clear_bit(__IGB_RESETTING, &adapter->state);
4529
4530 return 0;
4531}
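/*
 * Worked example of the buffer sizing above (assuming the conventional
 * values IGB_RXBUFFER_1024 = 1024 and MAXIMUM_ETHERNET_VLAN_SIZE = 1522):
 * an MTU of 1500 gives max_frame = 1500 + 14 + 4 = 1518, which selects
 * rx_buffer_len = 1522; an MTU of 9000 gives max_frame = 9018, which
 * selects the 128-byte header buffer, with the rest of each frame
 * spilling into half-page buffers.
 */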
4532
4533/**
4534 * igb_update_stats - Update the board statistics counters
4535 * @adapter: board private structure
4536 **/
4537
4538void igb_update_stats(struct igb_adapter *adapter,
4539 struct rtnl_link_stats64 *net_stats)
4540{
4541 struct e1000_hw *hw = &adapter->hw;
4542 struct pci_dev *pdev = adapter->pdev;
4543 u32 reg, mpc;
4544 u16 phy_tmp;
4545 int i;
4546 u64 bytes, packets;
4547 unsigned int start;
4548 u64 _bytes, _packets;
4549
4550#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4551
4552 /*
4553 * Prevent stats update while adapter is being reset, or if the pci
4554 * connection is down.
4555 */
4556 if (adapter->link_speed == 0)
4557 return;
4558 if (pci_channel_offline(pdev))
4559 return;
4560
4561 bytes = 0;
4562 packets = 0;
4563 for (i = 0; i < adapter->num_rx_queues; i++) {
4564 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
4565 struct igb_ring *ring = adapter->rx_ring[i];
4566
4567 ring->rx_stats.drops += rqdpc_tmp;
4568 net_stats->rx_fifo_errors += rqdpc_tmp;
4569
4570 do {
4571 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4572 _bytes = ring->rx_stats.bytes;
4573 _packets = ring->rx_stats.packets;
4574 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4575 bytes += _bytes;
4576 packets += _packets;
4577 }
4578
4579 net_stats->rx_bytes = bytes;
4580 net_stats->rx_packets = packets;
4581
4582 bytes = 0;
4583 packets = 0;
4584 for (i = 0; i < adapter->num_tx_queues; i++) {
4585 struct igb_ring *ring = adapter->tx_ring[i];
4586 do {
4587 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4588 _bytes = ring->tx_stats.bytes;
4589 _packets = ring->tx_stats.packets;
4590 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4591 bytes += _bytes;
4592 packets += _packets;
4593 }
4594 net_stats->tx_bytes = bytes;
4595 net_stats->tx_packets = packets;
4596
4597 /* read stats registers */
4598 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4599 adapter->stats.gprc += rd32(E1000_GPRC);
4600 adapter->stats.gorc += rd32(E1000_GORCL);
4601 rd32(E1000_GORCH); /* clear GORCL */
4602 adapter->stats.bprc += rd32(E1000_BPRC);
4603 adapter->stats.mprc += rd32(E1000_MPRC);
4604 adapter->stats.roc += rd32(E1000_ROC);
4605
4606 adapter->stats.prc64 += rd32(E1000_PRC64);
4607 adapter->stats.prc127 += rd32(E1000_PRC127);
4608 adapter->stats.prc255 += rd32(E1000_PRC255);
4609 adapter->stats.prc511 += rd32(E1000_PRC511);
4610 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4611 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4612 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4613 adapter->stats.sec += rd32(E1000_SEC);
4614
4615 mpc = rd32(E1000_MPC);
4616 adapter->stats.mpc += mpc;
4617 net_stats->rx_fifo_errors += mpc;
4618 adapter->stats.scc += rd32(E1000_SCC);
4619 adapter->stats.ecol += rd32(E1000_ECOL);
4620 adapter->stats.mcc += rd32(E1000_MCC);
4621 adapter->stats.latecol += rd32(E1000_LATECOL);
4622 adapter->stats.dc += rd32(E1000_DC);
4623 adapter->stats.rlec += rd32(E1000_RLEC);
4624 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4625 adapter->stats.xontxc += rd32(E1000_XONTXC);
4626 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4627 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4628 adapter->stats.fcruc += rd32(E1000_FCRUC);
4629 adapter->stats.gptc += rd32(E1000_GPTC);
4630 adapter->stats.gotc += rd32(E1000_GOTCL);
4631 rd32(E1000_GOTCH); /* clear GOTCL */
4632 adapter->stats.rnbc += rd32(E1000_RNBC);
4633 adapter->stats.ruc += rd32(E1000_RUC);
4634 adapter->stats.rfc += rd32(E1000_RFC);
4635 adapter->stats.rjc += rd32(E1000_RJC);
4636 adapter->stats.tor += rd32(E1000_TORH);
4637 adapter->stats.tot += rd32(E1000_TOTH);
4638 adapter->stats.tpr += rd32(E1000_TPR);
4639
4640 adapter->stats.ptc64 += rd32(E1000_PTC64);
4641 adapter->stats.ptc127 += rd32(E1000_PTC127);
4642 adapter->stats.ptc255 += rd32(E1000_PTC255);
4643 adapter->stats.ptc511 += rd32(E1000_PTC511);
4644 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4645 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4646
4647 adapter->stats.mptc += rd32(E1000_MPTC);
4648 adapter->stats.bptc += rd32(E1000_BPTC);
4649
4650 adapter->stats.tpt += rd32(E1000_TPT);
4651 adapter->stats.colc += rd32(E1000_COLC);
4652
4653 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
4654 /* read internal phy specific stats */
4655 reg = rd32(E1000_CTRL_EXT);
4656 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4657 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4658 adapter->stats.tncrs += rd32(E1000_TNCRS);
4659 }
4660
4661 adapter->stats.tsctc += rd32(E1000_TSCTC);
4662 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4663
4664 adapter->stats.iac += rd32(E1000_IAC);
4665 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4666 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4667 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4668 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4669 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4670 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4671 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4672 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4673
4674 /* Fill out the OS statistics structure */
4675 net_stats->multicast = adapter->stats.mprc;
4676 net_stats->collisions = adapter->stats.colc;
4677
4678 /* Rx Errors */
4679
4680 /* RLEC on some newer hardware can be incorrect, so build
4681 * our own version based on RUC and ROC */
4682 net_stats->rx_errors = adapter->stats.rxerrc +
4683 adapter->stats.crcerrs + adapter->stats.algnerrc +
4684 adapter->stats.ruc + adapter->stats.roc +
4685 adapter->stats.cexterr;
4686 net_stats->rx_length_errors = adapter->stats.ruc +
4687 adapter->stats.roc;
4688 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4689 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4690 net_stats->rx_missed_errors = adapter->stats.mpc;
4691
4692 /* Tx Errors */
4693 net_stats->tx_errors = adapter->stats.ecol +
4694 adapter->stats.latecol;
4695 net_stats->tx_aborted_errors = adapter->stats.ecol;
4696 net_stats->tx_window_errors = adapter->stats.latecol;
4697 net_stats->tx_carrier_errors = adapter->stats.tncrs;
4698
4699 /* Tx Dropped needs to be maintained elsewhere */
4700
4701 /* Phy Stats */
4702 if (hw->phy.media_type == e1000_media_type_copper) {
4703 if ((adapter->link_speed == SPEED_1000) &&
4704 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
4705 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4706 adapter->phy_stats.idle_errors += phy_tmp;
4707 }
4708 }
4709
4710 /* Management Stats */
4711 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4712 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4713 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
4714
4715 /* OS2BMC Stats */
4716 reg = rd32(E1000_MANC);
4717 if (reg & E1000_MANC_EN_BMC2OS) {
4718 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4719 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4720 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4721 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4722 }
4723}
4724
4725static irqreturn_t igb_msix_other(int irq, void *data)
4726{
4727 struct igb_adapter *adapter = data;
4728 struct e1000_hw *hw = &adapter->hw;
4729 u32 icr = rd32(E1000_ICR);
4730 /* reading ICR causes bit 31 of EICR to be cleared */
4731
4732 if (icr & E1000_ICR_DRSTA)
4733 schedule_work(&adapter->reset_task);
4734
4735 if (icr & E1000_ICR_DOUTSYNC) {
4736 /* HW is reporting DMA is out of sync */
4737 adapter->stats.doosync++;
4738 /* The DMA Out of Sync is also an indication of a spoof event
4739 * in IOV mode. Check the Wrong VM Behavior register to
4740 * see if it is really a spoof event. */
4741 igb_check_wvbr(adapter);
4742 }
4743
4744 /* Check for a mailbox event */
4745 if (icr & E1000_ICR_VMMB)
4746 igb_msg_task(adapter);
4747
4748 if (icr & E1000_ICR_LSC) {
4749 hw->mac.get_link_status = 1;
4750 /* guard against interrupt when we're going down */
4751 if (!test_bit(__IGB_DOWN, &adapter->state))
4752 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4753 }
4754
4755 if (adapter->vfs_allocated_count)
4756 wr32(E1000_IMS, E1000_IMS_LSC |
4757 E1000_IMS_VMMB |
4758 E1000_IMS_DOUTSYNC);
4759 else
4760 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
4761 wr32(E1000_EIMS, adapter->eims_other);
4762
4763 return IRQ_HANDLED;
4764}
4765
4766static void igb_write_itr(struct igb_q_vector *q_vector)
4767{
4768 struct igb_adapter *adapter = q_vector->adapter;
4769 u32 itr_val = q_vector->itr_val & 0x7FFC;
4770
4771 if (!q_vector->set_itr)
4772 return;
4773
4774 if (!itr_val)
4775 itr_val = 0x4;
4776
4777 if (adapter->hw.mac.type == e1000_82575)
4778 itr_val |= itr_val << 16;
4779 else
4780 itr_val |= 0x8000000;
4781
4782 writel(itr_val, q_vector->itr_register);
4783 q_vector->set_itr = 0;
4784}
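/*
 * Worked example (hypothetical): with itr_val = 196 (0xC4), masking with
 * 0x7FFC leaves the value unchanged. On 82575 the register is then
 * written as 0x00C400C4, i.e. the interval mirrored into both 16-bit
 * halves; on later parts it is written as 0xC4 | 0x8000000.
 */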
4785
4786static irqreturn_t igb_msix_ring(int irq, void *data)
4787{
4788 struct igb_q_vector *q_vector = data;
4789
4790 /* Write the ITR value calculated from the previous interrupt. */
4791 igb_write_itr(q_vector);
4792
4793 napi_schedule(&q_vector->napi);
4794
4795 return IRQ_HANDLED;
4796}
4797
4798#ifdef CONFIG_IGB_DCA
4799static void igb_update_dca(struct igb_q_vector *q_vector)
4800{
4801 struct igb_adapter *adapter = q_vector->adapter;
4802 struct e1000_hw *hw = &adapter->hw;
4803 int cpu = get_cpu();
4804
4805 if (q_vector->cpu == cpu)
4806 goto out_no_update;
4807
4808 if (q_vector->tx_ring) {
4809 int q = q_vector->tx_ring->reg_idx;
4810 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4811 if (hw->mac.type == e1000_82575) {
4812 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4813 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4814 } else {
4815 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4816 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4817 E1000_DCA_TXCTRL_CPUID_SHIFT;
4818 }
4819 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4820 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4821 }
4822 if (q_vector->rx_ring) {
4823 int q = q_vector->rx_ring->reg_idx;
4824 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4825 if (hw->mac.type == e1000_82575) {
4826 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4827 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4828 } else {
4829 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4830 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4831 E1000_DCA_RXCTRL_CPUID_SHIFT;
4832 }
4833 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4834 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4835 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4836 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
4837 }
4838 q_vector->cpu = cpu;
4839out_no_update:
4840 put_cpu();
4841}
4842
4843static void igb_setup_dca(struct igb_adapter *adapter)
4844{
4845 struct e1000_hw *hw = &adapter->hw;
4846 int i;
4847
4848 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
4849 return;
4850
4851 /* Always use CB2 mode, difference is masked in the CB driver. */
4852 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4853
4854 for (i = 0; i < adapter->num_q_vectors; i++) {
4855 adapter->q_vector[i]->cpu = -1;
4856 igb_update_dca(adapter->q_vector[i]);
4857 }
4858}
4859
4860static int __igb_notify_dca(struct device *dev, void *data)
4861{
4862 struct net_device *netdev = dev_get_drvdata(dev);
4863 struct igb_adapter *adapter = netdev_priv(netdev);
4864 struct pci_dev *pdev = adapter->pdev;
4865 struct e1000_hw *hw = &adapter->hw;
4866 unsigned long event = *(unsigned long *)data;
4867
4868 switch (event) {
4869 case DCA_PROVIDER_ADD:
4870 /* if already enabled, don't do it again */
4871 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
4872 break;
4873 if (dca_add_requester(dev) == 0) {
4874 adapter->flags |= IGB_FLAG_DCA_ENABLED;
4875 dev_info(&pdev->dev, "DCA enabled\n");
4876 igb_setup_dca(adapter);
4877 break;
4878 }
4879 /* Fall Through since DCA is disabled. */
4880 case DCA_PROVIDER_REMOVE:
4881 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
4882 /* without this a class_device is left
4883 * hanging around in the sysfs model */
4884 dca_remove_requester(dev);
4885 dev_info(&pdev->dev, "DCA disabled\n");
4886 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
4887 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
4888 }
4889 break;
4890 }
4891
4892 return 0;
4893}
4894
4895static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4896 void *p)
4897{
4898 int ret_val;
4899
4900 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4901 __igb_notify_dca);
4902
4903 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4904}
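/*
 * This callback is wired up to the DCA core through a notifier block
 * registered at module load elsewhere in this file, roughly:
 *
 *	static struct notifier_block dca_notifier = {
 *		.notifier_call	= igb_notify_dca,
 *	};
 *	dca_register_notify(&dca_notifier);
 */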
4905#endif /* CONFIG_IGB_DCA */
4906
4907static void igb_ping_all_vfs(struct igb_adapter *adapter)
4908{
4909 struct e1000_hw *hw = &adapter->hw;
4910 u32 ping;
4911 int i;
4912
4913 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
4914 ping = E1000_PF_CONTROL_MSG;
4915 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
4916 ping |= E1000_VT_MSGTYPE_CTS;
4917 igb_write_mbx(hw, &ping, 1, i);
4918 }
4919}
4920
4921static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4922{
4923 struct e1000_hw *hw = &adapter->hw;
4924 u32 vmolr = rd32(E1000_VMOLR(vf));
4925 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4926
4927 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
4928 IGB_VF_FLAG_MULTI_PROMISC);
4929 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4930
4931 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4932 vmolr |= E1000_VMOLR_MPME;
4933 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
4934 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4935 } else {
4936 /*
4937 * if we have hashes and we are clearing a multicast promisc
4938 * flag, we need to write the hashes to the MTA, as this step
4939 * was previously skipped
4940 */
4941 if (vf_data->num_vf_mc_hashes > 30) {
4942 vmolr |= E1000_VMOLR_MPME;
4943 } else if (vf_data->num_vf_mc_hashes) {
4944 int j;
4945 vmolr |= E1000_VMOLR_ROMPE;
4946 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4947 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4948 }
4949 }
4950
4951 wr32(E1000_VMOLR(vf), vmolr);
4952
4953 /* if any flags are left unprocessed, they are likely not supported */
4954 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4955 return -EINVAL;
4956
4957 return 0;
4958
4959}
4960
4961static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4962 u32 *msgbuf, u32 vf)
4963{
4964 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4965 u16 *hash_list = (u16 *)&msgbuf[1];
4966 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4967 int i;
4968
4969 /* salt away the number of multicast addresses assigned
4970 * to this VF for later use to restore when the PF multicast
4971 * list changes
4972 */
4973 vf_data->num_vf_mc_hashes = n;
4974
4975 /* only up to 30 hash values supported */
4976 if (n > 30)
4977 n = 30;
4978
4979 /* store the hashes for later use */
4980 for (i = 0; i < n; i++)
4981 vf_data->vf_mc_hashes[i] = hash_list[i];
4982
4983 /* Flush and reset the mta with the new values */
4984 igb_set_rx_mode(adapter->netdev);
4985
4986 return 0;
4987}
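/*
 * Sketch of the mailbox message parsed above (layout inferred from the
 * code; a hypothetical VF request carrying two hash values):
 *
 *	msgbuf[0] = E1000_VF_SET_MULTICAST |
 *	            (2 << E1000_VT_MSGINFO_SHIFT);
 *	((u16 *)&msgbuf[1])[0] = hash0;
 *	((u16 *)&msgbuf[1])[1] = hash1;
 */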
4988
4989static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
4990{
4991 struct e1000_hw *hw = &adapter->hw;
4992 struct vf_data_storage *vf_data;
4993 int i, j;
4994
4995 for (i = 0; i < adapter->vfs_allocated_count; i++) {
4996 u32 vmolr = rd32(E1000_VMOLR(i));
4997 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4998
4999 vf_data = &adapter->vf_data[i];
5000
5001 if ((vf_data->num_vf_mc_hashes > 30) ||
5002 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5003 vmolr |= E1000_VMOLR_MPME;
5004 } else if (vf_data->num_vf_mc_hashes) {
5005 vmolr |= E1000_VMOLR_ROMPE;
5006 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5007 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5008 }
5009 wr32(E1000_VMOLR(i), vmolr);
5010 }
5011}
5012
5013static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5014{
5015 struct e1000_hw *hw = &adapter->hw;
5016 u32 pool_mask, reg, vid;
5017 int i;
5018
5019 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5020
5021 /* Find the vlan filter for this id */
5022 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5023 reg = rd32(E1000_VLVF(i));
5024
5025 /* remove the vf from the pool */
5026 reg &= ~pool_mask;
5027
5028 /* if pool is empty then remove entry from vfta */
5029 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5030 (reg & E1000_VLVF_VLANID_ENABLE)) {
5031 vid = reg & E1000_VLVF_VLANID_MASK;
5032 igb_vfta_set(hw, vid, false);
5033 reg = 0;
5034 }
5035
5036 wr32(E1000_VLVF(i), reg);
5037 }
5038
5039 adapter->vf_data[vf].vlans_enabled = 0;
5040}
5041
5042static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5043{
5044 struct e1000_hw *hw = &adapter->hw;
5045 u32 reg, i;
5046
5047 /* The vlvf table only exists on 82576 hardware and newer */
5048 if (hw->mac.type < e1000_82576)
5049 return -1;
5050
5051 /* we only need to do this if VMDq is enabled */
5052 if (!adapter->vfs_allocated_count)
5053 return -1;
5054
5055 /* Find the vlan filter for this id */
5056 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5057 reg = rd32(E1000_VLVF(i));
5058 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5059 vid == (reg & E1000_VLVF_VLANID_MASK))
5060 break;
5061 }
5062
5063 if (add) {
5064 if (i == E1000_VLVF_ARRAY_SIZE) {
5065 /* Did not find a matching VLAN ID entry that was
5066 * enabled. Search for a free filter entry, i.e.
5067 * one without the enable bit set
5068 */
5069 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5070 reg = rd32(E1000_VLVF(i));
5071 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5072 break;
5073 }
5074 }
5075 if (i < E1000_VLVF_ARRAY_SIZE) {
5076 /* Found an enabled/available entry */
5077 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5078
5079 /* if !enabled we need to set this up in vfta */
5080 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
5081 /* add VID to filter table */
5082 igb_vfta_set(hw, vid, true);
5083 reg |= E1000_VLVF_VLANID_ENABLE;
5084 }
5085 reg &= ~E1000_VLVF_VLANID_MASK;
5086 reg |= vid;
5087 wr32(E1000_VLVF(i), reg);
5088
5089 /* do not modify RLPML for PF devices */
5090 if (vf >= adapter->vfs_allocated_count)
5091 return 0;
5092
5093 if (!adapter->vf_data[vf].vlans_enabled) {
5094 u32 size;
5095 reg = rd32(E1000_VMOLR(vf));
5096 size = reg & E1000_VMOLR_RLPML_MASK;
5097 size += 4;
5098 reg &= ~E1000_VMOLR_RLPML_MASK;
5099 reg |= size;
5100 wr32(E1000_VMOLR(vf), reg);
5101 }
5102
5103 adapter->vf_data[vf].vlans_enabled++;
5104 return 0;
5105 }
5106 } else {
5107 if (i < E1000_VLVF_ARRAY_SIZE) {
5108 /* remove vf from the pool */
5109 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5110 /* if pool is empty then remove entry from vfta */
5111 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5112 reg = 0;
5113 igb_vfta_set(hw, vid, false);
5114 }
5115 wr32(E1000_VLVF(i), reg);
5116
5117 /* do not modify RLPML for PF devices */
5118 if (vf >= adapter->vfs_allocated_count)
5119 return 0;
5120
5121 adapter->vf_data[vf].vlans_enabled--;
5122 if (!adapter->vf_data[vf].vlans_enabled) {
5123 u32 size;
5124 reg = rd32(E1000_VMOLR(vf));
5125 size = reg & E1000_VMOLR_RLPML_MASK;
5126 size -= 4;
5127 reg &= ~E1000_VMOLR_RLPML_MASK;
5128 reg |= size;
5129 wr32(E1000_VMOLR(vf), reg);
5130 }
5131 }
5132 }
5133 return 0;
5134}
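/*
 * Worked example of the RLPML adjustment above (hypothetical values):
 * when the first VLAN is enabled for a VF whose VMOLR RLPML field reads
 * 1522, the field is rewritten to 1526 to make room for the 4-byte tag;
 * removing that VF's last VLAN shrinks it back to 1522.
 */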
5135
5136static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5137{
5138 struct e1000_hw *hw = &adapter->hw;
5139
5140 if (vid)
5141 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5142 else
5143 wr32(E1000_VMVIR(vf), 0);
5144}
5145
5146static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5147 int vf, u16 vlan, u8 qos)
5148{
5149 int err = 0;
5150 struct igb_adapter *adapter = netdev_priv(netdev);
5151
5152 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5153 return -EINVAL;
5154 if (vlan || qos) {
5155 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5156 if (err)
5157 goto out;
5158 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5159 igb_set_vmolr(adapter, vf, !vlan);
5160 adapter->vf_data[vf].pf_vlan = vlan;
5161 adapter->vf_data[vf].pf_qos = qos;
5162 dev_info(&adapter->pdev->dev,
5163 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5164 if (test_bit(__IGB_DOWN, &adapter->state)) {
5165 dev_warn(&adapter->pdev->dev,
5166 "The VF VLAN has been set,"
5167 " but the PF device is not up.\n");
5168 dev_warn(&adapter->pdev->dev,
5169 "Bring the PF device up before"
5170 " attempting to use the VF device.\n");
5171 }
5172 } else {
5173 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5174 false, vf);
5175 igb_set_vmvir(adapter, vlan, vf);
5176 igb_set_vmolr(adapter, vf, true);
5177 adapter->vf_data[vf].pf_vlan = 0;
5178 adapter->vf_data[vf].pf_qos = 0;
5179 }
5180out:
5181 return err;
5182}
5183
5184static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5185{
5186 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5187 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5188
5189 return igb_vlvf_set(adapter, vid, add, vf);
5190}
5191
5192static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
5193{
5194 /* clear flags - except flag that indicates PF has set the MAC */
5195 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
5196 adapter->vf_data[vf].last_nack = jiffies;
5197
5198 /* reset offloads to defaults */
5199 igb_set_vmolr(adapter, vf, true);
5200
5201 /* reset vlans for device */
5202 igb_clear_vf_vfta(adapter, vf);
5203 if (adapter->vf_data[vf].pf_vlan)
5204 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5205 adapter->vf_data[vf].pf_vlan,
5206 adapter->vf_data[vf].pf_qos);
5207 else
5208 igb_clear_vf_vfta(adapter, vf);
5209
5210 /* reset multicast table array for vf */
5211 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5212
5213 /* Flush and reset the mta with the new values */
5214 igb_set_rx_mode(adapter->netdev);
5215}
5216
5217static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5218{
5219 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5220
5221 /* generate a new mac address as we were hotplug removed/added */
5222 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
5223 random_ether_addr(vf_mac);
5224
5225 /* process remaining reset events */
5226 igb_vf_reset(adapter, vf);
5227}
5228
5229static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
5230{
5231 struct e1000_hw *hw = &adapter->hw;
5232 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5233 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
5234 u32 reg, msgbuf[3];
5235 u8 *addr = (u8 *)(&msgbuf[1]);
5236
5237 /* process all the same items cleared in a function level reset */
5238 igb_vf_reset(adapter, vf);
5239
5240 /* set vf mac address */
5241 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
5242
5243 /* enable transmit and receive for vf */
5244 reg = rd32(E1000_VFTE);
5245 wr32(E1000_VFTE, reg | (1 << vf));
5246 reg = rd32(E1000_VFRE);
5247 wr32(E1000_VFRE, reg | (1 << vf));
5248
5249 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
5250
5251 /* reply to reset with ack and vf mac address */
5252 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5253 memcpy(addr, vf_mac, 6);
5254 igb_write_mbx(hw, msgbuf, 3, vf);
5255}
5256
5257static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5258{
5259 /*
5260 * The VF MAC Address is stored in a packed array of bytes
5261 * starting at the second 32 bit word of the msg array
5262 */
5263 unsigned char *addr = (unsigned char *)&msg[1];
5264 int err = -1;
5265
5266 if (is_valid_ether_addr(addr))
5267 err = igb_set_vf_mac(adapter, vf, addr);
5268
5269 return err;
5270}
5271
5272static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5273{
5274 struct e1000_hw *hw = &adapter->hw;
5275 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5276 u32 msg = E1000_VT_MSGTYPE_NACK;
5277
5278 /* if device isn't clear to send it shouldn't be reading either */
5279 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5280 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
5281 igb_write_mbx(hw, &msg, 1, vf);
5282 vf_data->last_nack = jiffies;
5283 }
5284}
5285
5286static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
5287{
5288 struct pci_dev *pdev = adapter->pdev;
5289 u32 msgbuf[E1000_VFMAILBOX_SIZE];
5290 struct e1000_hw *hw = &adapter->hw;
5291 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5292 s32 retval;
5293
5294 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
5295
5296 if (retval) {
5297 /* if receive failed, revoke the VF's CTS status and restart init */
5298 dev_err(&pdev->dev, "Error receiving message from VF\n");
5299 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5300 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5301 return;
5302 goto out;
5303 }
5304
5305 /* this is a message we already processed, do nothing */
5306 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
5307 return;
5308
5309 /*
5310 * until the vf completes a reset it should not be
5311 * allowed to start any configuration.
5312 */
5313
5314 if (msgbuf[0] == E1000_VF_RESET) {
5315 igb_vf_reset_msg(adapter, vf);
5316 return;
5317 }
5318
5319 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
5320 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5321 return;
5322 retval = -1;
5323 goto out;
5324 }
5325
5326 switch ((msgbuf[0] & 0xFFFF)) {
5327 case E1000_VF_SET_MAC_ADDR:
5328 retval = -EINVAL;
5329 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5330 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5331 else
5332 dev_warn(&pdev->dev,
5333 "VF %d attempted to override administratively "
5334 "set MAC address\nReload the VF driver to "
5335 "resume operations\n", vf);
5336 break;
5337 case E1000_VF_SET_PROMISC:
5338 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5339 break;
5340 case E1000_VF_SET_MULTICAST:
5341 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5342 break;
5343 case E1000_VF_SET_LPE:
5344 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5345 break;
5346 case E1000_VF_SET_VLAN:
5347 retval = -1;
5348 if (vf_data->pf_vlan)
5349 dev_warn(&pdev->dev,
5350 "VF %d attempted to override administratively "
5351 "set VLAN tag\nReload the VF driver to "
5352 "resume operations\n", vf);
5353 else
5354 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
5355 break;
5356 default:
5357 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
5358 retval = -1;
5359 break;
5360 }
5361
5362 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5363out:
5364 /* notify the VF of the results of what it sent us */
5365 if (retval)
5366 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5367 else
5368 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5369
5370 igb_write_mbx(hw, msgbuf, 1, vf);
5371}
5372
5373static void igb_msg_task(struct igb_adapter *adapter)
5374{
5375 struct e1000_hw *hw = &adapter->hw;
5376 u32 vf;
5377
5378 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5379 /* process any reset requests */
5380 if (!igb_check_for_rst(hw, vf))
5381 igb_vf_reset_event(adapter, vf);
5382
5383 /* process any messages pending */
5384 if (!igb_check_for_msg(hw, vf))
5385 igb_rcv_msg_from_vf(adapter, vf);
5386
5387 /* process any acks */
5388 if (!igb_check_for_ack(hw, vf))
5389 igb_rcv_ack_from_vf(adapter, vf);
5390 }
5391}
5392
5393/**
5394 * igb_set_uta - Set unicast filter table address
5395 * @adapter: board private structure
5396 *
5397 * The unicast table address is a register array of 32-bit registers.
5398 * The table is meant to be used in a way similar to how the MTA is used;
5399 * however, due to certain limitations in the hardware it is necessary to
5400 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5401 * enable bit to allow vlan tag stripping when promiscuous mode is enabled.
5402 **/
5403static void igb_set_uta(struct igb_adapter *adapter)
5404{
5405 struct e1000_hw *hw = &adapter->hw;
5406 int i;
5407
5408 /* The UTA table only exists on 82576 hardware and newer */
5409 if (hw->mac.type < e1000_82576)
5410 return;
5411
5412 /* we only need to do this if VMDq is enabled */
5413 if (!adapter->vfs_allocated_count)
5414 return;
5415
5416 for (i = 0; i < hw->mac.uta_reg_count; i++)
5417 array_wr32(E1000_UTA, i, ~0);
5418}
5419
5420/**
5421 * igb_intr_msi - Interrupt Handler
5422 * @irq: interrupt number
5423 * @data: pointer to a network interface device structure
5424 **/
5425static irqreturn_t igb_intr_msi(int irq, void *data)
5426{
5427 struct igb_adapter *adapter = data;
5428 struct igb_q_vector *q_vector = adapter->q_vector[0];
5429 struct e1000_hw *hw = &adapter->hw;
5430 /* read ICR disables interrupts using IAM */
5431 u32 icr = rd32(E1000_ICR);
5432
5433 igb_write_itr(q_vector);
5434
5435 if (icr & E1000_ICR_DRSTA)
5436 schedule_work(&adapter->reset_task);
5437
5438 if (icr & E1000_ICR_DOUTSYNC) {
5439 /* HW is reporting DMA is out of sync */
5440 adapter->stats.doosync++;
5441 }
5442
5443 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5444 hw->mac.get_link_status = 1;
5445 if (!test_bit(__IGB_DOWN, &adapter->state))
5446 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5447 }
5448
5449 napi_schedule(&q_vector->napi);
5450
5451 return IRQ_HANDLED;
5452}
5453
5454/**
5455 * igb_intr - Legacy Interrupt Handler
5456 * @irq: interrupt number
5457 * @data: pointer to a network interface device structure
5458 **/
5459static irqreturn_t igb_intr(int irq, void *data)
5460{
5461 struct igb_adapter *adapter = data;
5462 struct igb_q_vector *q_vector = adapter->q_vector[0];
5463 struct e1000_hw *hw = &adapter->hw;
5464 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5465 * need for the IMC write */
5466 u32 icr = rd32(E1000_ICR);
5467 if (!icr)
5468 return IRQ_NONE; /* Not our interrupt */
5469
5470 igb_write_itr(q_vector);
5471
5472 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5473 * not set, then the adapter didn't send an interrupt */
5474 if (!(icr & E1000_ICR_INT_ASSERTED))
5475 return IRQ_NONE;
5476
5477 if (icr & E1000_ICR_DRSTA)
5478 schedule_work(&adapter->reset_task);
5479
5480 if (icr & E1000_ICR_DOUTSYNC) {
5481 /* HW is reporting DMA is out of sync */
5482 adapter->stats.doosync++;
5483 }
5484
5485 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5486 hw->mac.get_link_status = 1;
5487 /* guard against interrupt when we're going down */
5488 if (!test_bit(__IGB_DOWN, &adapter->state))
5489 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5490 }
5491
5492 napi_schedule(&q_vector->napi);
5493
5494 return IRQ_HANDLED;
5495}
5496
5497static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
5498{
5499 struct igb_adapter *adapter = q_vector->adapter;
5500 struct e1000_hw *hw = &adapter->hw;
5501
5502 if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
5503 (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
5504 if (!adapter->msix_entries)
5505 igb_set_itr(adapter);
5506 else
5507 igb_update_ring_itr(q_vector);
5508 }
5509
5510 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5511 if (adapter->msix_entries)
5512 wr32(E1000_EIMS, q_vector->eims_value);
5513 else
5514 igb_irq_enable(adapter);
5515 }
5516}
5517
5518/**
5519 * igb_poll - NAPI Rx polling callback
5520 * @napi: napi polling structure
5521 * @budget: count of how many packets we should handle
5522 **/
5523static int igb_poll(struct napi_struct *napi, int budget)
5524{
5525 struct igb_q_vector *q_vector = container_of(napi,
5526 struct igb_q_vector,
5527 napi);
5528 int tx_clean_complete = 1, work_done = 0;
5529
5530#ifdef CONFIG_IGB_DCA
5531 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5532 igb_update_dca(q_vector);
5533#endif
5534 if (q_vector->tx_ring)
5535 tx_clean_complete = igb_clean_tx_irq(q_vector);
5536
5537 if (q_vector->rx_ring)
5538 igb_clean_rx_irq_adv(q_vector, &work_done, budget);
5539
5540 if (!tx_clean_complete)
5541 work_done = budget;
5542
5543 /* If not enough Rx work done, exit the polling mode */
5544 if (work_done < budget) {
5545 napi_complete(napi);
5546 igb_ring_irq_enable(q_vector);
5547 }
5548
5549 return work_done;
5550}
5551
5552/**
5553 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
5554 * @adapter: board private structure
5555 * @shhwtstamps: timestamp structure to update
5556 * @regval: unsigned 64bit system time value.
5557 *
5558 * We need to convert the system time value stored in the RX/TXSTMP registers
5559 * into a hwtstamp which can be used by the upper level timestamping functions
5560 */
5561static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
5562 struct skb_shared_hwtstamps *shhwtstamps,
5563 u64 regval)
5564{
5565 u64 ns;
5566
5567 /*
5568 * The 82580 starts with 1 ns at bit 0 in RX/TXSTMPL; shift this up to
5569 * 24 to match the clock shift we set up earlier.
5570 */
5571 if (adapter->hw.mac.type == e1000_82580)
5572 regval <<= IGB_82580_TSYNC_SHIFT;
5573
5574 ns = timecounter_cyc2time(&adapter->clock, regval);
5575 timecompare_update(&adapter->compare, ns);
5576 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
5577 shhwtstamps->hwtstamp = ns_to_ktime(ns);
5578 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
5579}
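/*
 * Worked example: on 82580 SYSTIM counts whole nanoseconds from bit 0,
 * while the timecounter was initialized with a 24-bit fractional shift,
 * so a raw reading t becomes t << IGB_82580_TSYNC_SHIFT (t << 24) before
 * timecounter_cyc2time() converts it.
 */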
5580
5581/**
5582 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5583 * @q_vector: pointer to q_vector containing needed info
5584 * @buffer_info: pointer to igb_buffer structure
5585 *
5586 * If we were asked to do hardware stamping and such a time stamp is
5587 * available, then it must have been for this skb here because we
5588 * allow only one such packet into the queue.
5589 */
5590static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
5591{
5592 struct igb_adapter *adapter = q_vector->adapter;
5593 struct e1000_hw *hw = &adapter->hw;
5594 struct skb_shared_hwtstamps shhwtstamps;
5595 u64 regval;
5596
5597 /* exit if hw timestamping was not requested for this skb or the TX stamp is not valid */
5598 if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
5599 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5600 return;
5601
5602 regval = rd32(E1000_TXSTMPL);
5603 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5604
5605 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
5606 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
5607}
5608
5609/**
5610 * igb_clean_tx_irq - Reclaim resources after transmit completes
5611 * @q_vector: pointer to q_vector containing needed info
5612 * returns true if ring is completely cleaned
5613 **/
5614static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5615{
5616 struct igb_adapter *adapter = q_vector->adapter;
5617 struct igb_ring *tx_ring = q_vector->tx_ring;
5618 struct net_device *netdev = tx_ring->netdev;
5619 struct e1000_hw *hw = &adapter->hw;
5620 struct igb_buffer *buffer_info;
5621 union e1000_adv_tx_desc *tx_desc, *eop_desc;
5622 unsigned int total_bytes = 0, total_packets = 0;
5623 unsigned int i, eop, count = 0;
5624 bool cleaned = false;
5625
5626 i = tx_ring->next_to_clean;
5627 eop = tx_ring->buffer_info[i].next_to_watch;
5628 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
5629
5630 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
5631 (count < tx_ring->count)) {
5632 rmb(); /* read buffer_info after eop_desc status */
5633 for (cleaned = false; !cleaned; count++) {
5634 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
5635 buffer_info = &tx_ring->buffer_info[i];
5636 cleaned = (i == eop);
5637
5638 if (buffer_info->skb) {
5639 total_bytes += buffer_info->bytecount;
5640 /* gso_segs is currently only valid for tcp */
5641 total_packets += buffer_info->gso_segs;
5642 igb_tx_hwtstamp(q_vector, buffer_info);
5643 }
5644
5645 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
5646 tx_desc->wb.status = 0;
5647
5648 i++;
5649 if (i == tx_ring->count)
5650 i = 0;
5651 }
5652 eop = tx_ring->buffer_info[i].next_to_watch;
5653 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
5654 }
5655
5656 tx_ring->next_to_clean = i;
5657
5658 if (unlikely(count &&
5659 netif_carrier_ok(netdev) &&
5660 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5661 /* Make sure that anybody stopping the queue after this
5662 * sees the new next_to_clean.
5663 */
5664 smp_mb();
5665 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
5666 !(test_bit(__IGB_DOWN, &adapter->state))) {
5667 netif_wake_subqueue(netdev, tx_ring->queue_index);
5668
5669 u64_stats_update_begin(&tx_ring->tx_syncp);
5670 tx_ring->tx_stats.restart_queue++;
5671 u64_stats_update_end(&tx_ring->tx_syncp);
5672 }
5673 }
5674
5675 if (tx_ring->detect_tx_hung) {
5676 /* Detect a transmit hang in hardware; this serializes the
5677 * check with the clearing of time_stamp and the movement of i */
5678 tx_ring->detect_tx_hung = false;
5679 if (tx_ring->buffer_info[i].time_stamp &&
5680 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
5681 (adapter->tx_timeout_factor * HZ)) &&
5682 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
5683
5684 /* detected Tx unit hang */
5685 dev_err(tx_ring->dev,
5686 "Detected Tx Unit Hang\n"
5687 " Tx Queue <%d>\n"
5688 " TDH <%x>\n"
5689 " TDT <%x>\n"
5690 " next_to_use <%x>\n"
5691 " next_to_clean <%x>\n"
5692 "buffer_info[next_to_clean]\n"
5693 " time_stamp <%lx>\n"
5694 " next_to_watch <%x>\n"
5695 " jiffies <%lx>\n"
5696 " desc.status <%x>\n",
5697 tx_ring->queue_index,
5698 readl(tx_ring->head),
5699 readl(tx_ring->tail),
5700 tx_ring->next_to_use,
5701 tx_ring->next_to_clean,
5702 tx_ring->buffer_info[eop].time_stamp,
5703 eop,
5704 jiffies,
5705 eop_desc->wb.status);
5706 netif_stop_subqueue(netdev, tx_ring->queue_index);
5707 }
5708 }
5709 tx_ring->total_bytes += total_bytes;
5710 tx_ring->total_packets += total_packets;
5711 u64_stats_update_begin(&tx_ring->tx_syncp);
5712 tx_ring->tx_stats.bytes += total_bytes;
5713 tx_ring->tx_stats.packets += total_packets;
5714 u64_stats_update_end(&tx_ring->tx_syncp);
5715 return count < tx_ring->count;
5716}
5717
5718static inline void igb_rx_checksum_adv(struct igb_ring *ring,
5719 u32 status_err, struct sk_buff *skb)
5720{
5721 skb_checksum_none_assert(skb);
5722
5723 /* bail if the Ignore Checksum bit is set or checksumming is disabled through ethtool */
5724 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
5725 (status_err & E1000_RXD_STAT_IXSM))
5726 return;
5727
5728 /* TCP/UDP checksum error bit is set */
5729 if (status_err &
5730 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
5731 /*
5732 * work around errata with SCTP packets where the TCPE aka
5733 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5734 * packets (i.e. let the stack check the crc32c instead)
5735 */
5736 if ((skb->len == 60) &&
5737 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
5738 u64_stats_update_begin(&ring->rx_syncp);
5739 ring->rx_stats.csum_err++;
5740 u64_stats_update_end(&ring->rx_syncp);
5741 }
5742 /* let the stack verify checksum errors */
5743 return;
5744 }
5745 /* It must be a TCP or UDP packet with a valid checksum */
5746 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
5747 skb->ip_summed = CHECKSUM_UNNECESSARY;
5748
5749 dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
5750}
5751
5752static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
5753 struct sk_buff *skb)
5754{
5755 struct igb_adapter *adapter = q_vector->adapter;
5756 struct e1000_hw *hw = &adapter->hw;
5757 u64 regval;
5758
5759 /*
5760 * If this bit is set, then the RX registers contain the time stamp. No
5761 * other packet will be time stamped until we read these registers, so
5762 * read the registers to make them available again. Because only one
5763 * packet can be time stamped at a time, we know that the register
5764 * values must belong to this one here and therefore we don't need to
5765 * compare any of the additional attributes stored for it.
5766 *
5767 * If nothing went wrong, then it should have a shared tx_flags that we
5768 * can turn into a skb_shared_hwtstamps.
5769 */
5770 if (staterr & E1000_RXDADV_STAT_TSIP) {
5771 u32 *stamp = (u32 *)skb->data;
5772 regval = le32_to_cpu(*(stamp + 2));
5773 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
5774 skb_pull(skb, IGB_TS_HDR_LEN);
5775 } else {
5776 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5777 return;
5778
5779 regval = rd32(E1000_RXSTMPL);
5780 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5781 }
5782
5783 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5784}

5785 static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
5786 union e1000_adv_rx_desc *rx_desc)
5787{
5788 /* HW will not DMA in data larger than the given buffer, even if it
5789 * parses the (NFS, of course) header to be larger. In that case, it
5790 * fills the header buffer and spills the rest into the page.
5791 */
5792 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5793 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
5794 if (hlen > rx_ring->rx_buffer_len)
5795 hlen = rx_ring->rx_buffer_len;
5796 return hlen;
5797}
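/*
 * Worked example (hypothetical descriptor): if hdr_info encodes a
 * 64-byte split header, igb_get_hlen() returns 64; if a malfunctioning
 * descriptor claimed 2048 bytes on a ring with rx_buffer_len = 128, the
 * value would be clamped to 128.
 */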
5798
5799static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
5800 int *work_done, int budget)
5801{
5802 struct igb_ring *rx_ring = q_vector->rx_ring;
5803 struct net_device *netdev = rx_ring->netdev;
5804 struct device *dev = rx_ring->dev;
5805 union e1000_adv_rx_desc *rx_desc, *next_rxd;
5806 struct igb_buffer *buffer_info, *next_buffer;
5807 struct sk_buff *skb;
5808 bool cleaned = false;
5809 int cleaned_count = 0;
5810 int current_node = numa_node_id();
5811 unsigned int total_bytes = 0, total_packets = 0;
5812 unsigned int i;
5813 u32 staterr;
5814 u16 length;
5815
5816 i = rx_ring->next_to_clean;
5817 buffer_info = &rx_ring->buffer_info[i];
5818 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
5819 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5820
5821 while (staterr & E1000_RXD_STAT_DD) {
5822 if (*work_done >= budget)
5823 break;
5824 (*work_done)++;
5825 rmb(); /* read descriptor and rx_buffer_info after status DD */
5826
5827 skb = buffer_info->skb;
5828 prefetch(skb->data - NET_IP_ALIGN);
5829 buffer_info->skb = NULL;
5830
5831 i++;
5832 if (i == rx_ring->count)
5833 i = 0;
5834
5835 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
5836 prefetch(next_rxd);
5837 next_buffer = &rx_ring->buffer_info[i];
5838
5839 length = le16_to_cpu(rx_desc->wb.upper.length);
5840 cleaned = true;
5841 cleaned_count++;
5842
5843 if (buffer_info->dma) {
5844 dma_unmap_single(dev, buffer_info->dma,
5845 rx_ring->rx_buffer_len,
5846 DMA_FROM_DEVICE);
5847 buffer_info->dma = 0;
5848 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
5849 skb_put(skb, length);
5850 goto send_up;
5851 }
5852 skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
5853 }
5854
5855 if (length) {
5856 dma_unmap_page(dev, buffer_info->page_dma,
5857 PAGE_SIZE / 2, DMA_FROM_DEVICE);
5858 buffer_info->page_dma = 0;
5859
5860 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
5861 buffer_info->page,
5862 buffer_info->page_offset,
5863 length);
5864
5865 if ((page_count(buffer_info->page) != 1) ||
5866 (page_to_nid(buffer_info->page) != current_node))
5867 buffer_info->page = NULL;
5868 else
5869 get_page(buffer_info->page);
5870
5871 skb->len += length;
5872 skb->data_len += length;
5873 skb->truesize += length;
5874 }
5875
5876 if (!(staterr & E1000_RXD_STAT_EOP)) {
5877 buffer_info->skb = next_buffer->skb;
5878 buffer_info->dma = next_buffer->dma;
5879 next_buffer->skb = skb;
5880 next_buffer->dma = 0;
5881 goto next_desc;
5882 }
5883send_up:
5884 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
5885 dev_kfree_skb_irq(skb);
5886 goto next_desc;
5887 }
5888
5889 if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
5890 igb_rx_hwtstamp(q_vector, staterr, skb);
5891 total_bytes += skb->len;
5892 total_packets++;
5893
5894 igb_rx_checksum_adv(rx_ring, staterr, skb);
5895
5896 skb->protocol = eth_type_trans(skb, netdev);
5897 skb_record_rx_queue(skb, rx_ring->queue_index);
5898
5899 if (staterr & E1000_RXD_STAT_VP) {
5900 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
5901
5902 __vlan_hwaccel_put_tag(skb, vid);
5903 }
5904 napi_gro_receive(&q_vector->napi, skb);
5905
5906next_desc:
5907 rx_desc->wb.upper.status_error = 0;
5908
5909 /* return some buffers to hardware, one at a time is too slow */
5910 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
5911 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
5912 cleaned_count = 0;
5913 }
5914
5915 /* use prefetched values */
5916 rx_desc = next_rxd;
5917 buffer_info = next_buffer;
5918 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5919 }
5920
5921 rx_ring->next_to_clean = i;
5922 cleaned_count = igb_desc_unused(rx_ring);
5923
5924 if (cleaned_count)
5925 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
5926
5927 rx_ring->total_packets += total_packets;
5928 rx_ring->total_bytes += total_bytes;
5929 u64_stats_update_begin(&rx_ring->rx_syncp);
5930 rx_ring->rx_stats.packets += total_packets;
5931 rx_ring->rx_stats.bytes += total_bytes;
5932 u64_stats_update_end(&rx_ring->rx_syncp);
5933 return cleaned;
5934}
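
[Editorial aside: the rx_syncp updates above pair with a reader loop elsewhere in the driver (igb_update_stats). A condensed sketch of the reader side, assuming a valid struct igb_ring *ring; the _bh variants match the softirq writer context:

#include <linux/u64_stats_sync.h>

	unsigned int start;
	u64 packets, bytes;

	do {
		start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
		packets = ring->rx_stats.packets; /* 64-bit safe on 32-bit SMP */
		bytes = ring->rx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
]
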
5935
5936/**
5937 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
5938 * @rx_ring: rx descriptor ring to refill; @cleaned_count: buffers to replace
5939 **/
5940void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5941{
5942 struct net_device *netdev = rx_ring->netdev;
5943 union e1000_adv_rx_desc *rx_desc;
5944 struct igb_buffer *buffer_info;
5945 struct sk_buff *skb;
5946 unsigned int i;
5947 int bufsz;
5948
5949 i = rx_ring->next_to_use;
5950 buffer_info = &rx_ring->buffer_info[i];
5951
5952 bufsz = rx_ring->rx_buffer_len;
5953
5954 while (cleaned_count--) {
5955 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
5956
5957 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
5958 if (!buffer_info->page) {
5959 buffer_info->page = netdev_alloc_page(netdev);
5960 if (unlikely(!buffer_info->page)) {
5961 u64_stats_update_begin(&rx_ring->rx_syncp);
5962 rx_ring->rx_stats.alloc_failed++;
5963 u64_stats_update_end(&rx_ring->rx_syncp);
5964 goto no_buffers;
5965 }
5966 buffer_info->page_offset = 0;
5967 } else {
5968 buffer_info->page_offset ^= PAGE_SIZE / 2;
5969 }
5970 buffer_info->page_dma =
5971 dma_map_page(rx_ring->dev, buffer_info->page,
5972 buffer_info->page_offset,
5973 PAGE_SIZE / 2,
5974 DMA_FROM_DEVICE);
5975 if (dma_mapping_error(rx_ring->dev,
5976 buffer_info->page_dma)) {
5977 buffer_info->page_dma = 0;
5978 u64_stats_update_begin(&rx_ring->rx_syncp);
5979 rx_ring->rx_stats.alloc_failed++;
5980 u64_stats_update_end(&rx_ring->rx_syncp);
5981 goto no_buffers;
5982 }
5983 }
5984
5985 skb = buffer_info->skb;
5986 if (!skb) {
5987 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
5988 if (unlikely(!skb)) {
5989 u64_stats_update_begin(&rx_ring->rx_syncp);
5990 rx_ring->rx_stats.alloc_failed++;
5991 u64_stats_update_end(&rx_ring->rx_syncp);
5992 goto no_buffers;
5993 }
5994
5995 buffer_info->skb = skb;
5996 }
5997 if (!buffer_info->dma) {
5998 buffer_info->dma = dma_map_single(rx_ring->dev,
5999 skb->data,
6000 bufsz,
6001 DMA_FROM_DEVICE);
6002 if (dma_mapping_error(rx_ring->dev,
6003 buffer_info->dma)) {
6004 buffer_info->dma = 0;
6005 u64_stats_update_begin(&rx_ring->rx_syncp);
6006 rx_ring->rx_stats.alloc_failed++;
6007 u64_stats_update_end(&rx_ring->rx_syncp);
6008 goto no_buffers;
6009 }
6010 }
6011 /* Refresh the desc even if buffer_addrs didn't change because
6012 * each write-back erases this info. */
6013 if (bufsz < IGB_RXBUFFER_1024) {
6014 rx_desc->read.pkt_addr =
6015 cpu_to_le64(buffer_info->page_dma);
6016 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
6017 } else {
6018 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
6019 rx_desc->read.hdr_addr = 0;
6020 }
6021
6022 i++;
6023 if (i == rx_ring->count)
6024 i = 0;
6025 buffer_info = &rx_ring->buffer_info[i];
6026 }
6027
6028no_buffers:
6029 if (rx_ring->next_to_use != i) {
6030 rx_ring->next_to_use = i;
6031 if (i == 0)
6032 i = (rx_ring->count - 1);
6033 else
6034 i--;
6035
6036 /* Force memory writes to complete before letting h/w
6037 * know there are new descriptors to fetch. (Only
6038 * applicable for weak-ordered memory model archs,
6039 * such as IA-64). */
6040 wmb();
6041 writel(i, rx_ring->tail);
6042 }
6043}
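
[Editorial aside: the no_buffers tail update above follows the usual descriptor-producer contract, condensed here with the same names as the function to isolate the ordering. Note that the value written to tail is deliberately one entry behind next_to_use, leaving a gap descriptor between software and hardware:

	rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); /* 1. publish */
	wmb();			/* 2. order descriptor writes before the MMIO */
	writel(i, rx_ring->tail); /* 3. doorbell: hardware may fetch now */
]
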
6044
6045/**
6046 * igb_mii_ioctl - handle MII ioctls (PHY register access)
6047 * @netdev: network interface device structure
6048 * @ifr: pointer to the interface request
6049 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
6050 **/
6051static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6052{
6053 struct igb_adapter *adapter = netdev_priv(netdev);
6054 struct mii_ioctl_data *data = if_mii(ifr);
6055
6056 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6057 return -EOPNOTSUPP;
6058
6059 switch (cmd) {
6060 case SIOCGMIIPHY:
6061 data->phy_id = adapter->hw.phy.addr;
6062 break;
6063 case SIOCGMIIREG:
6064 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6065 &data->val_out))
6066 return -EIO;
6067 break;
6068 case SIOCSMIIREG:
6069 default:
6070 return -EOPNOTSUPP;
6071 }
6072 return 0;
6073}
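
[Editorial aside: a userspace sketch of the caller side of these MII ioctls; illustrative only, read_bmsr is a hypothetical name and fd is any socket bound to the stack:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int read_bmsr(int fd, const char *ifname)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return -1;
	mii->reg_num = MII_BMSR;		/* PHY status register */
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return -1;
	return mii->val_out;
}
]
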
6074
6075/**
6076 * igb_hwtstamp_ioctl - control hardware time stamping
6077 * @netdev: network interface device structure
6078 * @ifr: interface request carrying a struct hwtstamp_config
6079 * @cmd: ioctl command (SIOCSHWTSTAMP)
6080 *
6081 * Outgoing time stamping can be enabled and disabled. Play nice and
6082 * disable it when requested, although it shouldn't cause any overhead
6083 * when no packet needs it. At most one packet in the queue may be
6084 * marked for time stamping, otherwise it would be impossible to tell
6085 * for sure to which packet the hardware time stamp belongs.
6086 *
6087 * Incoming time stamping has to be configured via the hardware
6088 * filters. Not all combinations are supported, in particular event
6089 * type has to be specified. Matching the kind of event packet is
6090 * not supported, with the exception of "all V2 events regardless of
6091 * layer 2 or 4".
6092 *
6093 **/
6094static int igb_hwtstamp_ioctl(struct net_device *netdev,
6095 struct ifreq *ifr, int cmd)
6096{
6097 struct igb_adapter *adapter = netdev_priv(netdev);
6098 struct e1000_hw *hw = &adapter->hw;
6099 struct hwtstamp_config config;
6100 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
6101 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
6102 u32 tsync_rx_cfg = 0;
6103 bool is_l4 = false;
6104 bool is_l2 = false;
6105 u32 regval;
6106
6107 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6108 return -EFAULT;
6109
6110 /* reserved for future extensions */
6111 if (config.flags)
6112 return -EINVAL;
6113
6114 switch (config.tx_type) {
6115 case HWTSTAMP_TX_OFF:
6116		tsync_tx_ctl = 0;	/* fall through */
6117 case HWTSTAMP_TX_ON:
6118 break;
6119 default:
6120 return -ERANGE;
6121 }
6122
6123 switch (config.rx_filter) {
6124 case HWTSTAMP_FILTER_NONE:
6125 tsync_rx_ctl = 0;
6126 break;
6127 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6128 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6129 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6130 case HWTSTAMP_FILTER_ALL:
6131 /*
6132 * register TSYNCRXCFG must be set, therefore it is not
6133 * possible to time stamp both Sync and Delay_Req messages
6134 * => fall back to time stamping all packets
6135 */
6136 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
6137 config.rx_filter = HWTSTAMP_FILTER_ALL;
6138 break;
6139 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
6140 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
6141 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
6142 is_l4 = true;
6143 break;
6144 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
6145 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
6146 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
6147 is_l4 = true;
6148 break;
6149 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6150 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
6151 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
6152 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
6153 is_l2 = true;
6154 is_l4 = true;
6155 config.rx_filter = HWTSTAMP_FILTER_SOME;
6156 break;
6157 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6158 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
6159 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
6160 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
6161 is_l2 = true;
6162 is_l4 = true;
6163 config.rx_filter = HWTSTAMP_FILTER_SOME;
6164 break;
6165 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6166 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6167 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
6168 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
6169 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
6170 is_l2 = true;
6171 break;
6172 default:
6173 return -ERANGE;
6174 }
6175
6176 if (hw->mac.type == e1000_82575) {
6177		if (tsync_rx_ctl || tsync_tx_ctl)
6178 return -EINVAL;
6179 return 0;
6180 }
6181
6182 /*
6183 * Per-packet timestamping only works if all packets are
6184 * timestamped, so enable timestamping in all packets as
6185 * long as one rx filter was configured.
6186 */
6187 if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
6188 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
6189 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
6190 }
6191
6192 /* enable/disable TX */
6193 regval = rd32(E1000_TSYNCTXCTL);
6194 regval &= ~E1000_TSYNCTXCTL_ENABLED;
6195 regval |= tsync_tx_ctl;
6196 wr32(E1000_TSYNCTXCTL, regval);
6197
6198 /* enable/disable RX */
6199 regval = rd32(E1000_TSYNCRXCTL);
6200 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
6201 regval |= tsync_rx_ctl;
6202 wr32(E1000_TSYNCRXCTL, regval);
6203
6204 /* define which PTP packets are time stamped */
6205 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
6206
6207 /* define ethertype filter for timestamped packets */
6208 if (is_l2)
6209 wr32(E1000_ETQF(3),
6210 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
6211 E1000_ETQF_1588 | /* enable timestamping */
6212 ETH_P_1588)); /* 1588 eth protocol type */
6213 else
6214 wr32(E1000_ETQF(3), 0);
6215
6216#define PTP_PORT 319
6217 /* L4 Queue Filter[3]: filter by destination port and protocol */
6218 if (is_l4) {
6219 u32 ftqf = (IPPROTO_UDP /* UDP */
6220 | E1000_FTQF_VF_BP /* VF not compared */
6221 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
6222 | E1000_FTQF_MASK); /* mask all inputs */
6223 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
6224
6225 wr32(E1000_IMIR(3), htons(PTP_PORT));
6226 wr32(E1000_IMIREXT(3),
6227 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
6228 if (hw->mac.type == e1000_82576) {
6229 /* enable source port check */
6230 wr32(E1000_SPQF(3), htons(PTP_PORT));
6231 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
6232 }
6233 wr32(E1000_FTQF(3), ftqf);
6234 } else {
6235 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
6236 }
6237 wrfl();
6238
6239 adapter->hwtstamp_config = config;
6240
6241 /* clear TX/RX time stamp registers, just to be sure */
6242 regval = rd32(E1000_TXSTMPH);
6243 regval = rd32(E1000_RXSTMPH);
6244
6245 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
6246 -EFAULT : 0;
6247}
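
[Editorial aside: the corresponding userspace request looks roughly like this; a sketch only, enable_ptp_stamping is a hypothetical name and fd is any socket:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_ptp_stamping(int fd, const char *ifname)
{
	struct ifreq ifr;
	struct hwtstamp_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* on return cfg.rx_filter reports what the driver actually set */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}
]
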
6248
6249/**
6250 * igb_ioctl - dispatch MII and hardware time stamping ioctls
6251 * @netdev: network interface device structure
6252 * @ifr: pointer to the interface request
6253 * @cmd: ioctl command
6254 **/
6255static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6256{
6257 switch (cmd) {
6258 case SIOCGMIIPHY:
6259 case SIOCGMIIREG:
6260 case SIOCSMIIREG:
6261 return igb_mii_ioctl(netdev, ifr, cmd);
6262 case SIOCSHWTSTAMP:
6263 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
6264 default:
6265 return -EOPNOTSUPP;
6266 }
6267}
6268
6269s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6270{
6271 struct igb_adapter *adapter = hw->back;
6272 u16 cap_offset;
6273
6274 cap_offset = adapter->pdev->pcie_cap;
6275 if (!cap_offset)
6276 return -E1000_ERR_CONFIG;
6277
6278 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
6279
6280 return 0;
6281}
6282
6283s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6284{
6285 struct igb_adapter *adapter = hw->back;
6286 u16 cap_offset;
6287
6288 cap_offset = adapter->pdev->pcie_cap;
6289 if (!cap_offset)
6290 return -E1000_ERR_CONFIG;
6291
6292 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
6293
6294 return 0;
6295}
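
[Editorial aside: a sketch of a typical caller, reading the PCIe Link Status register through the helper above; PCI_EXP_LNKSTA comes from <linux/pci_regs.h> and hw is assumed valid:

	u16 link_status;

	if (!igb_read_pcie_cap_reg(hw, PCI_EXP_LNKSTA, &link_status))
		pr_info("PCIe LNKSTA 0x%04x (speed bits 3:0, width bits 9:4)\n",
			link_status);
]
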
6296
6297static void igb_vlan_mode(struct net_device *netdev, u32 features)
6298{
6299 struct igb_adapter *adapter = netdev_priv(netdev);
6300 struct e1000_hw *hw = &adapter->hw;
6301 u32 ctrl, rctl;
6302
6303 igb_irq_disable(adapter);
6304
6305 if (features & NETIF_F_HW_VLAN_RX) {
6306 /* enable VLAN tag insert/strip */
6307 ctrl = rd32(E1000_CTRL);
6308 ctrl |= E1000_CTRL_VME;
6309 wr32(E1000_CTRL, ctrl);
6310
6311 /* Disable CFI check */
6312 rctl = rd32(E1000_RCTL);
6313 rctl &= ~E1000_RCTL_CFIEN;
6314 wr32(E1000_RCTL, rctl);
6315 } else {
6316 /* disable VLAN tag insert/strip */
6317 ctrl = rd32(E1000_CTRL);
6318 ctrl &= ~E1000_CTRL_VME;
6319 wr32(E1000_CTRL, ctrl);
6320 }
6321
6322 igb_rlpml_set(adapter);
6323
6324 if (!test_bit(__IGB_DOWN, &adapter->state))
6325 igb_irq_enable(adapter);
6326}
6327
6328static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
6329{
6330 struct igb_adapter *adapter = netdev_priv(netdev);
6331 struct e1000_hw *hw = &adapter->hw;
6332 int pf_id = adapter->vfs_allocated_count;
6333
6334 /* attempt to add filter to vlvf array */
6335 igb_vlvf_set(adapter, vid, true, pf_id);
6336
6337 /* add the filter since PF can receive vlans w/o entry in vlvf */
6338 igb_vfta_set(hw, vid, true);
6339
6340 set_bit(vid, adapter->active_vlans);
6341}
6342
6343static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
6344{
6345 struct igb_adapter *adapter = netdev_priv(netdev);
6346 struct e1000_hw *hw = &adapter->hw;
6347 int pf_id = adapter->vfs_allocated_count;
6348 s32 err;
6349
6350 igb_irq_disable(adapter);
6351
6352 if (!test_bit(__IGB_DOWN, &adapter->state))
6353 igb_irq_enable(adapter);
6354
6355 /* remove vlan from VLVF table array */
6356 err = igb_vlvf_set(adapter, vid, false, pf_id);
6357
6358 /* if vid was not present in VLVF just remove it from table */
6359 if (err)
6360 igb_vfta_set(hw, vid, false);
6361
6362 clear_bit(vid, adapter->active_vlans);
6363}
6364
6365static void igb_restore_vlan(struct igb_adapter *adapter)
6366{
6367 u16 vid;
6368
6369 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
6370 igb_vlan_rx_add_vid(adapter->netdev, vid);
6371}
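
[Editorial aside: igb_vfta_set() above programs the VLAN Filter Table Array. A sketch of the addressing it relies on, assuming the usual layout for this family of 128 32-bit registers with one bit per VLAN ID:

	u32 index = (vid >> 5) & 0x7F;	/* which of the 128 VFTA words */
	u32 mask = 1 << (vid & 0x1F);	/* which bit inside that word */

	/* membership: vfta[index] |= mask to add, &= ~mask to remove */
]
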
6372
6373int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
6374{
6375 struct pci_dev *pdev = adapter->pdev;
6376 struct e1000_mac_info *mac = &adapter->hw.mac;
6377
6378 mac->autoneg = 0;
6379
6380 /* Make sure dplx is at most 1 bit and lsb of speed is not set
6381 * for the switch() below to work */
6382 if ((spd & 1) || (dplx & ~1))
6383 goto err_inval;
6384
6385	/* Fiber NICs only allow 1000 Mbps full duplex */
6386 if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
6387 spd != SPEED_1000 &&
6388 dplx != DUPLEX_FULL)
6389 goto err_inval;
6390
6391 switch (spd + dplx) {
6392 case SPEED_10 + DUPLEX_HALF:
6393 mac->forced_speed_duplex = ADVERTISE_10_HALF;
6394 break;
6395 case SPEED_10 + DUPLEX_FULL:
6396 mac->forced_speed_duplex = ADVERTISE_10_FULL;
6397 break;
6398 case SPEED_100 + DUPLEX_HALF:
6399 mac->forced_speed_duplex = ADVERTISE_100_HALF;
6400 break;
6401 case SPEED_100 + DUPLEX_FULL:
6402 mac->forced_speed_duplex = ADVERTISE_100_FULL;
6403 break;
6404 case SPEED_1000 + DUPLEX_FULL:
6405 mac->autoneg = 1;
6406 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
6407 break;
6408 case SPEED_1000 + DUPLEX_HALF: /* not supported */
6409 default:
6410 goto err_inval;
6411 }
6412 return 0;
6413
6414err_inval:
6415 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
6416 return -EINVAL;
6417}
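
[Editorial aside: this function is reached from ethtool's set_settings path. A userspace sketch that forces 100/full; illustrative only, force_100_full is a hypothetical name. It is equivalent to "ethtool -s ethX speed 100 duplex full autoneg off":

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int force_100_full(int fd, const char *ifname)
{
	struct ifreq ifr;
	struct ethtool_cmd ecmd;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ecmd;

	memset(&ecmd, 0, sizeof(ecmd));
	ecmd.cmd = ETHTOOL_GSET;	/* read current settings first */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return -1;

	ecmd.cmd = ETHTOOL_SSET;
	ecmd.autoneg = AUTONEG_DISABLE;
	ecmd.speed = SPEED_100;
	ecmd.duplex = DUPLEX_FULL;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}
]
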
6418
6419static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
6420{
6421 struct net_device *netdev = pci_get_drvdata(pdev);
6422 struct igb_adapter *adapter = netdev_priv(netdev);
6423 struct e1000_hw *hw = &adapter->hw;
6424 u32 ctrl, rctl, status;
6425 u32 wufc = adapter->wol;
6426#ifdef CONFIG_PM
6427 int retval = 0;
6428#endif
6429
6430 netif_device_detach(netdev);
6431
6432 if (netif_running(netdev))
6433 igb_close(netdev);
6434
6435 igb_clear_interrupt_scheme(adapter);
6436
6437#ifdef CONFIG_PM
6438 retval = pci_save_state(pdev);
6439 if (retval)
6440 return retval;
6441#endif
6442
6443 status = rd32(E1000_STATUS);
6444 if (status & E1000_STATUS_LU)
6445 wufc &= ~E1000_WUFC_LNKC;
6446
6447 if (wufc) {
6448 igb_setup_rctl(adapter);
6449 igb_set_rx_mode(netdev);
6450
6451 /* turn on all-multi mode if wake on multicast is enabled */
6452 if (wufc & E1000_WUFC_MC) {
6453 rctl = rd32(E1000_RCTL);
6454 rctl |= E1000_RCTL_MPE;
6455 wr32(E1000_RCTL, rctl);
6456 }
6457
6458 ctrl = rd32(E1000_CTRL);
6459 /* advertise wake from D3Cold */
6460 #define E1000_CTRL_ADVD3WUC 0x00100000
6461 /* phy power management enable */
6462 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
6463 ctrl |= E1000_CTRL_ADVD3WUC;
6464 wr32(E1000_CTRL, ctrl);
6465
6466 /* Allow time for pending master requests to run */
6467 igb_disable_pcie_master(hw);
6468
6469 wr32(E1000_WUC, E1000_WUC_PME_EN);
6470 wr32(E1000_WUFC, wufc);
6471 } else {
6472 wr32(E1000_WUC, 0);
6473 wr32(E1000_WUFC, 0);
6474 }
6475
6476 *enable_wake = wufc || adapter->en_mng_pt;
6477 if (!*enable_wake)
6478 igb_power_down_link(adapter);
6479 else
6480 igb_power_up_link(adapter);
6481
6482 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6483 * would have already happened in close and is redundant. */
6484 igb_release_hw_control(adapter);
6485
6486 pci_disable_device(pdev);
6487
6488 return 0;
6489}
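
[Editorial aside: the wufc filter bits consumed above are normally configured beforehand through ethtool's WOL ioctl. A userspace sketch requesting magic-packet wake; enable_magic_wol is a hypothetical name:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int enable_magic_wol(int fd, const char *ifname)
{
	struct ifreq ifr;
	struct ethtool_wolinfo wol;

	memset(&wol, 0, sizeof(wol));
	wol.cmd = ETHTOOL_SWOL;
	wol.wolopts = WAKE_MAGIC;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&wol;

	return ioctl(fd, SIOCETHTOOL, &ifr);
}
]
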
6490
6491#ifdef CONFIG_PM
6492static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
6493{
6494 int retval;
6495 bool wake;
6496
6497 retval = __igb_shutdown(pdev, &wake);
6498 if (retval)
6499 return retval;
6500
6501 if (wake) {
6502 pci_prepare_to_sleep(pdev);
6503 } else {
6504 pci_wake_from_d3(pdev, false);
6505 pci_set_power_state(pdev, PCI_D3hot);
6506 }
6507
6508 return 0;
6509}
6510
6511static int igb_resume(struct pci_dev *pdev)
6512{
6513 struct net_device *netdev = pci_get_drvdata(pdev);
6514 struct igb_adapter *adapter = netdev_priv(netdev);
6515 struct e1000_hw *hw = &adapter->hw;
6516 u32 err;
6517
6518 pci_set_power_state(pdev, PCI_D0);
6519 pci_restore_state(pdev);
6520 pci_save_state(pdev);
6521
6522 err = pci_enable_device_mem(pdev);
6523 if (err) {
6524 dev_err(&pdev->dev,
6525 "igb: Cannot enable PCI device from suspend\n");
6526 return err;
6527 }
6528 pci_set_master(pdev);
6529
6530 pci_enable_wake(pdev, PCI_D3hot, 0);
6531 pci_enable_wake(pdev, PCI_D3cold, 0);
6532
6533 if (igb_init_interrupt_scheme(adapter)) {
6534 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
6535 return -ENOMEM;
6536 }
6537
6538 igb_reset(adapter);
6539
6540 /* let the f/w know that the h/w is now under the control of the
6541 * driver. */
6542 igb_get_hw_control(adapter);
6543
6544 wr32(E1000_WUS, ~0);
6545
6546 if (netif_running(netdev)) {
6547 err = igb_open(netdev);
6548 if (err)
6549 return err;
6550 }
6551
6552 netif_device_attach(netdev);
6553
6554 return 0;
6555}
6556#endif
6557
6558static void igb_shutdown(struct pci_dev *pdev)
6559{
6560 bool wake;
6561
6562 __igb_shutdown(pdev, &wake);
6563
6564 if (system_state == SYSTEM_POWER_OFF) {
6565 pci_wake_from_d3(pdev, wake);
6566 pci_set_power_state(pdev, PCI_D3hot);
6567 }
6568}
6569
6570#ifdef CONFIG_NET_POLL_CONTROLLER
6571/*
6572 * Polling 'interrupt' - used by things like netconsole to send skbs
6573 * without having to re-enable interrupts. It's not called while
6574 * the interrupt routine is executing.
6575 */
6576static void igb_netpoll(struct net_device *netdev)
6577{
6578 struct igb_adapter *adapter = netdev_priv(netdev);
6579 struct e1000_hw *hw = &adapter->hw;
6580 int i;
6581
6582 if (!adapter->msix_entries) {
6583 struct igb_q_vector *q_vector = adapter->q_vector[0];
6584 igb_irq_disable(adapter);
6585 napi_schedule(&q_vector->napi);
6586 return;
6587 }
6588
6589 for (i = 0; i < adapter->num_q_vectors; i++) {
6590 struct igb_q_vector *q_vector = adapter->q_vector[i];
6591 wr32(E1000_EIMC, q_vector->eims_value);
6592 napi_schedule(&q_vector->napi);
6593 }
6594}
6595#endif /* CONFIG_NET_POLL_CONTROLLER */
6596
6597/**
6598 * igb_io_error_detected - called when PCI error is detected
6599 * @pdev: Pointer to PCI device
6600 * @state: The current pci connection state
6601 *
6602 * This function is called after a PCI bus error affecting
6603 * this device has been detected.
6604 */
6605static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
6606 pci_channel_state_t state)
6607{
6608 struct net_device *netdev = pci_get_drvdata(pdev);
6609 struct igb_adapter *adapter = netdev_priv(netdev);
6610
6611 netif_device_detach(netdev);
6612
6613 if (state == pci_channel_io_perm_failure)
6614 return PCI_ERS_RESULT_DISCONNECT;
6615
6616 if (netif_running(netdev))
6617 igb_down(adapter);
6618 pci_disable_device(pdev);
6619
6620	/* Request a slot reset. */
6621 return PCI_ERS_RESULT_NEED_RESET;
6622}
6623
6624/**
6625 * igb_io_slot_reset - called after the PCI bus has been reset.
6626 * @pdev: Pointer to PCI device
6627 *
6628 * Restart the card from scratch, as if from a cold-boot. Implementation
6629 * resembles the first-half of the igb_resume routine.
6630 */
6631static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
6632{
6633 struct net_device *netdev = pci_get_drvdata(pdev);
6634 struct igb_adapter *adapter = netdev_priv(netdev);
6635 struct e1000_hw *hw = &adapter->hw;
6636 pci_ers_result_t result;
6637 int err;
6638
6639 if (pci_enable_device_mem(pdev)) {
6640 dev_err(&pdev->dev,
6641 "Cannot re-enable PCI device after reset.\n");
6642 result = PCI_ERS_RESULT_DISCONNECT;
6643 } else {
6644 pci_set_master(pdev);
6645 pci_restore_state(pdev);
6646 pci_save_state(pdev);
6647
6648 pci_enable_wake(pdev, PCI_D3hot, 0);
6649 pci_enable_wake(pdev, PCI_D3cold, 0);
6650
6651 igb_reset(adapter);
6652 wr32(E1000_WUS, ~0);
6653 result = PCI_ERS_RESULT_RECOVERED;
6654 }
6655
6656 err = pci_cleanup_aer_uncorrect_error_status(pdev);
6657 if (err) {
6658 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
6659 "failed 0x%0x\n", err);
6660 /* non-fatal, continue */
6661 }
6662
6663 return result;
6664}
6665
6666/**
6667 * igb_io_resume - called when traffic can start flowing again.
6668 * @pdev: Pointer to PCI device
6669 *
6670 * This callback is called when the error recovery driver tells us that
6671 * it's OK to resume normal operation. Implementation resembles the
6672 * second-half of the igb_resume routine.
6673 */
6674static void igb_io_resume(struct pci_dev *pdev)
6675{
6676 struct net_device *netdev = pci_get_drvdata(pdev);
6677 struct igb_adapter *adapter = netdev_priv(netdev);
6678
6679 if (netif_running(netdev)) {
6680 if (igb_up(adapter)) {
6681 dev_err(&pdev->dev, "igb_up failed after reset\n");
6682 return;
6683 }
6684 }
6685
6686 netif_device_attach(netdev);
6687
6688 /* let the f/w know that the h/w is now under the control of the
6689 * driver. */
6690 igb_get_hw_control(adapter);
6691}
6692
6693static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
6694 u8 qsel)
6695{
6696 u32 rar_low, rar_high;
6697 struct e1000_hw *hw = &adapter->hw;
6698
6699 /* HW expects these in little endian so we reverse the byte order
6700 * from network order (big endian) to little endian
6701 */
6702 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
6703 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
6704 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
6705
6706 /* Indicate to hardware the Address is Valid. */
6707 rar_high |= E1000_RAH_AV;
6708
6709 if (hw->mac.type == e1000_82575)
6710 rar_high |= E1000_RAH_POOL_1 * qsel;
6711 else
6712 rar_high |= E1000_RAH_POOL_1 << qsel;
6713
6714 wr32(E1000_RAL(index), rar_low);
6715 wrfl();
6716 wr32(E1000_RAH(index), rar_high);
6717 wrfl();
6718}
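
[Editorial aside: a worked example of the packing above, for the hypothetical address 00:1b:21:aa:bb:cc:

	/* rar_low  = 0x00 | 0x1b<<8 | 0x21<<16 | 0xaa<<24 = 0xaa211b00
	 * rar_high = 0xbb | 0xcc<<8                       = 0x0000ccbb
	 *            (plus E1000_RAH_AV and the pool bits)
	 */
]
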
6719
6720static int igb_set_vf_mac(struct igb_adapter *adapter,
6721 int vf, unsigned char *mac_addr)
6722{
6723 struct e1000_hw *hw = &adapter->hw;
6724	/* VF MAC addresses start at the end of the receive address array
6725	 * and move towards the first; a collision should not be possible */
6726 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
6727
6728 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
6729
6730 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
6731
6732 return 0;
6733}
6734
6735static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
6736{
6737 struct igb_adapter *adapter = netdev_priv(netdev);
6738 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
6739 return -EINVAL;
6740 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
6741 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
6742 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
6743		 " change effective.\n");
6744 if (test_bit(__IGB_DOWN, &adapter->state)) {
6745 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
6746 " but the PF device is not up.\n");
6747 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
6748 " attempting to use the VF device.\n");
6749 }
6750 return igb_set_vf_mac(adapter, vf, mac);
6751}
6752
6753static int igb_link_mbps(int internal_link_speed)
6754{
6755 switch (internal_link_speed) {
6756 case SPEED_100:
6757 return 100;
6758 case SPEED_1000:
6759 return 1000;
6760 default:
6761 return 0;
6762 }
6763}
6764
6765static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
6766 int link_speed)
6767{
6768 int rf_dec, rf_int;
6769 u32 bcnrc_val;
6770
6771 if (tx_rate != 0) {
6772 /* Calculate the rate factor values to set */
6773 rf_int = link_speed / tx_rate;
6774 rf_dec = (link_speed - (rf_int * tx_rate));
6775 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
6776
6777 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
6778 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
6779 E1000_RTTBCNRC_RF_INT_MASK);
6780 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
6781 } else {
6782 bcnrc_val = 0;
6783 }
6784
6785 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
6786 wr32(E1000_RTTBCNRC, bcnrc_val);
6787}
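
[Editorial aside: a worked example of the rate-factor arithmetic above, assuming E1000_RTTBCNRC_RF_INT_SHIFT is 14, i.e. the fraction is in units of 1/16384:

	/* link_speed = 1000 Mb/s, tx_rate = 300 Mb/s:
	 *   rf_int = 1000 / 300                     = 3
	 *   rf_dec = (1000 - 3 * 300) * 16384 / 300 = 5461
	 *   factor = 3 + 5461 / 16384               = 3.333 = link_speed / tx_rate
	 * in effect, hardware caps the queue at link_speed / factor
	 */
]
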
6788
6789static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
6790{
6791 int actual_link_speed, i;
6792 bool reset_rate = false;
6793
6794 /* VF TX rate limit was not set or not supported */
6795 if ((adapter->vf_rate_link_speed == 0) ||
6796 (adapter->hw.mac.type != e1000_82576))
6797 return;
6798
6799 actual_link_speed = igb_link_mbps(adapter->link_speed);
6800 if (actual_link_speed != adapter->vf_rate_link_speed) {
6801 reset_rate = true;
6802 adapter->vf_rate_link_speed = 0;
6803 dev_info(&adapter->pdev->dev,
6804 "Link speed has been changed. VF Transmit "
6805 "rate is disabled\n");
6806 }
6807
6808 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6809 if (reset_rate)
6810 adapter->vf_data[i].tx_rate = 0;
6811
6812 igb_set_vf_rate_limit(&adapter->hw, i,
6813 adapter->vf_data[i].tx_rate,
6814 actual_link_speed);
6815 }
6816}
6817
6818static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
6819{
6820 struct igb_adapter *adapter = netdev_priv(netdev);
6821 struct e1000_hw *hw = &adapter->hw;
6822 int actual_link_speed;
6823
6824 if (hw->mac.type != e1000_82576)
6825 return -EOPNOTSUPP;
6826
6827 actual_link_speed = igb_link_mbps(adapter->link_speed);
6828 if ((vf >= adapter->vfs_allocated_count) ||
6829 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
6830 (tx_rate < 0) || (tx_rate > actual_link_speed))
6831 return -EINVAL;
6832
6833 adapter->vf_rate_link_speed = actual_link_speed;
6834 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
6835 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
6836
6837 return 0;
6838}
6839
6840static int igb_ndo_get_vf_config(struct net_device *netdev,
6841 int vf, struct ifla_vf_info *ivi)
6842{
6843 struct igb_adapter *adapter = netdev_priv(netdev);
6844 if (vf >= adapter->vfs_allocated_count)
6845 return -EINVAL;
6846 ivi->vf = vf;
6847 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
6848 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
6849 ivi->vlan = adapter->vf_data[vf].pf_vlan;
6850 ivi->qos = adapter->vf_data[vf].pf_qos;
6851 return 0;
6852}
6853
6854static void igb_vmm_control(struct igb_adapter *adapter)
6855{
6856 struct e1000_hw *hw = &adapter->hw;
6857 u32 reg;
6858
6859 switch (hw->mac.type) {
6860 case e1000_82575:
6861 default:
6862 /* replication is not supported for 82575 */
6863 return;
6864 case e1000_82576:
6865 /* notify HW that the MAC is adding vlan tags */
6866 reg = rd32(E1000_DTXCTL);
6867 reg |= E1000_DTXCTL_VLAN_ADDED;
6868		wr32(E1000_DTXCTL, reg);	/* fall through */
6869 case e1000_82580:
6870 /* enable replication vlan tag stripping */
6871 reg = rd32(E1000_RPLOLR);
6872 reg |= E1000_RPLOLR_STRVLAN;
6873		wr32(E1000_RPLOLR, reg);	/* fall through */
6874 case e1000_i350:
6875 /* none of the above registers are supported by i350 */
6876 break;
6877 }
6878
6879 if (adapter->vfs_allocated_count) {
6880 igb_vmdq_set_loopback_pf(hw, true);
6881 igb_vmdq_set_replication_pf(hw, true);
6882 igb_vmdq_set_anti_spoofing_pf(hw, true,
6883 adapter->vfs_allocated_count);
6884 } else {
6885 igb_vmdq_set_loopback_pf(hw, false);
6886 igb_vmdq_set_replication_pf(hw, false);
6887 }
6888}
6889
6890/* igb_main.c */