author		Auke Kok <auke-jan.h.kok@intel.com>	2008-01-24 05:22:38 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 18:10:33 -0500
commit		9d5c824399dea881779d78a6c147288bf2dccb6b
tree		8c76b20c3cf1d81a63973e97578cea6a8a82a354
parent		b491edd5817f1618f4e06d67638739591a714bdb
igb: PCI-Express 82575 Gigabit Ethernet driver
We are pleased to announce a new Gigabit Ethernet product and its driver to the Linux community. This product is the Intel(R) 82575 Gigabit Ethernet adapter family. Physical adapters will be available to the public soon. These adapters currently come in 2- and 4-port versions (copper PHY); other variants will be available later.

The 82575 chipset supports significantly different features that warrant a new driver. The descriptor format is different (just like the ixgbe driver). The device can use multiple MSI-X vectors and multiple queues for both send and receive, which allows us to optimize some of the driver code specifically, compared to the e1000-supported devices.

This version of the igb driver no longer uses fake netdevices and incorporates napi_struct members for each ring to do the multi-queue polling. Multi-queue is enabled by default and the driver supports NAPI mode only. All the namespace collisions should be gone in this version too. The register macros have been condensed to improve readability.

Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
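To picture the per-ring NAPI polling mentioned above: each ring embeds its own napi_struct and registers its own poll routine, so each queue is polled independently. A minimal sketch of the idea follows; the igb_ring layout and the igb_poll name here are illustrative assumptions, not the exact definitions from igb.h/igb_main.c in this patch:

	/* Sketch only: every ring carries its own NAPI context. */
	struct igb_ring {
		struct igb_adapter *adapter;	/* backlink to device state */
		struct napi_struct napi;	/* per-ring NAPI context */
		/* descriptor ring memory, counters, ... */
	};

	/* Per-ring poll routine: clean up to "budget" packets on one ring. */
	static int igb_poll(struct napi_struct *napi, int budget)
	{
		struct igb_ring *ring = container_of(napi, struct igb_ring, napi);
		int work_done = 0;

		/* ... clean rx descriptors on this ring only ... */

		if (work_done < budget)
			netif_rx_complete(ring->adapter->netdev, napi);
		return work_done;
	}

	/* At probe time, one netif_napi_add() call per ring replaces the
	 * old fake-netdev trick: */
	netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);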
-rw-r--r--	drivers/net/Kconfig			  22
-rw-r--r--	drivers/net/Makefile			   1
-rw-r--r--	drivers/net/igb/Makefile		  37
-rw-r--r--	drivers/net/igb/e1000_82575.c		1269
-rw-r--r--	drivers/net/igb/e1000_82575.h		 150
-rw-r--r--	drivers/net/igb/e1000_defines.h		 772
-rw-r--r--	drivers/net/igb/e1000_hw.h		 599
-rw-r--r--	drivers/net/igb/e1000_mac.c		1505
-rw-r--r--	drivers/net/igb/e1000_mac.h		  98
-rw-r--r--	drivers/net/igb/e1000_nvm.c		 605
-rw-r--r--	drivers/net/igb/e1000_nvm.h		  40
-rw-r--r--	drivers/net/igb/e1000_phy.c		1807
-rw-r--r--	drivers/net/igb/e1000_phy.h		  98
-rw-r--r--	drivers/net/igb/e1000_regs.h		 270
-rw-r--r--	drivers/net/igb/igb.h			 300
-rw-r--r--	drivers/net/igb/igb_ethtool.c		1927
-rw-r--r--	drivers/net/igb/igb_main.c		4138
17 files changed, 13638 insertions(+), 0 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f87d9ff3311a..af40ff434def 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2019,6 +2019,28 @@ config IP1000
 	  To compile this driver as a module, choose M here: the module
 	  will be called ipg.  This is recommended.
 
+config IGB
+	tristate "Intel(R) 82575 PCI-Express Gigabit Ethernet support"
+	depends on PCI
+	---help---
+	  This driver supports the Intel(R) 82575 gigabit ethernet family of
+	  adapters.  For more information on how to identify your adapter, go
+	  to the Adapter & Driver ID Guide at:
+
+	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+	  For general information and support, go to the Intel support
+	  website at:
+
+	  <http://support.intel.com>
+
+	  More specific information on configuring the driver is in
+	  <file:Documentation/networking/e1000.txt>.
+
+	  To compile this driver as a module, choose M here and read
+	  <file:Documentation/networking/net-modules.txt>.  The module
+	  will be called igb.
+
 source "drivers/net/ixp2000/Kconfig"
 
 config MYRI_SBUS
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 79fe3f158af1..9fc7794e88ea 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_E1000E) += e1000e/
 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
 obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/
+obj-$(CONFIG_IGB) += igb/
 obj-$(CONFIG_IXGBE) += ixgbe/
 obj-$(CONFIG_IXGB) += ixgb/
 obj-$(CONFIG_IP1000) += ipg.o
diff --git a/drivers/net/igb/Makefile b/drivers/net/igb/Makefile
new file mode 100644
index 000000000000..1927b3fd6f05
--- /dev/null
+++ b/drivers/net/igb/Makefile
@@ -0,0 +1,37 @@
1################################################################################
2#
3# Intel 82575 PCI-Express Ethernet Linux driver
4# Copyright(c) 1999 - 2007 Intel Corporation.
5#
6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License,
8# version 2, as published by the Free Software Foundation.
9#
10# This program is distributed in the hope it will be useful, but WITHOUT
11# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13# more details.
14#
15# You should have received a copy of the GNU General Public License along with
16# this program; if not, write to the Free Software Foundation, Inc.,
17# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18#
19# The full GNU General Public License is included in this distribution in
20# the file called "COPYING".
21#
22# Contact Information:
23# Linux NICS <linux.nics@intel.com>
24# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26#
27################################################################################
28
29#
30# Makefile for the Intel(R) 82575 PCI-Express ethernet driver
31#
32
33obj-$(CONFIG_IGB) += igb.o
34
35igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
36 e1000_mac.o e1000_nvm.o e1000_phy.o
37
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
new file mode 100644
index 000000000000..cda3ec879090
--- /dev/null
+++ b/drivers/net/igb/e1000_82575.c
@@ -0,0 +1,1269 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28/* e1000_82575
29 * e1000_82576
30 */
31
32#include <linux/types.h>
33#include <linux/slab.h>
34
35#include "e1000_mac.h"
36#include "e1000_82575.h"
37
38static s32 igb_get_invariants_82575(struct e1000_hw *);
39static s32 igb_acquire_phy_82575(struct e1000_hw *);
40static void igb_release_phy_82575(struct e1000_hw *);
41static s32 igb_acquire_nvm_82575(struct e1000_hw *);
42static void igb_release_nvm_82575(struct e1000_hw *);
43static s32 igb_check_for_link_82575(struct e1000_hw *);
44static s32 igb_get_cfg_done_82575(struct e1000_hw *);
45static s32 igb_init_hw_82575(struct e1000_hw *);
46static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
47static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
48static void igb_rar_set_82575(struct e1000_hw *, u8 *, u32);
49static s32 igb_reset_hw_82575(struct e1000_hw *);
50static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
51static s32 igb_setup_copper_link_82575(struct e1000_hw *);
52static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *);
53static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
54static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
55static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
56static s32 igb_configure_pcs_link_82575(struct e1000_hw *);
57static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
58 u16 *);
59static s32 igb_get_phy_id_82575(struct e1000_hw *);
60static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
61static bool igb_sgmii_active_82575(struct e1000_hw *);
62static s32 igb_reset_init_script_82575(struct e1000_hw *);
63static s32 igb_read_mac_addr_82575(struct e1000_hw *);
64
65
66struct e1000_dev_spec_82575 {
67 bool sgmii_active;
68};
69
70static s32 igb_get_invariants_82575(struct e1000_hw *hw)
71{
72 struct e1000_phy_info *phy = &hw->phy;
73 struct e1000_nvm_info *nvm = &hw->nvm;
74 struct e1000_mac_info *mac = &hw->mac;
75 struct e1000_dev_spec_82575 *dev_spec;
76 u32 eecd;
77 s32 ret_val;
78 u16 size;
79 u32 ctrl_ext = 0;
80
81 switch (hw->device_id) {
82 case E1000_DEV_ID_82575EB_COPPER:
83 case E1000_DEV_ID_82575EB_FIBER_SERDES:
84 case E1000_DEV_ID_82575GB_QUAD_COPPER:
85 mac->type = e1000_82575;
86 break;
87 default:
88 return -E1000_ERR_MAC_INIT;
89 break;
90 }
91
92 /* MAC initialization */
93 hw->dev_spec_size = sizeof(struct e1000_dev_spec_82575);
94
95 /* Device-specific structure allocation */
96 hw->dev_spec = kzalloc(hw->dev_spec_size, GFP_KERNEL);
97
98 if (!hw->dev_spec)
99 return -ENOMEM;
100
101 dev_spec = (struct e1000_dev_spec_82575 *)hw->dev_spec;
102
103 /* Set media type */
104 /*
105 * The 82575 uses bits 22:23 for link mode. The mode can be changed
106 * based on the EEPROM. We cannot rely upon device ID. There
107 * is no distinguishable difference between fiber and internal
108 * SerDes mode on the 82575. There can be an external PHY attached
109 * on the SGMII interface. For this, we'll set sgmii_active to true.
110 */
111 phy->media_type = e1000_media_type_copper;
112 dev_spec->sgmii_active = false;
113
114 ctrl_ext = rd32(E1000_CTRL_EXT);
115 if ((ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) ==
116 E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES) {
117 hw->phy.media_type = e1000_media_type_internal_serdes;
118 ctrl_ext |= E1000_CTRL_I2C_ENA;
119 } else if (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII) {
120 dev_spec->sgmii_active = true;
121 ctrl_ext |= E1000_CTRL_I2C_ENA;
122 } else {
123 ctrl_ext &= ~E1000_CTRL_I2C_ENA;
124 }
125 wr32(E1000_CTRL_EXT, ctrl_ext);
126
127 /* Set mta register count */
128 mac->mta_reg_count = 128;
129 /* Set rar entry count */
130 mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
131 /* Set if part includes ASF firmware */
132 mac->asf_firmware_present = true;
133 /* Set if manageability features are enabled. */
134 mac->arc_subsystem_valid =
135 (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
136 ? true : false;
137
138 /* physical interface link setup */
139 mac->ops.setup_physical_interface =
140 (hw->phy.media_type == e1000_media_type_copper)
141 ? igb_setup_copper_link_82575
142 : igb_setup_fiber_serdes_link_82575;
143
144 /* NVM initialization */
145 eecd = rd32(E1000_EECD);
146
147 nvm->opcode_bits = 8;
148 nvm->delay_usec = 1;
149 switch (nvm->override) {
150 case e1000_nvm_override_spi_large:
151 nvm->page_size = 32;
152 nvm->address_bits = 16;
153 break;
154 case e1000_nvm_override_spi_small:
155 nvm->page_size = 8;
156 nvm->address_bits = 8;
157 break;
158 default:
159 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
160 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
161 break;
162 }
163
164 nvm->type = e1000_nvm_eeprom_spi;
165
166 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
167 E1000_EECD_SIZE_EX_SHIFT);
168
169 /*
170 * Added to a constant, "size" becomes the left-shift value
171 * for setting word_size.
172 */
173 size += NVM_WORD_SIZE_BASE_SHIFT;
174 nvm->word_size = 1 << size;
175
176 /* setup PHY parameters */
177 if (phy->media_type != e1000_media_type_copper) {
178 phy->type = e1000_phy_none;
179 return 0;
180 }
181
182 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
183 phy->reset_delay_us = 100;
184
185 /* PHY function pointers */
186 if (igb_sgmii_active_82575(hw)) {
187 phy->ops.reset_phy = igb_phy_hw_reset_sgmii_82575;
188 phy->ops.read_phy_reg = igb_read_phy_reg_sgmii_82575;
189 phy->ops.write_phy_reg = igb_write_phy_reg_sgmii_82575;
190 } else {
191 phy->ops.reset_phy = igb_phy_hw_reset;
192 phy->ops.read_phy_reg = igb_read_phy_reg_igp;
193 phy->ops.write_phy_reg = igb_write_phy_reg_igp;
194 }
195
196 /* Set phy->phy_addr and phy->id. */
197 ret_val = igb_get_phy_id_82575(hw);
198 if (ret_val)
199 return ret_val;
200
201 /* Verify phy id and set remaining function pointers */
202 switch (phy->id) {
203 case M88E1111_I_PHY_ID:
204 phy->type = e1000_phy_m88;
205 phy->ops.get_phy_info = igb_get_phy_info_m88;
206 phy->ops.get_cable_length = igb_get_cable_length_m88;
207 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
208 break;
209 case IGP03E1000_E_PHY_ID:
210 phy->type = e1000_phy_igp_3;
211 phy->ops.get_phy_info = igb_get_phy_info_igp;
212 phy->ops.get_cable_length = igb_get_cable_length_igp_2;
213 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
214 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
215 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
216 break;
217 default:
218 return -E1000_ERR_PHY;
219 }
220
221 return 0;
222}
223
224/**
225 * e1000_acquire_phy_82575 - Acquire rights to access PHY
226 * @hw: pointer to the HW structure
227 *
228 * Acquire access rights to the correct PHY. This is a
229 * function pointer entry point called by the api module.
230 **/
231static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
232{
233 u16 mask;
234
235 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
236
237 return igb_acquire_swfw_sync_82575(hw, mask);
238}
239
240/**
241 * e1000_release_phy_82575 - Release rights to access PHY
242 * @hw: pointer to the HW structure
243 *
244 * A wrapper to release access rights to the correct PHY. This is a
245 * function pointer entry point called by the api module.
246 **/
247static void igb_release_phy_82575(struct e1000_hw *hw)
248{
249 u16 mask;
250
251 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
252 igb_release_swfw_sync_82575(hw, mask);
253}
254
255/**
256 * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
257 * @hw: pointer to the HW structure
258 * @offset: register offset to be read
259 * @data: pointer to the read data
260 *
261 * Reads the PHY register at offset using the serial gigabit media independent
262 * interface and stores the retrieved information in data.
263 **/
264static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
265 u16 *data)
266{
267 struct e1000_phy_info *phy = &hw->phy;
268 u32 i, i2ccmd = 0;
269
270 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
271 hw_dbg(hw, "PHY Address %u is out of range\n", offset);
272 return -E1000_ERR_PARAM;
273 }
274
275 /*
276 * Set up Op-code, Phy Address, and register address in the I2CCMD
277 * register. The MAC will take care of interfacing with the
278 * PHY to retrieve the desired data.
279 */
280 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
281 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
282 (E1000_I2CCMD_OPCODE_READ));
283
284 wr32(E1000_I2CCMD, i2ccmd);
285
286 /* Poll the ready bit to see if the I2C read completed */
287 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
288 udelay(50);
289 i2ccmd = rd32(E1000_I2CCMD);
290 if (i2ccmd & E1000_I2CCMD_READY)
291 break;
292 }
293 if (!(i2ccmd & E1000_I2CCMD_READY)) {
294 hw_dbg(hw, "I2CCMD Read did not complete\n");
295 return -E1000_ERR_PHY;
296 }
297 if (i2ccmd & E1000_I2CCMD_ERROR) {
298 hw_dbg(hw, "I2CCMD Error bit set\n");
299 return -E1000_ERR_PHY;
300 }
301
302 /* Need to byte-swap the 16-bit value. */
303 *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
304
305 return 0;
306}
307
308/**
309 * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
310 * @hw: pointer to the HW structure
311 * @offset: register offset to write to
312 * @data: data to write at register offset
313 *
314 * Writes the data to PHY register at the offset using the serial gigabit
315 * media independent interface.
316 **/
317static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
318 u16 data)
319{
320 struct e1000_phy_info *phy = &hw->phy;
321 u32 i, i2ccmd = 0;
322 u16 phy_data_swapped;
323
324 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
325 hw_dbg(hw, "PHY Address %d is out of range\n", offset);
326 return -E1000_ERR_PARAM;
327 }
328
329 /* Swap the data bytes for the I2C interface */
330 phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
331
332 /*
333 * Set up Op-code, Phy Address, and register address in the I2CCMD
334 * register. The MAC will take care of interfacing with the
 335	 * PHY to write the desired data.
336 */
337 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
338 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
339 E1000_I2CCMD_OPCODE_WRITE |
340 phy_data_swapped);
341
342 wr32(E1000_I2CCMD, i2ccmd);
343
 344	/* Poll the ready bit to see if the I2C write completed */
345 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
346 udelay(50);
347 i2ccmd = rd32(E1000_I2CCMD);
348 if (i2ccmd & E1000_I2CCMD_READY)
349 break;
350 }
351 if (!(i2ccmd & E1000_I2CCMD_READY)) {
352 hw_dbg(hw, "I2CCMD Write did not complete\n");
353 return -E1000_ERR_PHY;
354 }
355 if (i2ccmd & E1000_I2CCMD_ERROR) {
356 hw_dbg(hw, "I2CCMD Error bit set\n");
357 return -E1000_ERR_PHY;
358 }
359
360 return 0;
361}
362
363/**
 364 * e1000_get_phy_id_82575 - Retrieve PHY addr and id
 365 * @hw: pointer to the HW structure
 366 *
 367 * Retrieves the PHY address and ID for both PHYs which do and do not use
 368 * the sgmii interface.
369 **/
370static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
371{
372 struct e1000_phy_info *phy = &hw->phy;
373 s32 ret_val = 0;
374 u16 phy_id;
375
376 /*
377 * For SGMII PHYs, we try the list of possible addresses until
378 * we find one that works. For non-SGMII PHYs
379 * (e.g. integrated copper PHYs), an address of 1 should
380 * work. The result of this function should mean phy->phy_addr
381 * and phy->id are set correctly.
382 */
383 if (!(igb_sgmii_active_82575(hw))) {
384 phy->addr = 1;
385 ret_val = igb_get_phy_id(hw);
386 goto out;
387 }
388
389 /*
390 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
391 * Therefore, we need to test 1-7
392 */
393 for (phy->addr = 1; phy->addr < 8; phy->addr++) {
394 ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
395 if (ret_val == 0) {
396 hw_dbg(hw, "Vendor ID 0x%08X read at address %u\n",
397 phy_id,
398 phy->addr);
399 /*
400 * At the time of this writing, The M88 part is
401 * the only supported SGMII PHY product.
402 */
403 if (phy_id == M88_VENDOR)
404 break;
405 } else {
406 hw_dbg(hw, "PHY address %u was unreadable\n",
407 phy->addr);
408 }
409 }
410
411 /* A valid PHY type couldn't be found. */
412 if (phy->addr == 8) {
413 phy->addr = 0;
414 ret_val = -E1000_ERR_PHY;
415 goto out;
416 }
417
418 ret_val = igb_get_phy_id(hw);
419
420out:
421 return ret_val;
422}
423
424/**
425 * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
426 * @hw: pointer to the HW structure
427 *
428 * Resets the PHY using the serial gigabit media independent interface.
429 **/
430static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
431{
432 s32 ret_val;
433
434 /*
435 * This isn't a true "hard" reset, but is the only reset
436 * available to us at this time.
437 */
438
439 hw_dbg(hw, "Soft resetting SGMII attached PHY...\n");
440
441 /*
 442	 * SFP documentation requires the following to configure the SFP module
443 * to work on SGMII. No further documentation is given.
444 */
445 ret_val = hw->phy.ops.write_phy_reg(hw, 0x1B, 0x8084);
446 if (ret_val)
447 goto out;
448
449 ret_val = igb_phy_sw_reset(hw);
450
451out:
452 return ret_val;
453}
454
455/**
456 * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
457 * @hw: pointer to the HW structure
458 * @active: true to enable LPLU, false to disable
459 *
460 * Sets the LPLU D0 state according to the active flag. When
461 * activating LPLU this function also disables smart speed
462 * and vice versa. LPLU will not be activated unless the
463 * device autonegotiation advertisement meets standards of
464 * either 10 or 10/100 or 10/100/1000 at all duplexes.
465 * This is a function pointer entry point only called by
466 * PHY setup routines.
467 **/
468static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
469{
470 struct e1000_phy_info *phy = &hw->phy;
471 s32 ret_val;
472 u16 data;
473
474 ret_val = hw->phy.ops.read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
475 &data);
476 if (ret_val)
477 goto out;
478
479 if (active) {
480 data |= IGP02E1000_PM_D0_LPLU;
481 ret_val = hw->phy.ops.write_phy_reg(hw,
482 IGP02E1000_PHY_POWER_MGMT,
483 data);
484 if (ret_val)
485 goto out;
486
487 /* When LPLU is enabled, we should disable SmartSpeed */
488 ret_val = hw->phy.ops.read_phy_reg(hw,
489 IGP01E1000_PHY_PORT_CONFIG,
490 &data);
491 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
492 ret_val = hw->phy.ops.write_phy_reg(hw,
493 IGP01E1000_PHY_PORT_CONFIG,
494 data);
495 if (ret_val)
496 goto out;
497 } else {
498 data &= ~IGP02E1000_PM_D0_LPLU;
499 ret_val = hw->phy.ops.write_phy_reg(hw,
500 IGP02E1000_PHY_POWER_MGMT,
501 data);
502 /*
503 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
504 * during Dx states where the power conservation is most
505 * important. During driver activity we should enable
506 * SmartSpeed, so performance is maintained.
507 */
508 if (phy->smart_speed == e1000_smart_speed_on) {
509 ret_val = hw->phy.ops.read_phy_reg(hw,
510 IGP01E1000_PHY_PORT_CONFIG,
511 &data);
512 if (ret_val)
513 goto out;
514
515 data |= IGP01E1000_PSCFR_SMART_SPEED;
516 ret_val = hw->phy.ops.write_phy_reg(hw,
517 IGP01E1000_PHY_PORT_CONFIG,
518 data);
519 if (ret_val)
520 goto out;
521 } else if (phy->smart_speed == e1000_smart_speed_off) {
522 ret_val = hw->phy.ops.read_phy_reg(hw,
523 IGP01E1000_PHY_PORT_CONFIG,
524 &data);
525 if (ret_val)
526 goto out;
527
528 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
529 ret_val = hw->phy.ops.write_phy_reg(hw,
530 IGP01E1000_PHY_PORT_CONFIG,
531 data);
532 if (ret_val)
533 goto out;
534 }
535 }
536
537out:
538 return ret_val;
539}
540
541/**
542 * e1000_acquire_nvm_82575 - Request for access to EEPROM
543 * @hw: pointer to the HW structure
544 *
 545 * Acquire the necessary semaphores for exclusive access to the EEPROM.
546 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
547 * Return successful if access grant bit set, else clear the request for
548 * EEPROM access and return -E1000_ERR_NVM (-1).
549 **/
550static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
551{
552 s32 ret_val;
553
554 ret_val = igb_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
555 if (ret_val)
556 goto out;
557
558 ret_val = igb_acquire_nvm(hw);
559
560 if (ret_val)
561 igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
562
563out:
564 return ret_val;
565}
566
567/**
568 * e1000_release_nvm_82575 - Release exclusive access to EEPROM
569 * @hw: pointer to the HW structure
570 *
571 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
572 * then release the semaphores acquired.
573 **/
574static void igb_release_nvm_82575(struct e1000_hw *hw)
575{
576 igb_release_nvm(hw);
577 igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
578}
579
580/**
581 * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
582 * @hw: pointer to the HW structure
583 * @mask: specifies which semaphore to acquire
584 *
585 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
586 * will also specify which port we're acquiring the lock for.
587 **/
588static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
589{
590 u32 swfw_sync;
591 u32 swmask = mask;
592 u32 fwmask = mask << 16;
593 s32 ret_val = 0;
594 s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
595
596 while (i < timeout) {
597 if (igb_get_hw_semaphore(hw)) {
598 ret_val = -E1000_ERR_SWFW_SYNC;
599 goto out;
600 }
601
602 swfw_sync = rd32(E1000_SW_FW_SYNC);
603 if (!(swfw_sync & (fwmask | swmask)))
604 break;
605
606 /*
607 * Firmware currently using resource (fwmask)
608 * or other software thread using resource (swmask)
609 */
610 igb_put_hw_semaphore(hw);
611 mdelay(5);
612 i++;
613 }
614
615 if (i == timeout) {
616 hw_dbg(hw, "Can't access resource, SW_FW_SYNC timeout.\n");
617 ret_val = -E1000_ERR_SWFW_SYNC;
618 goto out;
619 }
620
621 swfw_sync |= swmask;
622 wr32(E1000_SW_FW_SYNC, swfw_sync);
623
624 igb_put_hw_semaphore(hw);
625
626out:
627 return ret_val;
628}
629
630/**
631 * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
632 * @hw: pointer to the HW structure
633 * @mask: specifies which semaphore to acquire
634 *
635 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
636 * will also specify which port we're releasing the lock for.
637 **/
638static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
639{
640 u32 swfw_sync;
641
642 while (igb_get_hw_semaphore(hw) != 0);
643 /* Empty */
644
645 swfw_sync = rd32(E1000_SW_FW_SYNC);
646 swfw_sync &= ~mask;
647 wr32(E1000_SW_FW_SYNC, swfw_sync);
648
649 igb_put_hw_semaphore(hw);
650}
651
652/**
653 * e1000_get_cfg_done_82575 - Read config done bit
654 * @hw: pointer to the HW structure
655 *
656 * Read the management control register for the config done bit for
 657 * completion status.  NOTE: silicon which is EEPROM-less will fail trying
 658 * to read the config done bit, so the error is *ONLY* logged and 0 is
 659 * returned.  If we were to return with an error, EEPROM-less silicon
 660 * would not be able to be reset or change link.
661 **/
662static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
663{
664 s32 timeout = PHY_CFG_TIMEOUT;
665 s32 ret_val = 0;
666 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
667
668 if (hw->bus.func == 1)
669 mask = E1000_NVM_CFG_DONE_PORT_1;
670
671 while (timeout) {
672 if (rd32(E1000_EEMNGCTL) & mask)
673 break;
674 msleep(1);
675 timeout--;
676 }
677 if (!timeout)
678 hw_dbg(hw, "MNG configuration cycle has not completed.\n");
679
680 /* If EEPROM is not marked present, init the PHY manually */
681 if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
682 (hw->phy.type == e1000_phy_igp_3))
683 igb_phy_init_script_igp3(hw);
684
685 return ret_val;
686}
687
688/**
689 * e1000_check_for_link_82575 - Check for link
690 * @hw: pointer to the HW structure
691 *
692 * If sgmii is enabled, then use the pcs register to determine link, otherwise
693 * use the generic interface for determining link.
694 **/
695static s32 igb_check_for_link_82575(struct e1000_hw *hw)
696{
697 s32 ret_val;
698 u16 speed, duplex;
699
700 /* SGMII link check is done through the PCS register. */
701 if ((hw->phy.media_type != e1000_media_type_copper) ||
702 (igb_sgmii_active_82575(hw)))
703 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
704 &duplex);
705 else
706 ret_val = igb_check_for_copper_link(hw);
707
708 return ret_val;
709}
710
711/**
712 * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
713 * @hw: pointer to the HW structure
714 * @speed: stores the current speed
715 * @duplex: stores the current duplex
716 *
 717 * Using the physical coding sub-layer (PCS), retrieve the current speed and
718 * duplex, then store the values in the pointers provided.
719 **/
720static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
721 u16 *duplex)
722{
723 struct e1000_mac_info *mac = &hw->mac;
724 u32 pcs;
725
726 /* Set up defaults for the return values of this function */
727 mac->serdes_has_link = false;
728 *speed = 0;
729 *duplex = 0;
730
 731	/*
 732	 * Read the PCS Status register for link state.  For non-copper mode,
 733	 * the standard status register is not accurate, so the PCS status
 734	 * register is used instead.
 735	 */
736 pcs = rd32(E1000_PCS_LSTAT);
737
738 /*
739 * The link up bit determines when link is up on autoneg. The sync ok
740 * gets set once both sides sync up and agree upon link. Stable link
741 * can be determined by checking for both link up and link sync ok
 742	 * can be determined by checking for both link up and link sync ok.
743 if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
744 mac->serdes_has_link = true;
745
746 /* Detect and store PCS speed */
747 if (pcs & E1000_PCS_LSTS_SPEED_1000) {
748 *speed = SPEED_1000;
749 } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
750 *speed = SPEED_100;
751 } else {
752 *speed = SPEED_10;
753 }
754
755 /* Detect and store PCS duplex */
756 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
757 *duplex = FULL_DUPLEX;
758 } else {
759 *duplex = HALF_DUPLEX;
760 }
761 }
762
763 return 0;
764}
765
766/**
767 * e1000_rar_set_82575 - Set receive address register
768 * @hw: pointer to the HW structure
769 * @addr: pointer to the receive address
770 * @index: receive address array register
771 *
772 * Sets the receive address array register at index to the address passed
773 * in by addr.
774 **/
775static void igb_rar_set_82575(struct e1000_hw *hw, u8 *addr, u32 index)
776{
777 if (index < E1000_RAR_ENTRIES_82575)
778 igb_rar_set(hw, addr, index);
779
780 return;
781}
782
783/**
784 * e1000_reset_hw_82575 - Reset hardware
785 * @hw: pointer to the HW structure
786 *
787 * This resets the hardware into a known state. This is a
788 * function pointer entry point called by the api module.
789 **/
790static s32 igb_reset_hw_82575(struct e1000_hw *hw)
791{
792 u32 ctrl, icr;
793 s32 ret_val;
794
795 /*
796 * Prevent the PCI-E bus from sticking if there is no TLP connection
797 * on the last TLP read/write transaction when MAC is reset.
798 */
799 ret_val = igb_disable_pcie_master(hw);
800 if (ret_val)
801 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
802
803 hw_dbg(hw, "Masking off all interrupts\n");
804 wr32(E1000_IMC, 0xffffffff);
805
806 wr32(E1000_RCTL, 0);
807 wr32(E1000_TCTL, E1000_TCTL_PSP);
808 wrfl();
809
810 msleep(10);
811
812 ctrl = rd32(E1000_CTRL);
813
814 hw_dbg(hw, "Issuing a global reset to MAC\n");
815 wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);
816
817 ret_val = igb_get_auto_rd_done(hw);
818 if (ret_val) {
819 /*
820 * When auto config read does not complete, do not
821 * return with an error. This can happen in situations
822 * where there is no eeprom and prevents getting link.
823 */
824 hw_dbg(hw, "Auto Read Done did not complete\n");
825 }
826
827 /* If EEPROM is not present, run manual init scripts */
828 if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
829 igb_reset_init_script_82575(hw);
830
831 /* Clear any pending interrupt events. */
832 wr32(E1000_IMC, 0xffffffff);
833 icr = rd32(E1000_ICR);
834
835 igb_check_alt_mac_addr(hw);
836
837 return ret_val;
838}
839
840/**
841 * e1000_init_hw_82575 - Initialize hardware
842 * @hw: pointer to the HW structure
843 *
844 * This inits the hardware readying it for operation.
845 **/
846static s32 igb_init_hw_82575(struct e1000_hw *hw)
847{
848 struct e1000_mac_info *mac = &hw->mac;
849 s32 ret_val;
850 u16 i, rar_count = mac->rar_entry_count;
851
852 /* Initialize identification LED */
853 ret_val = igb_id_led_init(hw);
854 if (ret_val) {
855 hw_dbg(hw, "Error initializing identification LED\n");
856 /* This is not fatal and we should not stop init due to this */
857 }
858
859 /* Disabling VLAN filtering */
860 hw_dbg(hw, "Initializing the IEEE VLAN\n");
861 igb_clear_vfta(hw);
862
863 /* Setup the receive address */
864 igb_init_rx_addrs(hw, rar_count);
865 /* Zero out the Multicast HASH table */
866 hw_dbg(hw, "Zeroing the MTA\n");
867 for (i = 0; i < mac->mta_reg_count; i++)
868 array_wr32(E1000_MTA, i, 0);
869
870 /* Setup link and flow control */
871 ret_val = igb_setup_link(hw);
872
873 /*
874 * Clear all of the statistics registers (clear on read). It is
875 * important that we do this after we have tried to establish link
876 * because the symbol error count will increment wildly if there
877 * is no link.
878 */
879 igb_clear_hw_cntrs_82575(hw);
880
881 return ret_val;
882}
883
884/**
885 * e1000_setup_copper_link_82575 - Configure copper link settings
886 * @hw: pointer to the HW structure
887 *
888 * Configures the link for auto-neg or forced speed and duplex. Then we check
889 * for link, once link is established calls to configure collision distance
890 * and flow control are called.
891 **/
892static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
893{
894 u32 ctrl, led_ctrl;
895 s32 ret_val;
896 bool link;
897
898 ctrl = rd32(E1000_CTRL);
899 ctrl |= E1000_CTRL_SLU;
900 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
901 wr32(E1000_CTRL, ctrl);
902
903 switch (hw->phy.type) {
904 case e1000_phy_m88:
905 ret_val = igb_copper_link_setup_m88(hw);
906 break;
907 case e1000_phy_igp_3:
908 ret_val = igb_copper_link_setup_igp(hw);
909 /* Setup activity LED */
910 led_ctrl = rd32(E1000_LEDCTL);
911 led_ctrl &= IGP_ACTIVITY_LED_MASK;
912 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
913 wr32(E1000_LEDCTL, led_ctrl);
914 break;
915 default:
916 ret_val = -E1000_ERR_PHY;
917 break;
918 }
919
920 if (ret_val)
921 goto out;
922
923 if (hw->mac.autoneg) {
924 /*
925 * Setup autoneg and flow control advertisement
926 * and perform autonegotiation.
927 */
928 ret_val = igb_copper_link_autoneg(hw);
929 if (ret_val)
930 goto out;
931 } else {
932 /*
933 * PHY will be set to 10H, 10F, 100H or 100F
934 * depending on user settings.
935 */
936 hw_dbg(hw, "Forcing Speed and Duplex\n");
937 ret_val = igb_phy_force_speed_duplex(hw);
938 if (ret_val) {
939 hw_dbg(hw, "Error Forcing Speed and Duplex\n");
940 goto out;
941 }
942 }
943
944 ret_val = igb_configure_pcs_link_82575(hw);
945 if (ret_val)
946 goto out;
947
948 /*
949 * Check link status. Wait up to 100 microseconds for link to become
950 * valid.
951 */
952 ret_val = igb_phy_has_link(hw,
953 COPPER_LINK_UP_LIMIT,
954 10,
955 &link);
956 if (ret_val)
957 goto out;
958
959 if (link) {
960 hw_dbg(hw, "Valid link established!!!\n");
961 /* Config the MAC and PHY after link is up */
962 igb_config_collision_dist(hw);
963 ret_val = igb_config_fc_after_link_up(hw);
964 } else {
965 hw_dbg(hw, "Unable to establish link!!!\n");
966 }
967
968out:
969 return ret_val;
970}
971
972/**
973 * e1000_setup_fiber_serdes_link_82575 - Setup link for fiber/serdes
974 * @hw: pointer to the HW structure
975 *
976 * Configures speed and duplex for fiber and serdes links.
977 **/
978static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
979{
980 u32 reg;
981
982 /*
983 * On the 82575, SerDes loopback mode persists until it is
984 * explicitly turned off or a power cycle is performed. A read to
985 * the register does not indicate its status. Therefore, we ensure
986 * loopback mode is disabled during initialization.
987 */
988 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
989
990 /* Force link up, set 1gb, set both sw defined pins */
991 reg = rd32(E1000_CTRL);
992 reg |= E1000_CTRL_SLU |
993 E1000_CTRL_SPD_1000 |
994 E1000_CTRL_FRCSPD |
995 E1000_CTRL_SWDPIN0 |
996 E1000_CTRL_SWDPIN1;
997 wr32(E1000_CTRL, reg);
998
999 /* Set switch control to serdes energy detect */
1000 reg = rd32(E1000_CONNSW);
1001 reg |= E1000_CONNSW_ENRGSRC;
1002 wr32(E1000_CONNSW, reg);
1003
 1004	/*
 1005	 * New SerDes mode allows for forcing speed or autonegotiating speed
 1006	 * at 1gb.  Autoneg should be the default for most drivers.  This is the
 1007	 * mode that will be compatible with older link partners and switches.
 1008	 * However, both modes are supported by the hardware and some drivers/tools.
 1009	 */
1010 reg = rd32(E1000_PCS_LCTL);
1011
1012 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1013 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1014
1015 if (hw->mac.autoneg) {
1016 /* Set PCS register for autoneg */
1017 reg |= E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
1018 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
1019 E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1020 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1021 hw_dbg(hw, "Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg);
1022 } else {
1023 /* Set PCS register for forced speed */
1024 reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
1025 E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
1026 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
1027 E1000_PCS_LCTL_FSD | /* Force Speed */
1028 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
1029 hw_dbg(hw, "Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);
1030 }
1031 wr32(E1000_PCS_LCTL, reg);
1032
1033 return 0;
1034}
1035
1036/**
1037 * e1000_configure_pcs_link_82575 - Configure PCS link
1038 * @hw: pointer to the HW structure
1039 *
1040 * Configure the physical coding sub-layer (PCS) link. The PCS link is
 1041 * only used on copper connections where the serial gigabit media
 1042 * independent interface (sgmii) is being used.
1043 * for auto-negotiation or forces speed/duplex.
1044 **/
1045static s32 igb_configure_pcs_link_82575(struct e1000_hw *hw)
1046{
1047 struct e1000_mac_info *mac = &hw->mac;
1048 u32 reg = 0;
1049
1050 if (hw->phy.media_type != e1000_media_type_copper ||
1051 !(igb_sgmii_active_82575(hw)))
1052 goto out;
1053
1054 /* For SGMII, we need to issue a PCS autoneg restart */
1055 reg = rd32(E1000_PCS_LCTL);
1056
1057 /* AN time out should be disabled for SGMII mode */
1058 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1059
1060 if (mac->autoneg) {
1061 /* Make sure forced speed and force link are not set */
1062 reg &= ~(E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1063
1064 /*
1065 * The PHY should be setup prior to calling this function.
1066 * All we need to do is restart autoneg and enable autoneg.
1067 */
1068 reg |= E1000_PCS_LCTL_AN_RESTART | E1000_PCS_LCTL_AN_ENABLE;
1069 } else {
 1070		/* Set PCS register for forced speed */
1071
1072 /* Turn off bits for full duplex, speed, and autoneg */
1073 reg &= ~(E1000_PCS_LCTL_FSV_1000 |
1074 E1000_PCS_LCTL_FSV_100 |
1075 E1000_PCS_LCTL_FDV_FULL |
1076 E1000_PCS_LCTL_AN_ENABLE);
1077
1078 /* Check for duplex first */
1079 if (mac->forced_speed_duplex & E1000_ALL_FULL_DUPLEX)
1080 reg |= E1000_PCS_LCTL_FDV_FULL;
1081
1082 /* Now set speed */
1083 if (mac->forced_speed_duplex & E1000_ALL_100_SPEED)
1084 reg |= E1000_PCS_LCTL_FSV_100;
1085
1086 /* Force speed and force link */
1087 reg |= E1000_PCS_LCTL_FSD |
1088 E1000_PCS_LCTL_FORCE_LINK |
1089 E1000_PCS_LCTL_FLV_LINK_UP;
1090
1091 hw_dbg(hw,
1092 "Wrote 0x%08X to PCS_LCTL to configure forced link\n",
1093 reg);
1094 }
1095 wr32(E1000_PCS_LCTL, reg);
1096
1097out:
1098 return 0;
1099}
1100
1101/**
1102 * e1000_sgmii_active_82575 - Return sgmii state
1103 * @hw: pointer to the HW structure
1104 *
 1105 * 82575 silicon has a serial gigabit media independent interface (sgmii)
1106 * which can be enabled for use in the embedded applications. Simply
1107 * return the current state of the sgmii interface.
1108 **/
1109static bool igb_sgmii_active_82575(struct e1000_hw *hw)
1110{
1111 struct e1000_dev_spec_82575 *dev_spec;
1112 bool ret_val;
1113
1114 if (hw->mac.type != e1000_82575) {
1115 ret_val = false;
1116 goto out;
1117 }
1118
1119 dev_spec = (struct e1000_dev_spec_82575 *)hw->dev_spec;
1120
1121 ret_val = dev_spec->sgmii_active;
1122
1123out:
1124 return ret_val;
1125}
1126
1127/**
1128 * e1000_reset_init_script_82575 - Inits HW defaults after reset
1129 * @hw: pointer to the HW structure
1130 *
1131 * Inits recommended HW defaults after a reset when there is no EEPROM
1132 * detected. This is only for the 82575.
1133 **/
1134static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
1135{
1136 if (hw->mac.type == e1000_82575) {
1137 hw_dbg(hw, "Running reset init script for 82575\n");
1138 /* SerDes configuration via SERDESCTRL */
1139 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
1140 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
1141 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
1142 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
1143
1144 /* CCM configuration via CCMCTL register */
1145 igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
1146 igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
1147
1148 /* PCIe lanes configuration */
1149 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
1150 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
1151 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
1152 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
1153
1154 /* PCIe PLL Configuration */
1155 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
1156 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
1157 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
1158 }
1159
1160 return 0;
1161}
1162
1163/**
1164 * e1000_read_mac_addr_82575 - Read device MAC address
1165 * @hw: pointer to the HW structure
1166 **/
1167static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1168{
1169 s32 ret_val = 0;
1170
1171 if (igb_check_alt_mac_addr(hw))
1172 ret_val = igb_read_mac_addr(hw);
1173
1174 return ret_val;
1175}
1176
1177/**
1178 * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
1179 * @hw: pointer to the HW structure
1180 *
1181 * Clears the hardware counters by reading the counter registers.
1182 **/
1183static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
1184{
1185 u32 temp;
1186
1187 igb_clear_hw_cntrs_base(hw);
1188
1189 temp = rd32(E1000_PRC64);
1190 temp = rd32(E1000_PRC127);
1191 temp = rd32(E1000_PRC255);
1192 temp = rd32(E1000_PRC511);
1193 temp = rd32(E1000_PRC1023);
1194 temp = rd32(E1000_PRC1522);
1195 temp = rd32(E1000_PTC64);
1196 temp = rd32(E1000_PTC127);
1197 temp = rd32(E1000_PTC255);
1198 temp = rd32(E1000_PTC511);
1199 temp = rd32(E1000_PTC1023);
1200 temp = rd32(E1000_PTC1522);
1201
1202 temp = rd32(E1000_ALGNERRC);
1203 temp = rd32(E1000_RXERRC);
1204 temp = rd32(E1000_TNCRS);
1205 temp = rd32(E1000_CEXTERR);
1206 temp = rd32(E1000_TSCTC);
1207 temp = rd32(E1000_TSCTFC);
1208
1209 temp = rd32(E1000_MGTPRC);
1210 temp = rd32(E1000_MGTPDC);
1211 temp = rd32(E1000_MGTPTC);
1212
1213 temp = rd32(E1000_IAC);
1214 temp = rd32(E1000_ICRXOC);
1215
1216 temp = rd32(E1000_ICRXPTC);
1217 temp = rd32(E1000_ICRXATC);
1218 temp = rd32(E1000_ICTXPTC);
1219 temp = rd32(E1000_ICTXATC);
1220 temp = rd32(E1000_ICTXQEC);
1221 temp = rd32(E1000_ICTXQMTC);
1222 temp = rd32(E1000_ICRXDMTC);
1223
1224 temp = rd32(E1000_CBTMPC);
1225 temp = rd32(E1000_HTDPMC);
1226 temp = rd32(E1000_CBRMPC);
1227 temp = rd32(E1000_RPTHC);
1228 temp = rd32(E1000_HGPTC);
1229 temp = rd32(E1000_HTCBDPC);
1230 temp = rd32(E1000_HGORCL);
1231 temp = rd32(E1000_HGORCH);
1232 temp = rd32(E1000_HGOTCL);
1233 temp = rd32(E1000_HGOTCH);
1234 temp = rd32(E1000_LENERRS);
1235
1236 /* This register should not be read in copper configurations */
1237 if (hw->phy.media_type == e1000_media_type_internal_serdes)
1238 temp = rd32(E1000_SCVPC);
1239}
1240
1241static struct e1000_mac_operations e1000_mac_ops_82575 = {
1242 .reset_hw = igb_reset_hw_82575,
1243 .init_hw = igb_init_hw_82575,
1244 .check_for_link = igb_check_for_link_82575,
1245 .rar_set = igb_rar_set_82575,
1246 .read_mac_addr = igb_read_mac_addr_82575,
1247 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
1248};
1249
1250static struct e1000_phy_operations e1000_phy_ops_82575 = {
1251 .acquire_phy = igb_acquire_phy_82575,
1252 .get_cfg_done = igb_get_cfg_done_82575,
1253 .release_phy = igb_release_phy_82575,
1254};
1255
1256static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
1257 .acquire_nvm = igb_acquire_nvm_82575,
1258 .read_nvm = igb_read_nvm_eerd,
1259 .release_nvm = igb_release_nvm_82575,
1260 .write_nvm = igb_write_nvm_spi,
1261};
1262
1263const struct e1000_info e1000_82575_info = {
1264 .get_invariants = igb_get_invariants_82575,
1265 .mac_ops = &e1000_mac_ops_82575,
1266 .phy_ops = &e1000_phy_ops_82575,
1267 .nvm_ops = &e1000_nvm_ops_82575,
1268};
1269
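The function pointer tables above are what keep the rest of the driver chip-independent: generic MAC/PHY/NVM code calls through hw->mac.ops, hw->phy.ops and hw->nvm.ops instead of naming 82575-specific functions, and igb_get_invariants_82575() fills in the chip-specific implementations. A hedged sketch of the dispatch pattern (the wrapper name is hypothetical):

	/* Sketch: chip-agnostic code dispatches through the ops tables.
	 * Depending on what get_invariants installed, this resolves to
	 * igb_read_phy_reg_sgmii_82575() or igb_read_phy_reg_igp(). */
	static s32 example_read_phy(struct e1000_hw *hw, u32 offset, u16 *data)
	{
		return hw->phy.ops.read_phy_reg(hw, offset, data);
	}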
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
new file mode 100644
index 000000000000..6604d96bd567
--- /dev/null
+++ b/drivers/net/igb/e1000_82575.h
@@ -0,0 +1,150 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_82575_H_
29#define _E1000_82575_H_
30
31#define E1000_RAR_ENTRIES_82575 16
32
33/* SRRCTL bit definitions */
34#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
35#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
36#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
37#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
38
39#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
40#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
41#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
42#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
43
44#define E1000_EICR_TX_QUEUE ( \
45 E1000_EICR_TX_QUEUE0 | \
46 E1000_EICR_TX_QUEUE1 | \
47 E1000_EICR_TX_QUEUE2 | \
48 E1000_EICR_TX_QUEUE3)
49
50#define E1000_EICR_RX_QUEUE ( \
51 E1000_EICR_RX_QUEUE0 | \
52 E1000_EICR_RX_QUEUE1 | \
53 E1000_EICR_RX_QUEUE2 | \
54 E1000_EICR_RX_QUEUE3)
55
56#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
57#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
58
59/* Immediate Interrupt RX (A.K.A. Low Latency Interrupt) */
60
61/* Receive Descriptor - Advanced */
62union e1000_adv_rx_desc {
63 struct {
64 u64 pkt_addr; /* Packet buffer address */
65 u64 hdr_addr; /* Header buffer address */
66 } read;
67 struct {
68 struct {
69 struct {
70 u16 pkt_info; /* RSS type, Packet type */
71 u16 hdr_info; /* Split Header,
72 * header buffer length */
73 } lo_dword;
74 union {
75 u32 rss; /* RSS Hash */
76 struct {
77 u16 ip_id; /* IP id */
78 u16 csum; /* Packet Checksum */
79 } csum_ip;
80 } hi_dword;
81 } lower;
82 struct {
83 u32 status_error; /* ext status/error */
84 u16 length; /* Packet length */
85 u16 vlan; /* VLAN tag */
86 } upper;
87 } wb; /* writeback */
88};
89
90#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
91#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
92
93/* RSS Hash results */
94
95/* RSS Packet Types as indicated in the receive descriptor */
96
97/* Transmit Descriptor - Advanced */
98union e1000_adv_tx_desc {
99 struct {
100 u64 buffer_addr; /* Address of descriptor's data buf */
101 u32 cmd_type_len;
102 u32 olinfo_status;
103 } read;
104 struct {
105 u64 rsvd; /* Reserved */
106 u32 nxtseq_seed;
107 u32 status;
108 } wb;
109};
110
111/* Adv Transmit Descriptor Config Masks */
112#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
113#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
114#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
115#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
116#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
117#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
118#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
119
120/* Context descriptors */
121struct e1000_adv_tx_context_desc {
122 u32 vlan_macip_lens;
123 u32 seqnum_seed;
124 u32 type_tucmd_mlhl;
125 u32 mss_l4len_idx;
126};
127
128#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
129#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
130#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
131/* IPSec Encrypt Enable for ESP */
132#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
133#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
134/* Adv ctxt IPSec SA IDX mask */
135/* Adv ctxt IPSec ESP len mask */
136
137/* Additional Transmit Descriptor Control definitions */
138#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
139/* Tx Queue Arbitration Priority 0=low, 1=high */
140
141/* Additional Receive Descriptor Control definitions */
142#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
143
144/* Direct Cache Access (DCA) definitions */
145
146
147
148#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */
149
150#endif
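The advanced receive descriptor above is written back in place by the hardware, so the driver learns about a completed packet by re-reading the descriptor it handed to the device. A small sketch of consuming the writeback format, assuming a hypothetical E1000_RX_DESC_ADV() ring accessor (descriptor done is signalled through the status_error word, using E1000_RXD_STAT_DD from e1000_defines.h):

	/* Sketch: check writeback status of an advanced rx descriptor. */
	union e1000_adv_rx_desc *rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	u32 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	if (staterr & E1000_RXD_STAT_DD) {	/* descriptor done */
		u16 len = le16_to_cpu(rx_desc->wb.upper.length);
		/* hand the len-byte packet to the stack ... */
	}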
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
new file mode 100644
index 000000000000..8da9ffedc425
--- /dev/null
+++ b/drivers/net/igb/e1000_defines.h
@@ -0,0 +1,772 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_DEFINES_H_
29#define _E1000_DEFINES_H_
30
31/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
32#define REQ_TX_DESCRIPTOR_MULTIPLE 8
33#define REQ_RX_DESCRIPTOR_MULTIPLE 8
34
35/* Definitions for power management and wakeup registers */
36/* Wake Up Control */
37#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
38
39/* Wake Up Filter Control */
40#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
41#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
42#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
43#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
44#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
45#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
46#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
47#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
48#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
49#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
50#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
51#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
52#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
53
54/* Wake Up Status */
55
56/* Wake Up Packet Length */
57
58/* Four Flexible Filters are supported */
59#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
60
61/* Each Flexible Filter is at most 128 (0x80) bytes in length */
62#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128
63
64
65/* Extended Device Control */
66#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */
 67#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
 68#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
 69#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
70#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
71#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
72#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
73#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
74#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
75#define E1000_CTRL_EXT_EIAME 0x01000000
76#define E1000_CTRL_EXT_IRCA 0x00000001
77/* Interrupt delay cancellation */
78/* Driver loaded bit for FW */
79#define E1000_CTRL_EXT_DRV_LOAD 0x10000000
80/* Interrupt acknowledge Auto-mask */
81/* Clear Interrupt timers after IMS clear */
82/* packet buffer parity error detection enabled */
83/* descriptor FIFO parity error detection enable */
84#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
85#define E1000_I2CCMD_REG_ADDR_SHIFT 16
86#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
87#define E1000_I2CCMD_OPCODE_READ 0x08000000
88#define E1000_I2CCMD_OPCODE_WRITE 0x00000000
89#define E1000_I2CCMD_READY 0x20000000
90#define E1000_I2CCMD_ERROR 0x80000000
91#define E1000_MAX_SGMII_PHY_REG_ADDR 255
92#define E1000_I2CCMD_PHY_TIMEOUT 200
93
 94/* Receive Descriptor bit definitions */
95#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
96#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
97#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
98#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
 99#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
100#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
101#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
102#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
103#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
104#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
105#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
106#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
107#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
108
109#define E1000_RXDEXT_STATERR_CE 0x01000000
110#define E1000_RXDEXT_STATERR_SE 0x02000000
111#define E1000_RXDEXT_STATERR_SEQ 0x04000000
112#define E1000_RXDEXT_STATERR_CXE 0x10000000
113#define E1000_RXDEXT_STATERR_TCPE 0x20000000
114#define E1000_RXDEXT_STATERR_IPE 0x40000000
115#define E1000_RXDEXT_STATERR_RXE 0x80000000
116
117/* mask to determine if packets should be dropped due to frame errors */
118#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
119 E1000_RXD_ERR_CE | \
120 E1000_RXD_ERR_SE | \
121 E1000_RXD_ERR_SEQ | \
122 E1000_RXD_ERR_CXE | \
123 E1000_RXD_ERR_RXE)
124
125/* Same mask, but for extended and packet split descriptors */
126#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
127 E1000_RXDEXT_STATERR_CE | \
128 E1000_RXDEXT_STATERR_SE | \
129 E1000_RXDEXT_STATERR_SEQ | \
130 E1000_RXDEXT_STATERR_CXE | \
131 E1000_RXDEXT_STATERR_RXE)
132
133#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
134#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
135#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
136#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
137#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
138
139
140/* Management Control */
141#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
142#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
143#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
144/* Enable Neighbor Discovery Filtering */
145#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
146#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
147/* Enable MAC address filtering */
148#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
149/* Enable MNG packets to host memory */
150#define E1000_MANC_EN_MNG2HOST 0x00200000
151/* Enable IP address filtering */
152
153
154/* Receive Control */
155#define E1000_RCTL_EN 0x00000002 /* enable */
156#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
157#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
158#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
159#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
160#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
161#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
162#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
163#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
164#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
165#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
166/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
167#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
168#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
169#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
170#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
171/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
172#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
173#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
174#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
175#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
176#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
177#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
178#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
179
180/*
181 * Use byte values for the following shift parameters
182 * Usage:
183 * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
184 * E1000_PSRCTL_BSIZE0_MASK) |
185 * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
186 * E1000_PSRCTL_BSIZE1_MASK) |
187 * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
188 * E1000_PSRCTL_BSIZE2_MASK) |
189 * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
190 * E1000_PSRCTL_BSIZE3_MASK))
191 * where value0 = [128..16256], default=256
192 * value1 = [1024..64512], default=4096
193 * value2 = [0..64512], default=4096
194 * value3 = [0..64512], default=0
195 */
196
197#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
198#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
199#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
200#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
201
202#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
203#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
204#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
205#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
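
Purely illustrative (not part of the patch): composing a PSRCTL value from the default buffer sizes listed in the usage comment above, assuming a conventional ROUNDUP(x, mult) macro since none is defined in this header:

	#define ROUNDUP(x, mult) ((((x) + (mult) - 1) / (mult)) * (mult))

	u32 psrctl = 0;

	psrctl |= (ROUNDUP(256, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
		  E1000_PSRCTL_BSIZE0_MASK;	/* 256 >> 7  = 0x02    */
	psrctl |= (ROUNDUP(4096, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
		  E1000_PSRCTL_BSIZE1_MASK;	/* 4096 >> 2 = 0x400   */
	psrctl |= (ROUNDUP(4096, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
		  E1000_PSRCTL_BSIZE2_MASK;	/* 4096 << 6 = 0x40000 */
	/* value3 defaults to 0, so the BSIZE3 field stays clear */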
206
207/* SWFW_SYNC Definitions */
208#define E1000_SWFW_EEP_SM 0x1
209#define E1000_SWFW_PHY0_SM 0x2
210#define E1000_SWFW_PHY1_SM 0x4
211
212/* FACTPS Definitions */
213/* Device Control */
214#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
215#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
216#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
217#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
218#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
219#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
220#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
221#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
222#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
223#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
224/* Defined polarity of Dock/Undock indication in SDP[0] */
225/* Reset both PHY ports, through PHYRST_N pin */
226/* enable link status from external LINK_0 and LINK_1 pins */
227#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
228#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
229#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
230#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
231#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
232#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
233#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
234#define E1000_CTRL_RST 0x04000000 /* Global reset */
235#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
236#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
237#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
238#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
239/* Initiate an interrupt to manageability engine */
240#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
241
242/* Bit definitions for the Management Data IO (MDIO) and Management Data
243 * Clock (MDC) pins in the Device Control Register.
244 */
245
246#define E1000_CONNSW_ENRGSRC 0x4
247#define E1000_PCS_LCTL_FLV_LINK_UP 1
248#define E1000_PCS_LCTL_FSV_100 2
249#define E1000_PCS_LCTL_FSV_1000 4
250#define E1000_PCS_LCTL_FDV_FULL 8
251#define E1000_PCS_LCTL_FSD 0x10
252#define E1000_PCS_LCTL_FORCE_LINK 0x20
253#define E1000_PCS_LCTL_AN_ENABLE 0x10000
254#define E1000_PCS_LCTL_AN_RESTART 0x20000
255#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
256
257#define E1000_PCS_LSTS_LINK_OK 1
258#define E1000_PCS_LSTS_SPEED_100 2
259#define E1000_PCS_LSTS_SPEED_1000 4
260#define E1000_PCS_LSTS_DUPLEX_FULL 8
261#define E1000_PCS_LSTS_SYNK_OK 0x10
262
263/* Device Status */
264#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
265#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
266#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
267#define E1000_STATUS_FUNC_SHIFT 2
268#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
269#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
270#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
271#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
272/* Change in Dock/Undock state. Clear on write '0'. */
273/* Status of Master requests. */
274#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000
275/* BMC external code execution disabled */
276
277/* Constants used to interpret the masked PCI-X bus speed. */
278
279#define SPEED_10 10
280#define SPEED_100 100
281#define SPEED_1000 1000
282#define HALF_DUPLEX 1
283#define FULL_DUPLEX 2
284
285
286#define ADVERTISE_10_HALF 0x0001
287#define ADVERTISE_10_FULL 0x0002
288#define ADVERTISE_100_HALF 0x0004
289#define ADVERTISE_100_FULL 0x0008
290#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
291#define ADVERTISE_1000_FULL 0x0020
292
293/* 1000/H is not supported, nor spec-compliant. */
294#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
295 ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
296 ADVERTISE_1000_FULL)
297#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
298 ADVERTISE_100_HALF | ADVERTISE_100_FULL)
299#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
300#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
301#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \
302 ADVERTISE_1000_FULL)
303#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
304
305#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
306
307/* LED Control */
308#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
309#define E1000_LEDCTL_LED0_MODE_SHIFT 0
310#define E1000_LEDCTL_LED0_IVRT 0x00000040
311#define E1000_LEDCTL_LED0_BLINK 0x00000080
312
313#define E1000_LEDCTL_MODE_LED_ON 0xE
314#define E1000_LEDCTL_MODE_LED_OFF 0xF
315
316/* Transmit Descriptor bit definitions */
317#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
318#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
319#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
320#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
321#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
322#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
323/* Extended desc bits for Linksec and timesync */
324
325/* Transmit Control */
326#define E1000_TCTL_EN 0x00000002 /* enable tx */
327#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
328#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
329#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
330#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
331
332/* Transmit Arbitration Count */
333
334/* SerDes Control */
335#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
336
337/* Receive Checksum Control */
338#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
339#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
340#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
341
342/* Header split receive */
343
344/* Collision related configuration parameters */
345#define E1000_COLLISION_THRESHOLD 15
346#define E1000_CT_SHIFT 4
347#define E1000_COLLISION_DISTANCE 63
348#define E1000_COLD_SHIFT 12
349
350/* Ethertype field values */
351#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
352
353#define MAX_JUMBO_FRAME_SIZE 0x3F00
354
355/* Extended Configuration Control and Size */
356#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
357
358/* PBA constants */
359#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
360#define E1000_PBA_24K 0x0018
361#define E1000_PBA_34K 0x0022
362
363#define IFS_MAX 80
364#define IFS_MIN 40
365#define IFS_RATIO 4
366#define IFS_STEP 10
367#define MIN_NUM_XMITS 1000
368
369/* SW Semaphore Register */
370#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
371#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
372
373/* Interrupt Cause Read */
374#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
375#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
376#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
377#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
378#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
379#define E1000_ICR_RXO 0x00000040 /* rx overrun */
380#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
381#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
382#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */
383#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
384#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
385#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
386#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
387#define E1000_ICR_TXD_LOW 0x00008000
388#define E1000_ICR_SRPD 0x00010000
389#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
390#define E1000_ICR_MNG 0x00040000 /* Manageability event */
391#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
392/* If this bit is asserted, the driver should claim the interrupt */
393#define E1000_ICR_INT_ASSERTED 0x80000000
394/* queue 0 Rx descriptor FIFO parity error */
395#define E1000_ICR_RXD_FIFO_PAR0 0x00100000
396/* queue 0 Tx descriptor FIFO parity error */
397#define E1000_ICR_TXD_FIFO_PAR0 0x00200000
398/* host arb read buffer parity error */
399#define E1000_ICR_HOST_ARB_PAR 0x00400000
400#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
401/* queue 1 Rx descriptor FIFO parity error */
402#define E1000_ICR_RXD_FIFO_PAR1 0x01000000
403/* queue 1 Tx descriptor FIFO parity error */
404#define E1000_ICR_TXD_FIFO_PAR1 0x02000000
405/* FW changed the status of DISSW bit in the FWSM */
406#define E1000_ICR_DSW 0x00000020
407/* LAN connected device generates an interrupt */
408#define E1000_ICR_PHYINT 0x00001000
409#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
410
411/* Extended Interrupt Cause Read */
412#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
413#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
414#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
415#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
416#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
417#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
418#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
419#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
420#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
421#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
422/* TCP Timer */
423
424/*
425 * This defines the bits that are set in the Interrupt Mask
426 * Set/Read Register. Each bit is documented below:
427 * o RXT0 = Receiver Timer Interrupt (ring 0)
428 * o TXDW = Transmit Descriptor Written Back
429 * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
430 * o RXSEQ = Receive Sequence Error
431 * o LSC = Link Status Change
432 */
433#define IMS_ENABLE_MASK ( \
434 E1000_IMS_RXT0 | \
435 E1000_IMS_TXDW | \
436 E1000_IMS_RXDMT0 | \
437 E1000_IMS_RXSEQ | \
438 E1000_IMS_LSC)
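
Illustrative only (assumed usage, not shown in this excerpt): a driver would typically arm this default cause set through the IMS register once its rings are initialized, using the wr32() accessor that appears later in e1000_mac.c:

	wr32(E1000_IMS, IMS_ENABLE_MASK);	/* unmask the five causes above */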
439
440/* Interrupt Mask Set */
441#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
442#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
443#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
444#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
445#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
446/* queue 0 Rx descriptor FIFO parity error */
447/* queue 0 Tx descriptor FIFO parity error */
448/* host arb read buffer parity error */
449/* packet buffer parity error */
450/* queue 1 Rx descriptor FIFO parity error */
451/* queue 1 Tx descriptor FIFO parity error */
452
453/* Extended Interrupt Mask Set */
454#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
455#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
456
457/* Interrupt Cause Set */
458#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
459#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
460/* queue 0 Rx descriptor FIFO parity error */
461/* queue 0 Tx descriptor FIFO parity error */
462/* host arb read buffer parity error */
463/* packet buffer parity error */
464/* queue 1 Rx descriptor FIFO parity error */
465/* queue 1 Tx descriptor FIFO parity error */
466
467/* Extended Interrupt Cause Set */
468
469/* Transmit Descriptor Control */
470/* Enable the counting of descriptors still to be processed. */
471
472/* Flow Control Constants */
473#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
474#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
475#define FLOW_CONTROL_TYPE 0x8808
476
477/* 802.1q VLAN Packet Size */
478#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
479#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
480
481/* Receive Address */
482/*
483 * Number of high/low register pairs in the RAR. The RAR (Receive Address
484 * Registers) holds the directed and multicast addresses that we monitor.
485 * Technically, we have 16 spots. However, we reserve one of these spots
486 * (RAR[15]) for our directed address used by controllers with
487 * manageability enabled, allowing us room for 15 multicast addresses.
488 */
489#define E1000_RAH_AV 0x80000000 /* Receive address valid */
490
491/* Error Codes */
492#define E1000_ERR_NVM 1
493#define E1000_ERR_PHY 2
494#define E1000_ERR_CONFIG 3
495#define E1000_ERR_PARAM 4
496#define E1000_ERR_MAC_INIT 5
497#define E1000_ERR_RESET 9
498#define E1000_ERR_MASTER_REQUESTS_PENDING 10
499#define E1000_ERR_HOST_INTERFACE_COMMAND 11
500#define E1000_BLK_PHY_RESET 12
501#define E1000_ERR_SWFW_SYNC 13
502#define E1000_NOT_IMPLEMENTED 14
503
504/* Loop limit on how long we wait for auto-negotiation to complete */
505#define COPPER_LINK_UP_LIMIT 10
506#define PHY_AUTO_NEG_LIMIT 45
507#define PHY_FORCE_LIMIT 20
508/* Number of 100-microsecond intervals we wait for PCI Express master disable */
509#define MASTER_DISABLE_TIMEOUT 800
510/* Number of milliseconds we wait for PHY configuration done after MAC reset */
511#define PHY_CFG_TIMEOUT 100
512/* Number of 2-millisecond intervals we wait when acquiring MDIO ownership. */
513/* Number of milliseconds for NVM auto read done after MAC reset. */
514#define AUTO_READ_DONE_TIMEOUT 10
515
516/* Flow Control */
517#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
518
519/* Transmit Configuration Word */
520#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
521
522/* Receive Configuration Word */
523
524/* PCI Express Control */
525#define E1000_GCR_RXD_NO_SNOOP 0x00000001
526#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
527#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
528#define E1000_GCR_TXD_NO_SNOOP 0x00000008
529#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
530#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
531
532#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
533 E1000_GCR_RXDSCW_NO_SNOOP | \
534 E1000_GCR_RXDSCR_NO_SNOOP | \
535 E1000_GCR_TXD_NO_SNOOP | \
536 E1000_GCR_TXDSCW_NO_SNOOP | \
537 E1000_GCR_TXDSCR_NO_SNOOP)
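
Hedged illustration (the exact call site is not in this excerpt): disabling snooping for every descriptor and data path by setting all six bits in the GCR register, via the rd32()/wr32() accessors used later in this patch:

	u32 gcr = rd32(E1000_GCR);

	gcr |= PCIE_NO_SNOOP_ALL;
	wr32(E1000_GCR, gcr);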
538
539/* PHY Control Register */
540#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
541#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
542#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
543#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
544#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
545#define MII_CR_SPEED_1000 0x0040
546#define MII_CR_SPEED_100 0x2000
547#define MII_CR_SPEED_10 0x0000
548
549/* PHY Status Register */
550#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
551#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
552
553/* Autoneg Advertisement Register */
554#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
555#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
556#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
557#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
558#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
559#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
560
561/* Link Partner Ability Register (Base Page) */
562#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
563#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
564
565/* Autoneg Expansion Register */
566
567/* 1000BASE-T Control Register */
568#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
569#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
570 /* 0=DTE device */
571#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
572 /* 0=Configure PHY as Slave */
573#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
574 /* 0=Automatic Master/Slave config */
575
576/* 1000BASE-T Status Register */
577#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
578#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
579
580
581/* PHY 1000 MII Register/Bit Definitions */
582/* PHY Registers defined by IEEE */
583#define PHY_CONTROL 0x00 /* Control Register */
584#define PHY_STATUS 0x01 /* Status Register */
585#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
586#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
587#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
588#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
589#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
590#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
591
592/* NVM Control */
593#define E1000_EECD_SK 0x00000001 /* NVM Clock */
594#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
595#define E1000_EECD_DI 0x00000004 /* NVM Data In */
596#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
597#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
598#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
599#define E1000_EECD_PRES 0x00000100 /* NVM Present */
600/* NVM Addressing bits based on type 0=small, 1=large */
601#define E1000_EECD_ADDR_BITS 0x00000400
602#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
603#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
604#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
605#define E1000_EECD_SIZE_EX_SHIFT 11
606
607/* Offset to data in NVM read/write registers */
608#define E1000_NVM_RW_REG_DATA 16
609#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
610#define E1000_NVM_RW_REG_START 1 /* Start operation */
611#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
612#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
613
614/* NVM Word Offsets */
615#define NVM_ID_LED_SETTINGS 0x0004
616/* For SERDES output amplitude adjustment. */
617#define NVM_INIT_CONTROL2_REG 0x000F
618#define NVM_INIT_CONTROL3_PORT_A 0x0024
619#define NVM_ALT_MAC_ADDR_PTR 0x0037
620#define NVM_CHECKSUM_REG 0x003F
621
622#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
623#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
624
625/* Mask bits for fields in Word 0x0f of the NVM */
626#define NVM_WORD0F_PAUSE_MASK 0x3000
627#define NVM_WORD0F_ASM_DIR 0x2000
628
629/* Mask bits for fields in Word 0x1a of the NVM */
630
631/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
632#define NVM_SUM 0xBABA
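
A minimal validation sketch under this rule (the read_word() helper is hypothetical; the driver's real NVM read lives behind hw->nvm.ops.read_nvm):

	u16 i, word, checksum = 0;

	for (i = 0; i <= NVM_CHECKSUM_REG; i++) {
		read_word(hw, i, &word);	/* hypothetical NVM read helper */
		checksum += word;
	}
	/* the image is considered valid only when checksum == NVM_SUM */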
633
634#define NVM_PBA_OFFSET_0 8
635#define NVM_PBA_OFFSET_1 9
636#define NVM_WORD_SIZE_BASE_SHIFT 6
637
638/* NVM Commands - Microwire */
639
640/* NVM Commands - SPI */
641#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
642#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
643#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
644#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
645#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
646
647/* SPI NVM Status Register */
648#define NVM_STATUS_RDY_SPI 0x01
649
650/* Word definitions for ID LED Settings */
651#define ID_LED_RESERVED_0000 0x0000
652#define ID_LED_RESERVED_FFFF 0xFFFF
653#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
654 (ID_LED_OFF1_OFF2 << 8) | \
655 (ID_LED_DEF1_DEF2 << 4) | \
656 (ID_LED_DEF1_DEF2))
657#define ID_LED_DEF1_DEF2 0x1
658#define ID_LED_DEF1_ON2 0x2
659#define ID_LED_DEF1_OFF2 0x3
660#define ID_LED_ON1_DEF2 0x4
661#define ID_LED_ON1_ON2 0x5
662#define ID_LED_ON1_OFF2 0x6
663#define ID_LED_OFF1_DEF2 0x7
664#define ID_LED_OFF1_ON2 0x8
665#define ID_LED_OFF1_OFF2 0x9
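
For reference (arithmetic only, derived from the nibble codes above): ID_LED_DEFAULT expands to (0x8 << 12) | (0x9 << 8) | (0x1 << 4) | 0x1 = 0x8911, one four-bit LED-state code per nibble.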
666
667#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
668#define IGP_ACTIVITY_LED_ENABLE 0x0300
669#define IGP_LED3_MODE 0x07000000
670
671/* PCI/PCI-X/PCI-EX Config space */
672#define PCI_HEADER_TYPE_REGISTER 0x0E
673#define PCIE_LINK_STATUS 0x12
674
675#define PCI_HEADER_TYPE_MULTIFUNC 0x80
676#define PCIE_LINK_WIDTH_MASK 0x3F0
677#define PCIE_LINK_WIDTH_SHIFT 4
678
679#define PHY_REVISION_MASK 0xFFFFFFF0
680#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
681#define MAX_PHY_MULTI_PAGE_REG 0xF
682
683/* Bit definitions for valid PHY IDs. */
684/*
685 * I = Integrated
686 * E = External
687 */
688#define M88E1111_I_PHY_ID 0x01410CC0
689#define IGP03E1000_E_PHY_ID 0x02A80390
690#define M88_VENDOR 0x0141
691
692/* M88E1000 Specific Registers */
693#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
694#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
695#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
696
697#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
698#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
699
700/* M88E1000 PHY Specific Control Register */
701#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
702/* 1=CLK125 low, 0=CLK125 toggling */
703#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
704 /* Manual MDI configuration */
705#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
706/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
707#define M88E1000_PSCR_AUTO_X_1000T 0x0040
708/* Auto crossover enabled all speeds */
709#define M88E1000_PSCR_AUTO_X_MODE 0x0060
710/*
711 * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T RX Threshold
712 * 0=Normal 10BASE-T RX Threshold
713 */
714/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
715#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
716
717/* M88E1000 PHY Specific Status Register */
718#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
719#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
720#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
721/*
722 * 0 = <50M
723 * 1 = 50-80M
724 * 2 = 80-110M
725 * 3 = 110-140M
726 * 4 = >140M
727 */
728#define M88E1000_PSSR_CABLE_LENGTH 0x0380
729#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
730#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mb/s */
731
732#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
733
734/* M88E1000 Extended PHY Specific Control Register */
735/*
736 * 1 = Lost lock detect enabled.
737 * Will assert lost lock and bring
738 * link down if idle not seen
739 * within 1ms in 1000BASE-T
740 */
741/*
742 * Number of times we will attempt to autonegotiate before downshifting if we
743 * are the master
744 */
745#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
746#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
747/*
748 * Number of times we will attempt to autonegotiate before downshifting if we
749 * are the slave
750 */
751#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
752#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
753#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
754
755/* M88EC018 Rev 2 specific DownShift settings */
756#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
757#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
758
759/* MDI Control */
760#define E1000_MDIC_REG_SHIFT 16
761#define E1000_MDIC_PHY_SHIFT 21
762#define E1000_MDIC_OP_WRITE 0x04000000
763#define E1000_MDIC_OP_READ 0x08000000
764#define E1000_MDIC_READY 0x10000000
765#define E1000_MDIC_ERROR 0x40000000
766
767/* SerDes Control */
768#define E1000_GEN_CTL_READY 0x80000000
769#define E1000_GEN_CTL_ADDRESS_SHIFT 8
770#define E1000_GEN_POLL_TIMEOUT 640
771
772#endif
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
new file mode 100644
index 000000000000..161fb68764af
--- /dev/null
+++ b/drivers/net/igb/e1000_hw.h
@@ -0,0 +1,599 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_HW_H_
29#define _E1000_HW_H_
30
31#include <linux/types.h>
32#include <linux/delay.h>
33#include <linux/io.h>
34
35#include "e1000_mac.h"
36#include "e1000_regs.h"
37#include "e1000_defines.h"
38
39struct e1000_hw;
40
41#define E1000_DEV_ID_82575EB_COPPER 0x10A7
42#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
43#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
44
45#define E1000_REVISION_2 2
46#define E1000_REVISION_4 4
47
48#define E1000_FUNC_1 1
49
50enum e1000_mac_type {
51 e1000_undefined = 0,
52 e1000_82575,
53 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
54};
55
56enum e1000_media_type {
57 e1000_media_type_unknown = 0,
58 e1000_media_type_copper = 1,
59 e1000_media_type_fiber = 2,
60 e1000_media_type_internal_serdes = 3,
61 e1000_num_media_types
62};
63
64enum e1000_nvm_type {
65 e1000_nvm_unknown = 0,
66 e1000_nvm_none,
67 e1000_nvm_eeprom_spi,
68 e1000_nvm_eeprom_microwire,
69 e1000_nvm_flash_hw,
70 e1000_nvm_flash_sw
71};
72
73enum e1000_nvm_override {
74 e1000_nvm_override_none = 0,
75 e1000_nvm_override_spi_small,
76 e1000_nvm_override_spi_large,
77 e1000_nvm_override_microwire_small,
78 e1000_nvm_override_microwire_large
79};
80
81enum e1000_phy_type {
82 e1000_phy_unknown = 0,
83 e1000_phy_none,
84 e1000_phy_m88,
85 e1000_phy_igp,
86 e1000_phy_igp_2,
87 e1000_phy_gg82563,
88 e1000_phy_igp_3,
89 e1000_phy_ife,
90};
91
92enum e1000_bus_type {
93 e1000_bus_type_unknown = 0,
94 e1000_bus_type_pci,
95 e1000_bus_type_pcix,
96 e1000_bus_type_pci_express,
97 e1000_bus_type_reserved
98};
99
100enum e1000_bus_speed {
101 e1000_bus_speed_unknown = 0,
102 e1000_bus_speed_33,
103 e1000_bus_speed_66,
104 e1000_bus_speed_100,
105 e1000_bus_speed_120,
106 e1000_bus_speed_133,
107 e1000_bus_speed_2500,
108 e1000_bus_speed_5000,
109 e1000_bus_speed_reserved
110};
111
112enum e1000_bus_width {
113 e1000_bus_width_unknown = 0,
114 e1000_bus_width_pcie_x1,
115 e1000_bus_width_pcie_x2,
116 e1000_bus_width_pcie_x4 = 4,
117 e1000_bus_width_pcie_x8 = 8,
118 e1000_bus_width_32,
119 e1000_bus_width_64,
120 e1000_bus_width_reserved
121};
122
123enum e1000_1000t_rx_status {
124 e1000_1000t_rx_status_not_ok = 0,
125 e1000_1000t_rx_status_ok,
126 e1000_1000t_rx_status_undefined = 0xFF
127};
128
129enum e1000_rev_polarity {
130 e1000_rev_polarity_normal = 0,
131 e1000_rev_polarity_reversed,
132 e1000_rev_polarity_undefined = 0xFF
133};
134
135enum e1000_fc_type {
136 e1000_fc_none = 0,
137 e1000_fc_rx_pause,
138 e1000_fc_tx_pause,
139 e1000_fc_full,
140 e1000_fc_default = 0xFF
141};
142
143
144/* Receive Descriptor */
145struct e1000_rx_desc {
146 u64 buffer_addr; /* Address of the descriptor's data buffer */
147 u16 length; /* Length of data DMAed into data buffer */
148 u16 csum; /* Packet checksum */
149 u8 status; /* Descriptor status */
150 u8 errors; /* Descriptor Errors */
151 u16 special;
152};
153
154/* Receive Descriptor - Extended */
155union e1000_rx_desc_extended {
156 struct {
157 u64 buffer_addr;
158 u64 reserved;
159 } read;
160 struct {
161 struct {
162 u32 mrq; /* Multiple Rx Queues */
163 union {
164 u32 rss; /* RSS Hash */
165 struct {
166 u16 ip_id; /* IP id */
167 u16 csum; /* Packet Checksum */
168 } csum_ip;
169 } hi_dword;
170 } lower;
171 struct {
172 u32 status_error; /* ext status/error */
173 u16 length;
174 u16 vlan; /* VLAN tag */
175 } upper;
176 } wb; /* writeback */
177};
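
A hedged fragment (not from this patch) showing how a cleanup loop would poll the writeback half of this union; the legacy status bits such as E1000_RXD_STAT_DD occupy the low bits of status_error:

	u32 staterr = rx_desc->wb.upper.status_error;

	if (!(staterr & E1000_RXD_STAT_DD))
		return;	/* hardware has not written this descriptor back */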
178
179#define MAX_PS_BUFFERS 4
180/* Receive Descriptor - Packet Split */
181union e1000_rx_desc_packet_split {
182 struct {
183 /* one buffer for protocol header(s), three data buffers */
184 u64 buffer_addr[MAX_PS_BUFFERS];
185 } read;
186 struct {
187 struct {
188 u32 mrq; /* Multiple Rx Queues */
189 union {
190 u32 rss; /* RSS Hash */
191 struct {
192 u16 ip_id; /* IP id */
193 u16 csum; /* Packet Checksum */
194 } csum_ip;
195 } hi_dword;
196 } lower;
197 struct {
198 u32 status_error; /* ext status/error */
199 u16 length0; /* length of buffer 0 */
200 u16 vlan; /* VLAN tag */
201 } middle;
202 struct {
203 u16 header_status;
204 u16 length[3]; /* length of buffers 1-3 */
205 } upper;
206 u64 reserved;
207 } wb; /* writeback */
208};
209
210/* Transmit Descriptor */
211struct e1000_tx_desc {
212 u64 buffer_addr; /* Address of the descriptor's data buffer */
213 union {
214 u32 data;
215 struct {
216 u16 length; /* Data buffer length */
217 u8 cso; /* Checksum offset */
218 u8 cmd; /* Descriptor control */
219 } flags;
220 } lower;
221 union {
222 u32 data;
223 struct {
224 u8 status; /* Descriptor status */
225 u8 css; /* Checksum start */
226 u16 special;
227 } fields;
228 } upper;
229};
230
231/* Offload Context Descriptor */
232struct e1000_context_desc {
233 union {
234 u32 ip_config;
235 struct {
236 u8 ipcss; /* IP checksum start */
237 u8 ipcso; /* IP checksum offset */
238 u16 ipcse; /* IP checksum end */
239 } ip_fields;
240 } lower_setup;
241 union {
242 u32 tcp_config;
243 struct {
244 u8 tucss; /* TCP checksum start */
245 u8 tucso; /* TCP checksum offset */
246 u16 tucse; /* TCP checksum end */
247 } tcp_fields;
248 } upper_setup;
249 u32 cmd_and_length;
250 union {
251 u32 data;
252 struct {
253 u8 status; /* Descriptor status */
254 u8 hdr_len; /* Header length */
255 u16 mss; /* Maximum segment size */
256 } fields;
257 } tcp_seg_setup;
258};
259
260/* Offload data descriptor */
261struct e1000_data_desc {
262 u64 buffer_addr; /* Address of the descriptor's data buffer */
263 union {
264 u32 data;
265 struct {
266 u16 length; /* Data buffer length */
267 u8 typ_len_ext;
268 u8 cmd;
269 } flags;
270 } lower;
271 union {
272 u32 data;
273 struct {
274 u8 status; /* Descriptor status */
275 u8 popts; /* Packet Options */
276 u16 special;
277 } fields;
278 } upper;
279};
280
281/* Statistics counters collected by the MAC */
282struct e1000_hw_stats {
283 u64 crcerrs;
284 u64 algnerrc;
285 u64 symerrs;
286 u64 rxerrc;
287 u64 mpc;
288 u64 scc;
289 u64 ecol;
290 u64 mcc;
291 u64 latecol;
292 u64 colc;
293 u64 dc;
294 u64 tncrs;
295 u64 sec;
296 u64 cexterr;
297 u64 rlec;
298 u64 xonrxc;
299 u64 xontxc;
300 u64 xoffrxc;
301 u64 xofftxc;
302 u64 fcruc;
303 u64 prc64;
304 u64 prc127;
305 u64 prc255;
306 u64 prc511;
307 u64 prc1023;
308 u64 prc1522;
309 u64 gprc;
310 u64 bprc;
311 u64 mprc;
312 u64 gptc;
313 u64 gorc;
314 u64 gotc;
315 u64 rnbc;
316 u64 ruc;
317 u64 rfc;
318 u64 roc;
319 u64 rjc;
320 u64 mgprc;
321 u64 mgpdc;
322 u64 mgptc;
323 u64 tor;
324 u64 tot;
325 u64 tpr;
326 u64 tpt;
327 u64 ptc64;
328 u64 ptc127;
329 u64 ptc255;
330 u64 ptc511;
331 u64 ptc1023;
332 u64 ptc1522;
333 u64 mptc;
334 u64 bptc;
335 u64 tsctc;
336 u64 tsctfc;
337 u64 iac;
338 u64 icrxptc;
339 u64 icrxatc;
340 u64 ictxptc;
341 u64 ictxatc;
342 u64 ictxqec;
343 u64 ictxqmtc;
344 u64 icrxdmtc;
345 u64 icrxoc;
346 u64 cbtmpc;
347 u64 htdpmc;
348 u64 cbrdpc;
349 u64 cbrmpc;
350 u64 rpthc;
351 u64 hgptc;
352 u64 htcbdpc;
353 u64 hgorc;
354 u64 hgotc;
355 u64 lenerrs;
356 u64 scvpc;
357 u64 hrmpc;
358};
359
360struct e1000_phy_stats {
361 u32 idle_errors;
362 u32 receive_errors;
363};
364
365struct e1000_host_mng_dhcp_cookie {
366 u32 signature;
367 u8 status;
368 u8 reserved0;
369 u16 vlan_id;
370 u32 reserved1;
371 u16 reserved2;
372 u8 reserved3;
373 u8 checksum;
374};
375
376/* Host Interface "Rev 1" */
377struct e1000_host_command_header {
378 u8 command_id;
379 u8 command_length;
380 u8 command_options;
381 u8 checksum;
382};
383
384#define E1000_HI_MAX_DATA_LENGTH 252
385struct e1000_host_command_info {
386 struct e1000_host_command_header command_header;
387 u8 command_data[E1000_HI_MAX_DATA_LENGTH];
388};
389
390/* Host Interface "Rev 2" */
391struct e1000_host_mng_command_header {
392 u8 command_id;
393 u8 checksum;
394 u16 reserved1;
395 u16 reserved2;
396 u16 command_length;
397};
398
399#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
400struct e1000_host_mng_command_info {
401 struct e1000_host_mng_command_header command_header;
402 u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
403};
404
405#include "e1000_mac.h"
406#include "e1000_phy.h"
407#include "e1000_nvm.h"
408
409struct e1000_mac_operations {
410 s32 (*check_for_link)(struct e1000_hw *);
411 s32 (*reset_hw)(struct e1000_hw *);
412 s32 (*init_hw)(struct e1000_hw *);
413 s32 (*setup_physical_interface)(struct e1000_hw *);
414 void (*rar_set)(struct e1000_hw *, u8 *, u32);
415 s32 (*read_mac_addr)(struct e1000_hw *);
416 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
417};
418
419struct e1000_phy_operations {
420 s32 (*acquire_phy)(struct e1000_hw *);
421 s32 (*force_speed_duplex)(struct e1000_hw *);
422 s32 (*get_cfg_done)(struct e1000_hw *hw);
423 s32 (*get_cable_length)(struct e1000_hw *);
424 s32 (*get_phy_info)(struct e1000_hw *);
425 s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
426 void (*release_phy)(struct e1000_hw *);
427 s32 (*reset_phy)(struct e1000_hw *);
428 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
429 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
430 s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
431};
432
433struct e1000_nvm_operations {
434 s32 (*acquire_nvm)(struct e1000_hw *);
435 s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
436 void (*release_nvm)(struct e1000_hw *);
437 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
438};
439
440struct e1000_info {
441 s32 (*get_invariants)(struct e1000_hw *);
442 struct e1000_mac_operations *mac_ops;
443 struct e1000_phy_operations *phy_ops;
444 struct e1000_nvm_operations *nvm_ops;
445};
446
447extern const struct e1000_info e1000_82575_info;
448
449struct e1000_mac_info {
450 struct e1000_mac_operations ops;
451
452 u8 addr[6];
453 u8 perm_addr[6];
454
455 enum e1000_mac_type type;
456
457 u32 collision_delta;
458 u32 ledctl_default;
459 u32 ledctl_mode1;
460 u32 ledctl_mode2;
461 u32 mc_filter_type;
462 u32 tx_packet_delta;
463 u32 txcw;
464
465 u16 current_ifs_val;
466 u16 ifs_max_val;
467 u16 ifs_min_val;
468 u16 ifs_ratio;
469 u16 ifs_step_size;
470 u16 mta_reg_count;
471 u16 rar_entry_count;
472
473 u8 forced_speed_duplex;
474
475 bool adaptive_ifs;
476 bool arc_subsystem_valid;
477 bool asf_firmware_present;
478 bool autoneg;
479 bool autoneg_failed;
480 bool disable_av;
481 bool disable_hw_init_bits;
482 bool get_link_status;
483 bool ifs_params_forced;
484 bool in_ifs_mode;
485 bool report_tx_early;
486 bool serdes_has_link;
487 bool tx_pkt_filtering;
488};
489
490struct e1000_phy_info {
491 struct e1000_phy_operations ops;
492
493 enum e1000_phy_type type;
494
495 enum e1000_1000t_rx_status local_rx;
496 enum e1000_1000t_rx_status remote_rx;
497 enum e1000_ms_type ms_type;
498 enum e1000_ms_type original_ms_type;
499 enum e1000_rev_polarity cable_polarity;
500 enum e1000_smart_speed smart_speed;
501
502 u32 addr;
503 u32 id;
504 u32 reset_delay_us; /* in usec */
505 u32 revision;
506
507 enum e1000_media_type media_type;
508
509 u16 autoneg_advertised;
510 u16 autoneg_mask;
511 u16 cable_length;
512 u16 max_cable_length;
513 u16 min_cable_length;
514
515 u8 mdix;
516
517 bool disable_polarity_correction;
518 bool is_mdix;
519 bool polarity_correction;
520 bool reset_disable;
521 bool speed_downgraded;
522 bool autoneg_wait_to_complete;
523};
524
525struct e1000_nvm_info {
526 struct e1000_nvm_operations ops;
527
528 enum e1000_nvm_type type;
529 enum e1000_nvm_override override;
530
531 u32 flash_bank_size;
532 u32 flash_base_addr;
533
534 u16 word_size;
535 u16 delay_usec;
536 u16 address_bits;
537 u16 opcode_bits;
538 u16 page_size;
539};
540
541struct e1000_bus_info {
542 enum e1000_bus_type type;
543 enum e1000_bus_speed speed;
544 enum e1000_bus_width width;
545
546 u32 snoop;
547
548 u16 func;
549 u16 pci_cmd_word;
550};
551
552struct e1000_fc_info {
553 u32 high_water; /* Flow control high-water mark */
554 u32 low_water; /* Flow control low-water mark */
555 u16 pause_time; /* Flow control pause timer */
556 bool send_xon; /* Flow control send XON */
557 bool strict_ieee; /* Strict IEEE mode */
558 enum e1000_fc_type type; /* Type of flow control */
559 enum e1000_fc_type original_type;
560};
561
562struct e1000_hw {
563 void *back;
564 void *dev_spec;
565
566 u8 __iomem *hw_addr;
567 u8 __iomem *flash_address;
568 unsigned long io_base;
569
570 struct e1000_mac_info mac;
571 struct e1000_fc_info fc;
572 struct e1000_phy_info phy;
573 struct e1000_nvm_info nvm;
574 struct e1000_bus_info bus;
575 struct e1000_host_mng_dhcp_cookie mng_cookie;
576
577 u32 dev_spec_size;
578
579 u16 device_id;
580 u16 subsystem_vendor_id;
581 u16 subsystem_device_id;
582 u16 vendor_id;
583
584 u8 revision_id;
585};
586
587#ifdef DEBUG
588extern char *igb_get_hw_dev_name(struct e1000_hw *hw);
589#define hw_dbg(hw, format, arg...) \
590 printk(KERN_DEBUG "%s: " format, igb_get_hw_dev_name(hw), ##arg)
591#else
592static inline int __attribute__ ((format (printf, 2, 3)))
593hw_dbg(struct e1000_hw *hw, const char *format, ...)
594{
595 return 0;
596}
597#endif
598
599#endif
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
new file mode 100644
index 000000000000..3e84a3f0c1d8
--- /dev/null
+++ b/drivers/net/igb/e1000_mac.c
@@ -0,0 +1,1505 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/if_ether.h>
29#include <linux/delay.h>
30#include <linux/pci.h>
31#include <linux/netdevice.h>
32
33#include "e1000_mac.h"
34
35#include "igb.h"
36
37static s32 igb_set_default_fc(struct e1000_hw *hw);
38static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
39static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
40
41/**
42 * e1000_remove_device - Free device specific structure
43 * @hw: pointer to the HW structure
44 *
45 * If a device specific structure was allocated, this function will
46 * free it.
47 **/
48void igb_remove_device(struct e1000_hw *hw)
49{
50 /* Freeing the dev_spec member of e1000_hw structure */
51 kfree(hw->dev_spec);
52}
53
54static void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
55{
56 struct igb_adapter *adapter = hw->back;
57
58 pci_read_config_word(adapter->pdev, reg, value);
59}
60
61static s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
62{
63 struct igb_adapter *adapter = hw->back;
64 u16 cap_offset;
65
66 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
67 if (!cap_offset)
68 return -E1000_ERR_CONFIG;
69
70 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
71
72 return 0;
73}
74
75/**
76 * e1000_get_bus_info_pcie - Get PCIe bus information
77 * @hw: pointer to the HW structure
78 *
79 * Determines and stores the system bus information for a particular
80 * network interface. The following bus information is determined and stored:
81 * bus speed, bus width, type (PCIe), and PCIe function.
82 **/
83s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
84{
85 struct e1000_bus_info *bus = &hw->bus;
86 s32 ret_val;
87 u32 status;
88 u16 pcie_link_status, pci_header_type;
89
90 bus->type = e1000_bus_type_pci_express;
91 bus->speed = e1000_bus_speed_2500;
92
93 ret_val = igb_read_pcie_cap_reg(hw,
94 PCIE_LINK_STATUS,
95 &pcie_link_status);
96 if (ret_val)
97 bus->width = e1000_bus_width_unknown;
98 else
99 bus->width = (enum e1000_bus_width)((pcie_link_status &
100 PCIE_LINK_WIDTH_MASK) >>
101 PCIE_LINK_WIDTH_SHIFT);
102
103 igb_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
104 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
105 status = rd32(E1000_STATUS);
106 bus->func = (status & E1000_STATUS_FUNC_MASK)
107 >> E1000_STATUS_FUNC_SHIFT;
108 } else {
109 bus->func = 0;
110 }
111
112 return 0;
113}
114
115/**
116 * e1000_clear_vfta - Clear VLAN filter table
117 * @hw: pointer to the HW structure
118 *
119 * Clears the register array which contains the VLAN filter table by
120 * setting all the values to 0.
121 **/
122void igb_clear_vfta(struct e1000_hw *hw)
123{
124 u32 offset;
125
126 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
127 array_wr32(E1000_VFTA, offset, 0);
128 wrfl();
129 }
130}
131
132/**
133 * e1000_write_vfta - Write value to VLAN filter table
134 * @hw: pointer to the HW structure
135 * @offset: register offset in VLAN filter table
136 * @value: register value written to VLAN filter table
137 *
138 * Writes value at the given offset in the register array which stores
139 * the VLAN filter table.
140 **/
141void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
142{
143 array_wr32(E1000_VFTA, offset, value);
144 wrfl();
145}
146
147/**
148 * e1000_init_rx_addrs - Initialize receive addresses
149 * @hw: pointer to the HW structure
150 * @rar_count: total number of receive address registers
151 *
152 * Sets up the receive address registers by setting the base receive address
153 * register to the device's MAC address and clearing all the other receive
154 * address registers to 0.
155 **/
156void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
157{
158 u32 i;
159
160 /* Setup the receive address */
161 hw_dbg(hw, "Programming MAC Address into RAR[0]\n");
162
163 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
164
165 /* Zero out the other (rar_count - 1) receive addresses */
166 hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1);
167 for (i = 1; i < rar_count; i++) {
168 array_wr32(E1000_RA, (i << 1), 0);
169 wrfl();
170 array_wr32(E1000_RA, ((i << 1) + 1), 0);
171 wrfl();
172 }
173}
174
175/**
176 * e1000_check_alt_mac_addr - Check for alternate MAC addr
177 * @hw: pointer to the HW structure
178 *
179 * Checks the NVM for an alternate MAC address. An alternate MAC address
180 * can be set up by pre-boot software and must be treated like a permanent
181 * address, overriding the actual permanent MAC address. If an
182 * alternate MAC address is found, it is saved in the hw struct and
183 * programmed into RAR0, and the function returns success; otherwise the
184 * function returns an error.
185 **/
186s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
187{
188 u32 i;
189 s32 ret_val = 0;
190 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
191 u8 alt_mac_addr[ETH_ALEN];
192
193 ret_val = hw->nvm.ops.read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
194 &nvm_alt_mac_addr_offset);
195 if (ret_val) {
196 hw_dbg(hw, "NVM Read Error\n");
197 goto out;
198 }
199
200 if (nvm_alt_mac_addr_offset == 0xFFFF) {
201 ret_val = -(E1000_NOT_IMPLEMENTED);
202 goto out;
203 }
204
205 if (hw->bus.func == E1000_FUNC_1)
206 nvm_alt_mac_addr_offset += ETH_ALEN/sizeof(u16);
207
208 for (i = 0; i < ETH_ALEN; i += 2) {
209 offset = nvm_alt_mac_addr_offset + (i >> 1);
210 ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data);
211 if (ret_val) {
212 hw_dbg(hw, "NVM Read Error\n");
213 goto out;
214 }
215
216 alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
217 alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
218 }
219
220 /* if multicast bit is set, the alternate address will not be used */
221 if (alt_mac_addr[0] & 0x01) {
222 ret_val = -(E1000_NOT_IMPLEMENTED);
223 goto out;
224 }
225
226 for (i = 0; i < ETH_ALEN; i++)
227 hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i];
228
229 hw->mac.ops.rar_set(hw, hw->mac.perm_addr, 0);
230
231out:
232 return ret_val;
233}
234
235/**
236 * e1000_rar_set - Set receive address register
237 * @hw: pointer to the HW structure
238 * @addr: pointer to the receive address
239 * @index: receive address array register
240 *
241 * Sets the receive address array register at index to the address passed
242 * in by addr.
243 **/
244void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
245{
246 u32 rar_low, rar_high;
247
248 /*
249 * HW expects these in little endian so we reverse the byte order
250 * from network order (big endian) to little endian
251 */
252 rar_low = ((u32) addr[0] |
253 ((u32) addr[1] << 8) |
254 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
255
256 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
257
258 if (!hw->mac.disable_av)
259 rar_high |= E1000_RAH_AV;
260
261 array_wr32(E1000_RA, (index << 1), rar_low);
262 array_wr32(E1000_RA, ((index << 1) + 1), rar_high);
263}
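
Worked example (hypothetical address, not from the patch): for the MAC 00:A0:C9:12:34:56, the packing above yields rar_low = 0x12C9A000 and rar_high = 0x5634, with E1000_RAH_AV then OR'd into rar_high unless disable_av is set.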
264
265/**
266 * e1000_mta_set - Set multicast filter table address
267 * @hw: pointer to the HW structure
268 * @hash_value: determines the MTA register and bit to set
269 *
270 * The multicast table address is a register array of 32-bit registers.
271 * The hash_value is used to determine what register the bit is in, the
272 * current value is read, the new bit is OR'd in and the new value is
273 * written back into the register.
274 **/
275static void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
276{
277 u32 hash_bit, hash_reg, mta;
278
279 /*
280 * The MTA is a register array of 32-bit registers. It is
281 * treated like an array of (32*mta_reg_count) bits. We want to
282 * set bit BitArray[hash_value]. So we figure out what register
283 * the bit is in, read it, OR in the new bit, then write
284 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
285 * mask to bits 31:5 of the hash value which gives us the
286 * register we're modifying. The hash bit within that register
287 * is determined by the lower 5 bits of the hash value.
288 */
289 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
290 hash_bit = hash_value & 0x1F;
291
292 mta = array_rd32(E1000_MTA, hash_reg);
293
294 mta |= (1 << hash_bit);
295
296 array_wr32(E1000_MTA, hash_reg, mta);
297 wrfl();
298}
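
Worked numbers (illustrative): with the 128-register MTA assumed in the hash example below, a hash_value of 0x563 gives hash_reg = (0x563 >> 5) & 0x7F = 0x2B and hash_bit = 0x563 & 0x1F = 3, so bit 3 of MTA[43] is set.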
299
300/**
301 * e1000_update_mc_addr_list - Update Multicast addresses
302 * @hw: pointer to the HW structure
303 * @mc_addr_list: array of multicast addresses to program
304 * @mc_addr_count: number of multicast addresses to program
305 * @rar_used_count: the first RAR register free to program
306 * @rar_count: total number of supported Receive Address Registers
307 *
308 * Updates the Receive Address Registers and Multicast Table Array.
309 * The caller must have a packed mc_addr_list of multicast addresses.
310 * The parameter rar_count will usually be hw->mac.rar_entry_count
311 * unless there are workarounds that change this.
312 **/
313void igb_update_mc_addr_list(struct e1000_hw *hw,
314 u8 *mc_addr_list, u32 mc_addr_count,
315 u32 rar_used_count, u32 rar_count)
316{
317 u32 hash_value;
318 u32 i;
319
320 /*
321 * Load the first set of multicast addresses into the exact
322 * filters (RAR). If there are not enough to fill the RAR
323 * array, clear the filters.
324 */
325 for (i = rar_used_count; i < rar_count; i++) {
326 if (mc_addr_count) {
327 hw->mac.ops.rar_set(hw, mc_addr_list, i);
328 mc_addr_count--;
329 mc_addr_list += ETH_ALEN;
330 } else {
331 array_wr32(E1000_RA, i << 1, 0);
332 wrfl();
333 array_wr32(E1000_RA, (i << 1) + 1, 0);
334 wrfl();
335 }
336 }
337
338 /* Clear the old settings from the MTA */
339 hw_dbg(hw, "Clearing MTA\n");
340 for (i = 0; i < hw->mac.mta_reg_count; i++) {
341 array_wr32(E1000_MTA, i, 0);
342 wrfl();
343 }
344
345 /* Load any remaining multicast addresses into the hash table. */
346 for (; mc_addr_count > 0; mc_addr_count--) {
347 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
348 hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
349 igb_mta_set(hw, hash_value);
350 mc_addr_list += ETH_ALEN;
351 }
352}
353
354/**
355 * e1000_hash_mc_addr - Generate a multicast hash value
356 * @hw: pointer to the HW structure
357 * @mc_addr: pointer to a multicast address
358 *
359 * Generates a multicast address hash value which is used to determine
360 * the multicast filter table array address and new table value. See
361 * igb_mta_set()
362 **/
363static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
364{
365 u32 hash_value, hash_mask;
366 u8 bit_shift = 0;
367
368 /* Register count multiplied by bits per register */
369 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
370
371 /*
372 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
373 * where 0xFF would still fall within the hash mask.
374 */
375 while (hash_mask >> bit_shift != 0xFF)
376 bit_shift++;
377
378 /*
379 * The portion of the address that is used for the hash table
380 * is determined by the mc_filter_type setting.
381 * The algorithm is such that there is a total of 8 bits of shifting.
382 * The bit_shift for a mc_filter_type of 0 represents the number of
383 * left-shifts where the MSB of mc_addr[5] would still fall within
384 * the hash_mask. Case 0 does this exactly. Since there are a total
385 * of 8 bits of shifting, then mc_addr[4] will shift right the
386 * remaining number of bits. Thus 8 - bit_shift. The rest of the
387 * cases are a variation of this algorithm...essentially raising the
388 * number of bits to shift mc_addr[5] left, while still keeping the
389 * 8-bit shifting total.
390 *
391 * For example, given the following Destination MAC Address and an
392 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
393 * we can see that the bit_shift for case 0 is 4. These are the hash
394 * values resulting from each mc_filter_type...
395 * [0] [1] [2] [3] [4] [5]
396 * 01 AA 00 12 34 56
397 * LSB MSB
398 *
399 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
400 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
401 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x58D
402 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
403 */
404 switch (hw->mac.mc_filter_type) {
405 default:
406 case 0:
407 break;
408 case 1:
409 bit_shift += 1;
410 break;
411 case 2:
412 bit_shift += 2;
413 break;
414 case 3:
415 bit_shift += 4;
416 break;
417 }
418
419 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
420 (((u16) mc_addr[5]) << bit_shift)));
421
422 return hash_value;
423}
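
As a cross-check on the worked example in the comment (a standalone sketch, not driver code; the helper below re-derives the shifting logic of igb_hash_mc_addr() for an assumed 128-register MTA):

/* Prints 0x563, 0xAC6, 0x58D and 0x634 for filter types 0-3. */
#include <stdio.h>
#include <stdint.h>

static uint32_t hash_mc_addr(const uint8_t *mc, int filter_type)
{
	uint32_t hash_mask = (128 * 32) - 1;		/* 0xFFF */
	uint8_t bit_shift = 0;

	while (hash_mask >> bit_shift != 0xFF)		/* 4 for a 0xFFF mask */
		bit_shift++;
	bit_shift += (filter_type == 3) ? 4 : filter_type;

	return hash_mask & ((mc[4] >> (8 - bit_shift)) |
			    ((uint16_t)mc[5] << bit_shift));
}

int main(void)
{
	const uint8_t mc[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
	int type;

	for (type = 0; type <= 3; type++)
		printf("case %d: 0x%03X\n", type,
		       (unsigned)hash_mc_addr(mc, type));
	return 0;
}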
424
425/**
426 * e1000_clear_hw_cntrs_base - Clear base hardware counters
427 * @hw: pointer to the HW structure
428 *
429 * Clears the base hardware counters by reading the counter registers.
430 **/
431void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
432{
433 u32 temp;
434
435 temp = rd32(E1000_CRCERRS);
436 temp = rd32(E1000_SYMERRS);
437 temp = rd32(E1000_MPC);
438 temp = rd32(E1000_SCC);
439 temp = rd32(E1000_ECOL);
440 temp = rd32(E1000_MCC);
441 temp = rd32(E1000_LATECOL);
442 temp = rd32(E1000_COLC);
443 temp = rd32(E1000_DC);
444 temp = rd32(E1000_SEC);
445 temp = rd32(E1000_RLEC);
446 temp = rd32(E1000_XONRXC);
447 temp = rd32(E1000_XONTXC);
448 temp = rd32(E1000_XOFFRXC);
449 temp = rd32(E1000_XOFFTXC);
450 temp = rd32(E1000_FCRUC);
451 temp = rd32(E1000_GPRC);
452 temp = rd32(E1000_BPRC);
453 temp = rd32(E1000_MPRC);
454 temp = rd32(E1000_GPTC);
455 temp = rd32(E1000_GORCL);
456 temp = rd32(E1000_GORCH);
457 temp = rd32(E1000_GOTCL);
458 temp = rd32(E1000_GOTCH);
459 temp = rd32(E1000_RNBC);
460 temp = rd32(E1000_RUC);
461 temp = rd32(E1000_RFC);
462 temp = rd32(E1000_ROC);
463 temp = rd32(E1000_RJC);
464 temp = rd32(E1000_TORL);
465 temp = rd32(E1000_TORH);
466 temp = rd32(E1000_TOTL);
467 temp = rd32(E1000_TOTH);
468 temp = rd32(E1000_TPR);
469 temp = rd32(E1000_TPT);
470 temp = rd32(E1000_MPTC);
471 temp = rd32(E1000_BPTC);
472}
473
474/**
475 * e1000_check_for_copper_link - Check for link (Copper)
476 * @hw: pointer to the HW structure
477 *
478 * Checks to see if the link status of the hardware has changed. If a
479 * change in link status has been detected, then we read the PHY registers
480 * to get the current speed/duplex if link exists.
481 **/
482s32 igb_check_for_copper_link(struct e1000_hw *hw)
483{
484 struct e1000_mac_info *mac = &hw->mac;
485 s32 ret_val;
486 bool link;
487
488 /*
489 * We only want to go out to the PHY registers to see if Auto-Neg
490 * has completed and/or if our link status has changed. The
491 * get_link_status flag is set upon receiving a Link Status
492 * Change or Rx Sequence Error interrupt.
493 */
494 if (!mac->get_link_status) {
495 ret_val = 0;
496 goto out;
497 }
498
499 /*
500 * First we want to see if the MII Status Register reports
501 * link. If so, then we want to get the current speed/duplex
502 * of the PHY.
503 */
504 ret_val = igb_phy_has_link(hw, 1, 0, &link);
505 if (ret_val)
506 goto out;
507
508 if (!link)
509 goto out; /* No link detected */
510
511 mac->get_link_status = false;
512
513 /*
514 * Check if there was DownShift, must be checked
515 * immediately after link-up
516 */
517 igb_check_downshift(hw);
518
519 /*
520 * If we are forcing speed/duplex, then we simply return since
521 * we have already determined whether we have link or not.
522 */
523 if (!mac->autoneg) {
524 ret_val = -E1000_ERR_CONFIG;
525 goto out;
526 }
527
528 /*
529 * Auto-Neg is enabled. Auto Speed Detection takes care
530 * of MAC speed/duplex configuration. So we only need to
531 * configure Collision Distance in the MAC.
532 */
533 igb_config_collision_dist(hw);
534
535 /*
536 * Configure Flow Control now that Auto-Neg has completed.
537 * First, we need to restore the desired flow control
538 * settings because we may have had to re-autoneg with a
539 * different link partner.
540 */
541 ret_val = igb_config_fc_after_link_up(hw);
542 if (ret_val)
543 hw_dbg(hw, "Error configuring flow control\n");
544
545out:
546 return ret_val;
547}
548
549/**
550 * e1000_setup_link - Setup flow control and link settings
551 * @hw: pointer to the HW structure
552 *
553 * Determines which flow control settings to use, then configures flow
554 * control. Calls the appropriate media-specific link configuration
555 * function. Assuming the adapter has a valid link partner, a valid link
556 * should be established. Assumes the hardware has previously been reset
557 * and the transmitter and receiver are not enabled.
558 **/
559s32 igb_setup_link(struct e1000_hw *hw)
560{
561 s32 ret_val = 0;
562
563 /*
564 * In the case of the phy reset being blocked, we already have a link.
565 * We do not need to set it up again.
566 */
567 if (igb_check_reset_block(hw))
568 goto out;
569
570 ret_val = igb_set_default_fc(hw);
571 if (ret_val)
572 goto out;
573
574 /*
575 * We want to save off the original Flow Control configuration just
576 * in case we get disconnected and then reconnected into a different
577 * hub or switch with different Flow Control capabilities.
578 */
579 hw->fc.original_type = hw->fc.type;
580
581 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type);
582
583 /* Call the necessary media_type subroutine to configure the link. */
584 ret_val = hw->mac.ops.setup_physical_interface(hw);
585 if (ret_val)
586 goto out;
587
588 /*
589 * Initialize the flow control address, type, and PAUSE timer
590 * registers to their default values. This is done even if flow
591 * control is disabled, because it does not hurt anything to
592 * initialize these registers.
593 */
594 hw_dbg(hw,
595 "Initializing the Flow Control address, type and timer regs\n");
596 wr32(E1000_FCT, FLOW_CONTROL_TYPE);
597 wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
598 wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
599
600 wr32(E1000_FCTTV, hw->fc.pause_time);
601
602 ret_val = igb_set_fc_watermarks(hw);
603
604out:
605 return ret_val;
606}
607
608/**
609 * e1000_config_collision_dist - Configure collision distance
610 * @hw: pointer to the HW structure
611 *
612 * Configures the collision distance to the default value and is used
613 * during link setup. Currently no func pointer exists and all
614 * implementations are handled in the generic version of this function.
615 **/
616void igb_config_collision_dist(struct e1000_hw *hw)
617{
618 u32 tctl;
619
620 tctl = rd32(E1000_TCTL);
621
622 tctl &= ~E1000_TCTL_COLD;
623 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
624
625 wr32(E1000_TCTL, tctl);
626 wrfl();
627}
628
629/**
630 * e1000_set_fc_watermarks - Set flow control high/low watermarks
631 * @hw: pointer to the HW structure
632 *
633 * Sets the flow control high/low threshold (watermark) registers. If
634 * flow control XON frame transmission is enabled, then set XON frame
635 * transmission as well.
636 **/
637static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
638{
639 s32 ret_val = 0;
640 u32 fcrtl = 0, fcrth = 0;
641
642 /*
643 * Set the flow control receive threshold registers. Normally,
644 * these registers will be set to a default threshold that may be
645 * adjusted later by the driver's runtime code. However, if the
646 * ability to transmit pause frames is not enabled, then these
647 * registers will be set to 0.
648 */
649 if (hw->fc.type & e1000_fc_tx_pause) {
650 /*
651 * We need to set up the Receive Threshold high and low water
652 * marks as well as (optionally) enabling the transmission of
653 * XON frames.
654 */
655 fcrtl = hw->fc.low_water;
656 if (hw->fc.send_xon)
657 fcrtl |= E1000_FCRTL_XONE;
658
659 fcrth = hw->fc.high_water;
660 }
661 wr32(E1000_FCRTL, fcrtl);
662 wr32(E1000_FCRTH, fcrth);
663
664 return ret_val;
665}
666
667/**
668 * e1000_set_default_fc - Set flow control default values
669 * @hw: pointer to the HW structure
670 *
671 * Read the EEPROM for the default values for flow control and store the
672 * values.
673 **/
674static s32 igb_set_default_fc(struct e1000_hw *hw)
675{
676 s32 ret_val = 0;
677 u16 nvm_data;
678
679 /*
680 * Read and store word 0x0F of the EEPROM. This word contains bits
681 * that determine the hardware's default PAUSE (flow control) mode,
682 * a bit that determines whether the HW defaults to enabling or
683 * disabling auto-negotiation, and the direction of the
684 * SW defined pins. If there is no SW over-ride of the flow
685 * control setting, then the variable hw->fc will
686 * be initialized based on a value in the EEPROM.
687 */
688 ret_val = hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL2_REG, 1,
689 &nvm_data);
690
691 if (ret_val) {
692 hw_dbg(hw, "NVM Read Error\n");
693 goto out;
694 }
695
696 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
697 hw->fc.type = e1000_fc_none;
698 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
699 NVM_WORD0F_ASM_DIR)
700 hw->fc.type = e1000_fc_tx_pause;
701 else
702 hw->fc.type = e1000_fc_full;
703
704out:
705 return ret_val;
706}
707
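The PAUSE decode above reduces to a two-bit lookup on NVM word 0x0F. A minimal standalone sketch follows; the mask values (PAUSE mask 0x3000, ASM_DIR bit 0x2000) are assumed from the usual e1000 definitions, so verify them against e1000_defines.h before relying on them.

#include <stdint.h>

enum fc_type { fc_none, fc_rx_pause, fc_tx_pause, fc_full };

#define WORD0F_PAUSE_MASK 0x3000	/* assumed NVM_WORD0F_PAUSE_MASK */
#define WORD0F_ASM_DIR    0x2000	/* assumed NVM_WORD0F_ASM_DIR */

/* Mirror of the decode in igb_set_default_fc(): no PAUSE bits means no
 * flow control, ASM_DIR alone means Tx-only, anything else means full. */
static enum fc_type default_fc_from_nvm(uint16_t nvm_data)
{
	if ((nvm_data & WORD0F_PAUSE_MASK) == 0)
		return fc_none;
	if ((nvm_data & WORD0F_PAUSE_MASK) == WORD0F_ASM_DIR)
		return fc_tx_pause;
	return fc_full;
}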
708/**
709 * e1000_force_mac_fc - Force the MAC's flow control settings
710 * @hw: pointer to the HW structure
711 *
712 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
713 * device control register to reflect the adapter settings. TFCE and RFCE
714 * need to be explicitly set by software when a copper PHY is used because
715 * autonegotiation is managed by the PHY rather than the MAC. Software must
716 * also configure these bits when link is forced on a fiber connection.
717 **/
718s32 igb_force_mac_fc(struct e1000_hw *hw)
719{
720 u32 ctrl;
721 s32 ret_val = 0;
722
723 ctrl = rd32(E1000_CTRL);
724
725 /*
726 * Because we didn't get link via the internal auto-negotiation
727 * mechanism (we either forced link or we got link via PHY
728 * auto-neg), we have to manually enable/disable transmit and
729 * receive flow control.
730 *
731 * The "Case" statement below enables/disables flow control
732 * according to the "hw->fc.type" parameter.
733 *
734 * The possible values of the "fc" parameter are:
735 * 0: Flow control is completely disabled
736 * 1: Rx flow control is enabled (we can receive pause
737 * frames but not send pause frames).
738 * 2: Tx flow control is enabled (we can send pause frames
739 * but we do not receive pause frames).
740 * 3: Both Rx and Tx flow control (symmetric) are enabled.
741 * other: No other values should be possible at this point.
742 */
743 hw_dbg(hw, "hw->fc.type = %u\n", hw->fc.type);
744
745 switch (hw->fc.type) {
746 case e1000_fc_none:
747 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
748 break;
749 case e1000_fc_rx_pause:
750 ctrl &= (~E1000_CTRL_TFCE);
751 ctrl |= E1000_CTRL_RFCE;
752 break;
753 case e1000_fc_tx_pause:
754 ctrl &= (~E1000_CTRL_RFCE);
755 ctrl |= E1000_CTRL_TFCE;
756 break;
757 case e1000_fc_full:
758 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
759 break;
760 default:
761 hw_dbg(hw, "Flow control param set incorrectly\n");
762 ret_val = -E1000_ERR_CONFIG;
763 goto out;
764 }
765
766 wr32(E1000_CTRL, ctrl);
767
768out:
769 return ret_val;
770}
771
772/**
773 * e1000_config_fc_after_link_up - Configures flow control after link
774 * @hw: pointer to the HW structure
775 *
776 * Checks the status of auto-negotiation after link up to ensure that the
777 * speed and duplex were not forced. If the link needed to be forced, then
778 * flow control needs to be forced also. If auto-negotiation is enabled
779 * and did not fail, then we configure flow control based on our link
780 * partner.
781 **/
782s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
783{
784 struct e1000_mac_info *mac = &hw->mac;
785 s32 ret_val = 0;
786 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
787 u16 speed, duplex;
788
789 /*
790 * Check for the case where we have fiber media and auto-neg failed
791 * so we had to force link. In this case, we need to force the
792 * configuration of the MAC to match the "fc" parameter.
793 */
794 if (mac->autoneg_failed) {
795 if (hw->phy.media_type == e1000_media_type_fiber ||
796 hw->phy.media_type == e1000_media_type_internal_serdes)
797 ret_val = igb_force_mac_fc(hw);
798 } else {
799 if (hw->phy.media_type == e1000_media_type_copper)
800 ret_val = igb_force_mac_fc(hw);
801 }
802
803 if (ret_val) {
804 hw_dbg(hw, "Error forcing flow control settings\n");
805 goto out;
806 }
807
808 /*
809 * Check for the case where we have copper media and auto-neg is
810 * enabled. In this case, we need to check and see if Auto-Neg
811 * has completed, and if so, how the PHY and link partner have
812 * flow control configured.
813 */
814 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
815 /*
816 * Read the MII Status Register and check to see if AutoNeg
817 * has completed. We read this twice because this reg has
818 * some "sticky" (latched) bits.
819 */
820 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS,
821 &mii_status_reg);
822 if (ret_val)
823 goto out;
824 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS,
825 &mii_status_reg);
826 if (ret_val)
827 goto out;
828
829 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
830 hw_dbg(hw, "Copper PHY and Auto Neg "
831 "has not completed.\n");
832 goto out;
833 }
834
835 /*
836 * The AutoNeg process has completed, so we now need to
837 * read both the Auto Negotiation Advertisement
838 * Page Ability Register (Address 4) and the Auto-Negotiation Base
839 * Page Ability Register (Address 5) to determine how
840 * flow control was negotiated.
841 */
842 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_AUTONEG_ADV,
843 &mii_nway_adv_reg);
844 if (ret_val)
845 goto out;
846 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_LP_ABILITY,
847 &mii_nway_lp_ability_reg);
848 if (ret_val)
849 goto out;
850
851 /*
852 * Two bits in the Auto Negotiation Advertisement Register
853 * (Address 4) and two bits in the Auto Negotiation Base
854 * Page Ability Register (Address 5) determine flow control
855 * for both the PHY and the link partner. The following
856 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
857 * 1999, describes these PAUSE resolution bits and how flow
858 * control is determined based upon these settings.
859 * NOTE: DC = Don't Care
860 *
861 * LOCAL DEVICE | LINK PARTNER
862 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
863 *-------|---------|-------|---------|--------------------
864 * 0 | 0 | DC | DC | e1000_fc_none
865 * 0 | 1 | 0 | DC | e1000_fc_none
866 * 0 | 1 | 1 | 0 | e1000_fc_none
867 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
868 * 1 | 0 | 0 | DC | e1000_fc_none
869 * 1 | DC | 1 | DC | e1000_fc_full
870 * 1 | 1 | 0 | 0 | e1000_fc_none
871 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
872 *
873 * Are both PAUSE bits set to 1? If so, this implies
874 * Symmetric Flow Control is enabled at both ends. The
875 * ASM_DIR bits are irrelevant per the spec.
876 *
877 * For Symmetric Flow Control:
878 *
879 * LOCAL DEVICE | LINK PARTNER
880 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
881 *-------|---------|-------|---------|--------------------
882 * 1 | DC | 1 | DC | e1000_fc_full
883 *
884 */
885 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
886 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
887 /*
888 * Now we need to check if the user selected RX ONLY
889 * of pause frames. In this case, we had to advertise
890 * FULL flow control because we could not advertise RX
891 * ONLY. Hence, we must now check to see if we need to
892 * turn OFF the TRANSMISSION of PAUSE frames.
893 */
894 if (hw->fc.original_type == e1000_fc_full) {
895 hw->fc.type = e1000_fc_full;
896 hw_dbg(hw, "Flow Control = FULL.\r\n");
897 } else {
898 hw->fc.type = e1000_fc_rx_pause;
899 hw_dbg(hw, "Flow Control = "
900 "RX PAUSE frames only.\r\n");
901 }
902 }
903 /*
904 * For receiving PAUSE frames ONLY.
905 *
906 * LOCAL DEVICE | LINK PARTNER
907 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
908 *-------|---------|-------|---------|--------------------
909 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
910 */
911 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
912 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
913 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
914 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
915 hw->fc.type = e1000_fc_tx_pause;
916 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n");
917 }
918 /*
919 * For transmitting PAUSE frames ONLY.
920 *
921 * LOCAL DEVICE | LINK PARTNER
922 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
923 *-------|---------|-------|---------|--------------------
924 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
925 */
926 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
927 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
928 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
929 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
930 hw->fc.type = e1000_fc_rx_pause;
931 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
932 }
933 /*
934 * Per the IEEE spec, at this point flow control should be
935 * disabled. However, we want to consider that we could
936 * be connected to a legacy switch that doesn't advertise
937 * desired flow control, but can be forced on the link
938 * partner. So if we advertised no flow control, that is
939 * what we will resolve to. If we advertised some kind of
940 * receive capability (Rx Pause Only or Full Flow Control)
941 * and the link partner advertised none, we will configure
942 * ourselves to enable Rx Flow Control only. We can do
943 * this safely for two reasons: If the link partner really
944 * didn't want flow control enabled, and we enable Rx, no
945 * harm done since we won't be receiving any PAUSE frames
946 * anyway. If the intent on the link partner was to have
947 * flow control enabled, then by us enabling RX only, we
948 * can at least receive pause frames and process them.
949 * This is a good idea because in most cases, since we are
950 * predominantly a server NIC, more often than not we will
951 * be asked to delay transmission of packets rather than to ask
952 * our link partner to pause transmission of frames.
953 */
954 else if ((hw->fc.original_type == e1000_fc_none ||
955 hw->fc.original_type == e1000_fc_tx_pause) ||
956 hw->fc.strict_ieee) {
957 hw->fc.type = e1000_fc_none;
958 hw_dbg(hw, "Flow Control = NONE.\r\n");
959 } else {
960 hw->fc.type = e1000_fc_rx_pause;
961 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
962 }
963
964 /*
965 * Now we need to do one last check... If we auto-
966 * negotiated to HALF DUPLEX, flow control should not be
967 * enabled per IEEE 802.3 spec.
968 */
969 ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
970 if (ret_val) {
971 hw_dbg(hw, "Error getting link speed and duplex\n");
972 goto out;
973 }
974
975 if (duplex == HALF_DUPLEX)
976 hw->fc.type = e1000_fc_none;
977
978 /*
979 * Now we call a subroutine to actually force the MAC
980 * controller to use the correct flow control settings.
981 */
982 ret_val = igb_force_mac_fc(hw);
983 if (ret_val) {
984 hw_dbg(hw, "Error forcing flow control settings\n");
985 goto out;
986 }
987 }
988
989out:
990 return ret_val;
991}
992
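The resolution tables in the comments above boil down to four boolean inputs. Here is a standalone sketch of that logic as a pure function, with hypothetical names; note that the driver additionally downgrades the symmetric case to Rx-only when the user originally asked for Rx-only, and falls back to Rx-only rather than none for legacy link partners, as described above.

#include <stdbool.h>

enum fc_type { fc_none, fc_rx_pause, fc_tx_pause, fc_full };

/* PAUSE resolution per the IEEE 802.3 table quoted above. */
static enum fc_type resolve_fc(bool loc_pause, bool loc_asm_dir,
			       bool lp_pause, bool lp_asm_dir)
{
	if (loc_pause && lp_pause)
		return fc_full;		/* symmetric; ASM_DIR irrelevant */
	if (!loc_pause && loc_asm_dir && lp_pause && lp_asm_dir)
		return fc_tx_pause;	/* we send PAUSE, partner honors */
	if (loc_pause && loc_asm_dir && !lp_pause && lp_asm_dir)
		return fc_rx_pause;	/* partner sends, we honor */
	return fc_none;			/* all remaining table rows */
}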
993/**
994 * e1000_get_speed_and_duplex_copper - Retrieve current speed/duplex
995 * @hw: pointer to the HW structure
996 * @speed: stores the current speed
997 * @duplex: stores the current duplex
998 *
999 * Read the status register for the current speed/duplex and store the current
1000 * speed and duplex for copper connections.
1001 **/
1002s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1003 u16 *duplex)
1004{
1005 u32 status;
1006
1007 status = rd32(E1000_STATUS);
1008 if (status & E1000_STATUS_SPEED_1000) {
1009 *speed = SPEED_1000;
1010 hw_dbg(hw, "1000 Mb/s, ");
1011 } else if (status & E1000_STATUS_SPEED_100) {
1012 *speed = SPEED_100;
1013 hw_dbg(hw, "100 Mb/s, ");
1014 } else {
1015 *speed = SPEED_10;
1016 hw_dbg(hw, "10 Mb/s, ");
1017 }
1018
1019 if (status & E1000_STATUS_FD) {
1020 *duplex = FULL_DUPLEX;
1021 hw_dbg(hw, "Full Duplex\n");
1022 } else {
1023 *duplex = HALF_DUPLEX;
1024 hw_dbg(hw, "Half Duplex\n");
1025 }
1026
1027 return 0;
1028}
1029
1030/**
1031 * e1000_get_hw_semaphore - Acquire hardware semaphore
1032 * @hw: pointer to the HW structure
1033 *
1034 * Acquire the HW semaphore to access the PHY or NVM
1035 **/
1036s32 igb_get_hw_semaphore(struct e1000_hw *hw)
1037{
1038 u32 swsm;
1039 s32 ret_val = 0;
1040 s32 timeout = hw->nvm.word_size + 1;
1041 s32 i = 0;
1042
1043 /* Get the SW semaphore */
1044 while (i < timeout) {
1045 swsm = rd32(E1000_SWSM);
1046 if (!(swsm & E1000_SWSM_SMBI))
1047 break;
1048
1049 udelay(50);
1050 i++;
1051 }
1052
1053 if (i == timeout) {
1054 hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
1055 ret_val = -E1000_ERR_NVM;
1056 goto out;
1057 }
1058
1059 /* Get the FW semaphore. */
1060 for (i = 0; i < timeout; i++) {
1061 swsm = rd32(E1000_SWSM);
1062 wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
1063
1064 /* Semaphore acquired if bit latched */
1065 if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
1066 break;
1067
1068 udelay(50);
1069 }
1070
1071 if (i == timeout) {
1072 /* Release semaphores */
1073 igb_put_hw_semaphore(hw);
1074 hw_dbg(hw, "Driver can't access the NVM\n");
1075 ret_val = -E1000_ERR_NVM;
1076 goto out;
1077 }
1078
1079out:
1080 return ret_val;
1081}
1082
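The two-stage handshake above (SMBI for the software semaphore, SWESMBI for the software/firmware semaphore) is always paired with a release. A hedged sketch of the caller pattern follows; it assumes the driver's own types and headers and is illustrative only.

/* Typical caller pattern for the semaphore pair above (sketch):
 * acquire, perform the shared NVM/PHY access, release on every path. */
static s32 example_nvm_access(struct e1000_hw *hw)
{
	s32 ret_val = igb_get_hw_semaphore(hw);
	if (ret_val)
		return ret_val;		/* could not get SMBI/SWESMBI */

	/* ... access the shared NVM or PHY resource here ... */

	igb_put_hw_semaphore(hw);
	return 0;
}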
1083/**
1084 * e1000_put_hw_semaphore - Release hardware semaphore
1085 * @hw: pointer to the HW structure
1086 *
1087 * Release hardware semaphore used to access the PHY or NVM
1088 **/
1089void igb_put_hw_semaphore(struct e1000_hw *hw)
1090{
1091 u32 swsm;
1092
1093 swsm = rd32(E1000_SWSM);
1094
1095 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1096
1097 wr32(E1000_SWSM, swsm);
1098}
1099
1100/**
1101 * e1000_get_auto_rd_done - Check for auto read completion
1102 * @hw: pointer to the HW structure
1103 *
1104 * Check EEPROM for Auto Read done bit.
1105 **/
1106s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1107{
1108 s32 i = 0;
1109 s32 ret_val = 0;
1110
1111
1112 while (i < AUTO_READ_DONE_TIMEOUT) {
1113 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1114 break;
1115 msleep(1);
1116 i++;
1117 }
1118
1119 if (i == AUTO_READ_DONE_TIMEOUT) {
1120 hw_dbg(hw, "Auto read by HW from NVM has not completed.\n");
1121 ret_val = -E1000_ERR_RESET;
1122 goto out;
1123 }
1124
1125out:
1126 return ret_val;
1127}
1128
1129/**
1130 * e1000_valid_led_default - Verify a valid default LED config
1131 * @hw: pointer to the HW structure
1132 * @data: pointer to storage for the LED configuration word read from the NVM
1133 *
1134 * Read the EEPROM for the current default LED configuration. If the
1135 * LED configuration is not valid, set to a valid LED configuration.
1136 **/
1137static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1138{
1139 s32 ret_val;
1140
1141 ret_val = hw->nvm.ops.read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1142 if (ret_val) {
1143 hw_dbg(hw, "NVM Read Error\n");
1144 goto out;
1145 }
1146
1147 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1148 *data = ID_LED_DEFAULT;
1149
1150out:
1151 return ret_val;
1152}
1153
1154/**
1155 * e1000_id_led_init - Initialize LED identification settings from the NVM
1156 * @hw: pointer to the HW structure
1157 *
1158 **/
1159s32 igb_id_led_init(struct e1000_hw *hw)
1160{
1161 struct e1000_mac_info *mac = &hw->mac;
1162 s32 ret_val;
1163 const u32 ledctl_mask = 0x000000FF;
1164 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1165 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1166 u16 data, i, temp;
1167 const u16 led_mask = 0x0F;
1168
1169 ret_val = igb_valid_led_default(hw, &data);
1170 if (ret_val)
1171 goto out;
1172
1173 mac->ledctl_default = rd32(E1000_LEDCTL);
1174 mac->ledctl_mode1 = mac->ledctl_default;
1175 mac->ledctl_mode2 = mac->ledctl_default;
1176
1177 for (i = 0; i < 4; i++) {
1178 temp = (data >> (i << 2)) & led_mask;
1179 switch (temp) {
1180 case ID_LED_ON1_DEF2:
1181 case ID_LED_ON1_ON2:
1182 case ID_LED_ON1_OFF2:
1183 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1184 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1185 break;
1186 case ID_LED_OFF1_DEF2:
1187 case ID_LED_OFF1_ON2:
1188 case ID_LED_OFF1_OFF2:
1189 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1190 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1191 break;
1192 default:
1193 /* Do nothing */
1194 break;
1195 }
1196 switch (temp) {
1197 case ID_LED_DEF1_ON2:
1198 case ID_LED_ON1_ON2:
1199 case ID_LED_OFF1_ON2:
1200 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1201 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1202 break;
1203 case ID_LED_DEF1_OFF2:
1204 case ID_LED_ON1_OFF2:
1205 case ID_LED_OFF1_OFF2:
1206 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1207 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1208 break;
1209 default:
1210 /* Do nothing */
1211 break;
1212 }
1213 }
1214
1215out:
1216 return ret_val;
1217}
1218
1219/**
1220 * e1000_cleanup_led - Set LED config to default operation
1221 * @hw: pointer to the HW structure
1222 *
1223 * Remove the current LED configuration and set the LED configuration
1224 * to the default value, saved from the EEPROM.
1225 **/
1226s32 igb_cleanup_led(struct e1000_hw *hw)
1227{
1228 wr32(E1000_LEDCTL, hw->mac.ledctl_default);
1229 return 0;
1230}
1231
1232/**
1233 * e1000_blink_led - Blink LED
1234 * @hw: pointer to the HW structure
1235 *
1236 * Blink the LEDs which are set to be on.
1237 **/
1238s32 igb_blink_led(struct e1000_hw *hw)
1239{
1240 u32 ledctl_blink = 0;
1241 u32 i;
1242
1243 if (hw->phy.media_type == e1000_media_type_fiber) {
1244 /* always blink LED0 for PCI-E fiber */
1245 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1246 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1247 } else {
1248 /*
1249 * set the blink bit for each LED that's "on" (0x0E)
1250 * in ledctl_mode2
1251 */
1252 ledctl_blink = hw->mac.ledctl_mode2;
1253 for (i = 0; i < 4; i++)
1254 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1255 E1000_LEDCTL_MODE_LED_ON)
1256 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
1257 (i * 8));
1258 }
1259
1260 wr32(E1000_LEDCTL, ledctl_blink);
1261
1262 return 0;
1263}
1264
1265/**
1266 * e1000_led_off - Turn LED off
1267 * @hw: pointer to the HW structure
1268 *
1269 * Turn LED off.
1270 **/
1271s32 igb_led_off(struct e1000_hw *hw)
1272{
1273 u32 ctrl;
1274
1275 switch (hw->phy.media_type) {
1276 case e1000_media_type_fiber:
1277 ctrl = rd32(E1000_CTRL);
1278 ctrl |= E1000_CTRL_SWDPIN0;
1279 ctrl |= E1000_CTRL_SWDPIO0;
1280 wr32(E1000_CTRL, ctrl);
1281 break;
1282 case e1000_media_type_copper:
1283 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1284 break;
1285 default:
1286 break;
1287 }
1288
1289 return 0;
1290}
1291
1292/**
1293 * e1000_disable_pcie_master - Disables PCI-express master access
1294 * @hw: pointer to the HW structure
1295 *
1296 * Returns 0 if successful, else returns -10
1297 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not caused
1298 * the master requests to be disabled.
1299 *
1300 * Disables PCI-Express master access and verifies there are no pending
1301 * requests.
1302 **/
1303s32 igb_disable_pcie_master(struct e1000_hw *hw)
1304{
1305 u32 ctrl;
1306 s32 timeout = MASTER_DISABLE_TIMEOUT;
1307 s32 ret_val = 0;
1308
1309 if (hw->bus.type != e1000_bus_type_pci_express)
1310 goto out;
1311
1312 ctrl = rd32(E1000_CTRL);
1313 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1314 wr32(E1000_CTRL, ctrl);
1315
1316 while (timeout) {
1317 if (!(rd32(E1000_STATUS) &
1318 E1000_STATUS_GIO_MASTER_ENABLE))
1319 break;
1320 udelay(100);
1321 timeout--;
1322 }
1323
1324 if (!timeout) {
1325 hw_dbg(hw, "Master requests are pending.\n");
1326 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1327 goto out;
1328 }
1329
1330out:
1331 return ret_val;
1332}
1333
1334/**
1335 * e1000_reset_adaptive - Reset Adaptive Interframe Spacing
1336 * @hw: pointer to the HW structure
1337 *
1338 * Reset the Adaptive Interframe Spacing throttle to default values.
1339 **/
1340void igb_reset_adaptive(struct e1000_hw *hw)
1341{
1342 struct e1000_mac_info *mac = &hw->mac;
1343
1344 if (!mac->adaptive_ifs) {
1345 hw_dbg(hw, "Not in Adaptive IFS mode!\n");
1346 goto out;
1347 }
1348
1349 if (!mac->ifs_params_forced) {
1350 mac->current_ifs_val = 0;
1351 mac->ifs_min_val = IFS_MIN;
1352 mac->ifs_max_val = IFS_MAX;
1353 mac->ifs_step_size = IFS_STEP;
1354 mac->ifs_ratio = IFS_RATIO;
1355 }
1356
1357 mac->in_ifs_mode = false;
1358 wr32(E1000_AIT, 0);
1359out:
1360 return;
1361}
1362
1363/**
1364 * e1000_update_adaptive - Update Adaptive Interframe Spacing
1365 * @hw: pointer to the HW structure
1366 *
1367 * Update the Adaptive Interframe Spacing Throttle value based on the
1368 * time between transmitted packets and time between collisions.
1369 **/
1370void igb_update_adaptive(struct e1000_hw *hw)
1371{
1372 struct e1000_mac_info *mac = &hw->mac;
1373
1374 if (!mac->adaptive_ifs) {
1375 hw_dbg(hw, "Not in Adaptive IFS mode!\n");
1376 goto out;
1377 }
1378
1379 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
1380 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
1381 mac->in_ifs_mode = true;
1382 if (mac->current_ifs_val < mac->ifs_max_val) {
1383 if (!mac->current_ifs_val)
1384 mac->current_ifs_val = mac->ifs_min_val;
1385 else
1386 mac->current_ifs_val +=
1387 mac->ifs_step_size;
1388 wr32(E1000_AIT,
1389 mac->current_ifs_val);
1390 }
1391 }
1392 } else {
1393 if (mac->in_ifs_mode &&
1394 (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
1395 mac->current_ifs_val = 0;
1396 mac->in_ifs_mode = false;
1397 wr32(E1000_AIT, 0);
1398 }
1399 }
1400out:
1401 return;
1402}
1403
1404/**
1405 * e1000_validate_mdi_setting - Verify MDI/MDIx settings
1406 * @hw: pointer to the HW structure
1407 *
1408 * Verify that when not using auto-negotiation, MDI/MDIx is correctly
1409 * set; only MDI mode may be forced.
1410 **/
1411s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1412{
1413 s32 ret_val = 0;
1414
1415 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1416 hw_dbg(hw, "Invalid MDI setting detected\n");
1417 hw->phy.mdix = 1;
1418 ret_val = -E1000_ERR_CONFIG;
1419 goto out;
1420 }
1421
1422out:
1423 return ret_val;
1424}
1425
1426/**
1427 * e1000_write_8bit_ctrl_reg - Write an 8-bit CTRL register
1428 * @hw: pointer to the HW structure
1429 * @reg: 32bit register offset such as E1000_SCTL
1430 * @offset: register offset to write to
1431 * @data: data to write at register offset
1432 *
1433 * Writes an address/data control type register. There are several of these
1434 * and they all use the format (address << 8) | data; bit 31 is polled for
1435 * completion.
1436 **/
1437s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
1438 u32 offset, u8 data)
1439{
1440 u32 i, regvalue = 0;
1441 s32 ret_val = 0;
1442
1443 /* Set up the address and data */
1444 regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
1445 wr32(reg, regvalue);
1446
1447 /* Poll the ready bit to see if the write completed */
1448 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
1449 udelay(5);
1450 regvalue = rd32(reg);
1451 if (regvalue & E1000_GEN_CTL_READY)
1452 break;
1453 }
1454 if (!(regvalue & E1000_GEN_CTL_READY)) {
1455 hw_dbg(hw, "Reg %08x did not indicate ready\n", reg);
1456 ret_val = -E1000_ERR_PHY;
1457 goto out;
1458 }
1459
1460out:
1461 return ret_val;
1462}
1463
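These address/data control registers pack the offset into bits 15:8 and the payload into bits 7:0, with bit 31 as the ready flag. A small standalone sketch of the encoding; the shift and ready values are assumed from the usual e1000 definitions.

#include <stdint.h>

#define GEN_CTL_ADDRESS_SHIFT 8		/* assumed E1000_GEN_CTL_ADDRESS_SHIFT */
#define GEN_CTL_READY 0x80000000u	/* assumed E1000_GEN_CTL_READY (bit 31) */

/* Build the value written to an 8-bit control register: (addr << 8) | data */
static uint32_t gen_ctl_encode(uint32_t offset, uint8_t data)
{
	return (uint32_t)data | (offset << GEN_CTL_ADDRESS_SHIFT);
}

/* Hardware sets bit 31 once the operation completes. */
static int gen_ctl_done(uint32_t regvalue)
{
	return (regvalue & GEN_CTL_READY) != 0;
}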
1464/**
1465 * e1000_enable_mng_pass_thru - Enable processing of ARPs
1466 * @hw: pointer to the HW structure
1467 *
1468 * Verifies whether the hardware needs to allow ARPs to be processed by the host.
1469 **/
1470bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1471{
1472 u32 manc;
1473 u32 fwsm, factps;
1474 bool ret_val = false;
1475
1476 if (!hw->mac.asf_firmware_present)
1477 goto out;
1478
1479 manc = rd32(E1000_MANC);
1480
1481 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
1482 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
1483 goto out;
1484
1485 if (hw->mac.arc_subsystem_valid) {
1486 fwsm = rd32(E1000_FWSM);
1487 factps = rd32(E1000_FACTPS);
1488
1489 if (!(factps & E1000_FACTPS_MNGCG) &&
1490 ((fwsm & E1000_FWSM_MODE_MASK) ==
1491 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
1492 ret_val = true;
1493 goto out;
1494 }
1495 } else {
1496 if ((manc & E1000_MANC_SMBUS_EN) &&
1497 !(manc & E1000_MANC_ASF_EN)) {
1498 ret_val = true;
1499 goto out;
1500 }
1501 }
1502
1503out:
1504 return ret_val;
1505}
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
new file mode 100644
index 000000000000..326b6592307b
--- /dev/null
+++ b/drivers/net/igb/e1000_mac.h
@@ -0,0 +1,98 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_MAC_H_
29#define _E1000_MAC_H_
30
31#include "e1000_hw.h"
32
33#include "e1000_phy.h"
34#include "e1000_nvm.h"
35#include "e1000_defines.h"
36
37/*
38 * Functions that should not be called directly from drivers but can be used
39 * by other files in this 'shared code'
40 */
41s32 igb_blink_led(struct e1000_hw *hw);
42s32 igb_check_for_copper_link(struct e1000_hw *hw);
43s32 igb_cleanup_led(struct e1000_hw *hw);
44s32 igb_config_fc_after_link_up(struct e1000_hw *hw);
45s32 igb_disable_pcie_master(struct e1000_hw *hw);
46s32 igb_force_mac_fc(struct e1000_hw *hw);
47s32 igb_get_auto_rd_done(struct e1000_hw *hw);
48s32 igb_get_bus_info_pcie(struct e1000_hw *hw);
49s32 igb_get_hw_semaphore(struct e1000_hw *hw);
50s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
51 u16 *duplex);
52s32 igb_id_led_init(struct e1000_hw *hw);
53s32 igb_led_off(struct e1000_hw *hw);
54void igb_update_mc_addr_list(struct e1000_hw *hw,
55 u8 *mc_addr_list, u32 mc_addr_count,
56 u32 rar_used_count, u32 rar_count);
57s32 igb_setup_link(struct e1000_hw *hw);
58s32 igb_validate_mdi_setting(struct e1000_hw *hw);
59s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
60 u32 offset, u8 data);
61
62void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
63void igb_clear_vfta(struct e1000_hw *hw);
64void igb_config_collision_dist(struct e1000_hw *hw);
65void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
66void igb_put_hw_semaphore(struct e1000_hw *hw);
67void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
68s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
69void igb_remove_device(struct e1000_hw *hw);
70void igb_reset_adaptive(struct e1000_hw *hw);
71void igb_update_adaptive(struct e1000_hw *hw);
72void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
73
74bool igb_enable_mng_pass_thru(struct e1000_hw *hw);
75
76enum e1000_mng_mode {
77 e1000_mng_mode_none = 0,
78 e1000_mng_mode_asf,
79 e1000_mng_mode_pt,
80 e1000_mng_mode_ipmi,
81 e1000_mng_mode_host_if_only
82};
83
84#define E1000_FACTPS_MNGCG 0x20000000
85
86#define E1000_FWSM_MODE_MASK 0xE
87#define E1000_FWSM_MODE_SHIFT 1
88
89#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
90#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
91
92#define E1000_HICR_EN 0x01 /* Enable bit - RO */
93/* Driver sets this bit when done to put command in RAM */
94#define E1000_HICR_C 0x02
95
96extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
97
98#endif
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
new file mode 100644
index 000000000000..2897106fee92
--- /dev/null
+++ b/drivers/net/igb/e1000_nvm.c
@@ -0,0 +1,605 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/if_ether.h>
29#include <linux/delay.h>
30
31#include "e1000_mac.h"
32#include "e1000_nvm.h"
33
34/**
35 * e1000_raise_eec_clk - Raise EEPROM clock
36 * @hw: pointer to the HW structure
37 * @eecd: pointer to the EECD register value
38 *
39 * Enable/Raise the EEPROM clock bit.
40 **/
41static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
42{
43 *eecd = *eecd | E1000_EECD_SK;
44 wr32(E1000_EECD, *eecd);
45 wrfl();
46 udelay(hw->nvm.delay_usec);
47}
48
49/**
50 * e1000_lower_eec_clk - Lower EEPROM clock
51 * @hw: pointer to the HW structure
52 * @eecd: pointer to the EECD register value
53 *
54 * Clear/Lower the EEPROM clock bit.
55 **/
56static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
57{
58 *eecd = *eecd & ~E1000_EECD_SK;
59 wr32(E1000_EECD, *eecd);
60 wrfl();
61 udelay(hw->nvm.delay_usec);
62}
63
64/**
65 * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
66 * @hw: pointer to the HW structure
67 * @data: data to send to the EEPROM
68 * @count: number of bits to shift out
69 *
70 * We need to shift 'count' bits out to the EEPROM. So, the value in the
71 * "data" parameter will be shifted out to the EEPROM one bit at a time.
72 * In order to do this, "data" must be broken down into bits.
73 **/
74static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
75{
76 struct e1000_nvm_info *nvm = &hw->nvm;
77 u32 eecd = rd32(E1000_EECD);
78 u32 mask;
79
80 mask = 0x01 << (count - 1);
81 if (nvm->type == e1000_nvm_eeprom_microwire)
82 eecd &= ~E1000_EECD_DO;
83 else if (nvm->type == e1000_nvm_eeprom_spi)
84 eecd |= E1000_EECD_DO;
85
86 do {
87 eecd &= ~E1000_EECD_DI;
88
89 if (data & mask)
90 eecd |= E1000_EECD_DI;
91
92 wr32(E1000_EECD, eecd);
93 wrfl();
94
95 udelay(nvm->delay_usec);
96
97 igb_raise_eec_clk(hw, &eecd);
98 igb_lower_eec_clk(hw, &eecd);
99
100 mask >>= 1;
101 } while (mask);
102
103 eecd &= ~E1000_EECD_DI;
104 wr32(E1000_EECD, eecd);
105}
106
107/**
108 * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
109 * @hw: pointer to the HW structure
110 * @count: number of bits to shift in
111 *
112 * In order to read a register from the EEPROM, we need to shift 'count' bits
113 * in from the EEPROM. Bits are "shifted in" by raising the clock input to
114 * the EEPROM (setting the SK bit), and then reading the value of the data out
115 * "DO" bit. During this "shifting in" process the data in "DI" bit should
116 * always be clear.
117 **/
118static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
119{
120 u32 eecd;
121 u32 i;
122 u16 data;
123
124 eecd = rd32(E1000_EECD);
125
126 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
127 data = 0;
128
129 for (i = 0; i < count; i++) {
130 data <<= 1;
131 igb_raise_eec_clk(hw, &eecd);
132
133 eecd = rd32(E1000_EECD);
134
135 eecd &= ~E1000_EECD_DI;
136 if (eecd & E1000_EECD_DO)
137 data |= 1;
138
139 igb_lower_eec_clk(hw, &eecd);
140 }
141
142 return data;
143}
144
145/**
146 * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
147 * @hw: pointer to the HW structure
148 * @ee_reg: EEPROM flag for polling
149 *
150 * Polls the EEPROM status bit for either read or write completion based
151 * upon the value of 'ee_reg'.
152 **/
153static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
154{
155 u32 attempts = 100000;
156 u32 i, reg = 0;
157 s32 ret_val = -E1000_ERR_NVM;
158
159 for (i = 0; i < attempts; i++) {
160 if (ee_reg == E1000_NVM_POLL_READ)
161 reg = rd32(E1000_EERD);
162 else
163 reg = rd32(E1000_EEWR);
164
165 if (reg & E1000_NVM_RW_REG_DONE) {
166 ret_val = 0;
167 break;
168 }
169
170 udelay(5);
171 }
172
173 return ret_val;
174}
175
176/**
177 * e1000_acquire_nvm - Generic request for access to EEPROM
178 * @hw: pointer to the HW structure
179 *
180 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
181 * Return successful if access grant bit set, else clear the request for
182 * EEPROM access and return -E1000_ERR_NVM (-1).
183 **/
184s32 igb_acquire_nvm(struct e1000_hw *hw)
185{
186 u32 eecd = rd32(E1000_EECD);
187 s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
188 s32 ret_val = 0;
189
190
191 wr32(E1000_EECD, eecd | E1000_EECD_REQ);
192 eecd = rd32(E1000_EECD);
193
194 while (timeout) {
195 if (eecd & E1000_EECD_GNT)
196 break;
197 udelay(5);
198 eecd = rd32(E1000_EECD);
199 timeout--;
200 }
201
202 if (!timeout) {
203 eecd &= ~E1000_EECD_REQ;
204 wr32(E1000_EECD, eecd);
205 hw_dbg(hw, "Could not acquire NVM grant\n");
206 ret_val = -E1000_ERR_NVM;
207 }
208
209 return ret_val;
210}
211
212/**
213 * e1000_standby_nvm - Return EEPROM to standby state
214 * @hw: pointer to the HW structure
215 *
216 * Return the EEPROM to a standby state.
217 **/
218static void igb_standby_nvm(struct e1000_hw *hw)
219{
220 struct e1000_nvm_info *nvm = &hw->nvm;
221 u32 eecd = rd32(E1000_EECD);
222
223 if (nvm->type == e1000_nvm_eeprom_microwire) {
224 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
225 wr32(E1000_EECD, eecd);
226 wrfl();
227 udelay(nvm->delay_usec);
228
229 igb_raise_eec_clk(hw, &eecd);
230
231 /* Select EEPROM */
232 eecd |= E1000_EECD_CS;
233 wr32(E1000_EECD, eecd);
234 wrfl();
235 udelay(nvm->delay_usec);
236
237 igb_lower_eec_clk(hw, &eecd);
238 } else if (nvm->type == e1000_nvm_eeprom_spi) {
239 /* Toggle CS to flush commands */
240 eecd |= E1000_EECD_CS;
241 wr32(E1000_EECD, eecd);
242 wrfl();
243 udelay(nvm->delay_usec);
244 eecd &= ~E1000_EECD_CS;
245 wr32(E1000_EECD, eecd);
246 wrfl();
247 udelay(nvm->delay_usec);
248 }
249}
250
251/**
252 * e1000_stop_nvm - Terminate EEPROM command
253 * @hw: pointer to the HW structure
254 *
255 * Terminates the current command by inverting the EEPROM's chip select pin.
256 **/
257static void e1000_stop_nvm(struct e1000_hw *hw)
258{
259 u32 eecd;
260
261 eecd = rd32(E1000_EECD);
262 if (hw->nvm.type == e1000_nvm_eeprom_spi) {
263 /* Pull CS high */
264 eecd |= E1000_EECD_CS;
265 igb_lower_eec_clk(hw, &eecd);
266 } else if (hw->nvm.type == e1000_nvm_eeprom_microwire) {
267 /* CS on Microwire is active-high */
268 eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
269 wr32(E1000_EECD, eecd);
270 igb_raise_eec_clk(hw, &eecd);
271 igb_lower_eec_clk(hw, &eecd);
272 }
273}
274
275/**
276 * e1000_release_nvm - Release exclusive access to EEPROM
277 * @hw: pointer to the HW structure
278 *
279 * Stop any current commands to the EEPROM and clear the EEPROM request bit.
280 **/
281void igb_release_nvm(struct e1000_hw *hw)
282{
283 u32 eecd;
284
285 e1000_stop_nvm(hw);
286
287 eecd = rd32(E1000_EECD);
288 eecd &= ~E1000_EECD_REQ;
289 wr32(E1000_EECD, eecd);
290}
291
292/**
293 * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
294 * @hw: pointer to the HW structure
295 *
296 * Sets up the EEPROM for reading and writing.
297 **/
298static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
299{
300 struct e1000_nvm_info *nvm = &hw->nvm;
301 u32 eecd = rd32(E1000_EECD);
302 s32 ret_val = 0;
303 u16 timeout = 0;
304 u8 spi_stat_reg;
305
306
307 if (nvm->type == e1000_nvm_eeprom_microwire) {
308 /* Clear SK and DI */
309 eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
310 wr32(E1000_EECD, eecd);
311 /* Set CS */
312 eecd |= E1000_EECD_CS;
313 wr32(E1000_EECD, eecd);
314 } else if (nvm->type == e1000_nvm_eeprom_spi) {
315 /* Clear SK and CS */
316 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
317 wr32(E1000_EECD, eecd);
318 udelay(1);
319 timeout = NVM_MAX_RETRY_SPI;
320
321 /*
322 * Read "Status Register" repeatedly until the LSB is cleared.
323 * The EEPROM will signal that the command has been completed
324 * by clearing bit 0 of the internal status register. If it's
325 * not cleared within 'timeout', then error out.
326 */
327 while (timeout) {
328 igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
329 hw->nvm.opcode_bits);
330 spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
331 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
332 break;
333
334 udelay(5);
335 igb_standby_nvm(hw);
336 timeout--;
337 }
338
339 if (!timeout) {
340 hw_dbg(hw, "SPI NVM Status error\n");
341 ret_val = -E1000_ERR_NVM;
342 goto out;
343 }
344 }
345
346out:
347 return ret_val;
348}
349
350/**
351 * e1000_read_nvm_eerd - Reads EEPROM using EERD register
352 * @hw: pointer to the HW structure
353 * @offset: offset of word in the EEPROM to read
354 * @words: number of words to read
355 * @data: word read from the EEPROM
356 *
357 * Reads a 16 bit word from the EEPROM using the EERD register.
358 **/
359s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
360{
361 struct e1000_nvm_info *nvm = &hw->nvm;
362 u32 i, eerd = 0;
363 s32 ret_val = 0;
364
365 /*
366 * A check for invalid values: offset too large, too many words,
367 * and not enough words.
368 */
369 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
370 (words == 0)) {
371 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
372 ret_val = -E1000_ERR_NVM;
373 goto out;
374 }
375
376 for (i = 0; i < words; i++) {
377 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
378 E1000_NVM_RW_REG_START;
379
380 wr32(E1000_EERD, eerd);
381 ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
382 if (ret_val)
383 break;
384
385 data[i] = (rd32(E1000_EERD) >>
386 E1000_NVM_RW_REG_DATA);
387 }
388
389out:
390 return ret_val;
391}
392
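Each EERD access encodes the word address plus a start bit, and the data comes back in the upper half of the register once the done bit is set. A standalone sketch of the encode/decode; the field positions (address shift 2, data in bits 31:16, start bit 0, done bit 1) are assumed from the usual e1000 layout.

#include <stdint.h>

#define NVM_RW_ADDR_SHIFT 2	/* assumed E1000_NVM_RW_ADDR_SHIFT */
#define NVM_RW_REG_DATA  16	/* assumed E1000_NVM_RW_REG_DATA */
#define NVM_RW_REG_START  1	/* assumed E1000_NVM_RW_REG_START */

/* Value written to EERD to kick off a read of EEPROM word 'offset'. */
static uint32_t eerd_start_read(uint16_t offset)
{
	return ((uint32_t)offset << NVM_RW_ADDR_SHIFT) | NVM_RW_REG_START;
}

/* Once the done bit is set, the 16-bit word sits in the top half of EERD. */
static uint16_t eerd_extract_data(uint32_t eerd)
{
	return (uint16_t)(eerd >> NVM_RW_REG_DATA);
}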
393/**
394 * e1000_write_nvm_spi - Write to EEPROM using SPI
395 * @hw: pointer to the HW structure
396 * @offset: offset within the EEPROM to be written to
397 * @words: number of words to write
398 * @data: 16 bit word(s) to be written to the EEPROM
399 *
400 * Writes data to EEPROM at offset using SPI interface.
401 *
402 * If e1000_update_nvm_checksum is not called after this function, the
403 * EEPROM will most likely contain an invalid checksum.
404 **/
405s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
406{
407 struct e1000_nvm_info *nvm = &hw->nvm;
408 s32 ret_val;
409 u16 widx = 0;
410
411 /*
412 * A check for invalid values: offset too large, too many words,
413 * and not enough words.
414 */
415 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
416 (words == 0)) {
417 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
418 ret_val = -E1000_ERR_NVM;
419 goto out;
420 }
421
422 ret_val = hw->nvm.ops.acquire_nvm(hw);
423 if (ret_val)
424 goto out;
425
426 msleep(10);
427
428 while (widx < words) {
429 u8 write_opcode = NVM_WRITE_OPCODE_SPI;
430
431 ret_val = igb_ready_nvm_eeprom(hw);
432 if (ret_val)
433 goto release;
434
435 igb_standby_nvm(hw);
436
437 /* Send the WRITE ENABLE command (8 bit opcode) */
438 igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
439 nvm->opcode_bits);
440
441 igb_standby_nvm(hw);
442
443 /*
444 * Some SPI eeproms use the 8th address bit embedded in the
445 * opcode
446 */
447 if ((nvm->address_bits == 8) && (offset >= 128))
448 write_opcode |= NVM_A8_OPCODE_SPI;
449
450 /* Send the Write command (8-bit opcode + addr) */
451 igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
452 igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
453 nvm->address_bits);
454
455 /* Loop to allow for up to whole page write of eeprom */
456 while (widx < words) {
457 u16 word_out = data[widx];
458 word_out = (word_out >> 8) | (word_out << 8);
459 igb_shift_out_eec_bits(hw, word_out, 16);
460 widx++;
461
462 if ((((offset + widx) * 2) % nvm->page_size) == 0) {
463 igb_standby_nvm(hw);
464 break;
465 }
466 }
467 }
468
469 msleep(10);
470release:
471 hw->nvm.ops.release_nvm(hw);
472
473out:
474 return ret_val;
475}
476
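The inner loop above byte-swaps each word before shifting it out because the SPI EEPROM expects the high byte on the wire first. The swap is worth spelling out in isolation.

#include <stdint.h>

/* 16-bit byte swap as used before shifting a word out over SPI:
 * 0x1234 becomes 0x3412, so the high byte leaves the wire first. */
static uint16_t spi_word_out(uint16_t word)
{
	return (uint16_t)((word >> 8) | (word << 8));
}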
477/**
478 * e1000_read_part_num - Read device part number
479 * @hw: pointer to the HW structure
480 * @part_num: pointer to device part number
481 *
482 * Reads the product board assembly (PBA) number from the EEPROM and stores
483 * the value in part_num.
484 **/
485s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num)
486{
487 s32 ret_val;
488 u16 nvm_data;
489
490 ret_val = hw->nvm.ops.read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
491 if (ret_val) {
492 hw_dbg(hw, "NVM Read Error\n");
493 goto out;
494 }
495 *part_num = (u32)(nvm_data << 16);
496
497 ret_val = hw->nvm.ops.read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
498 if (ret_val) {
499 hw_dbg(hw, "NVM Read Error\n");
500 goto out;
501 }
502 *part_num |= nvm_data;
503
504out:
505 return ret_val;
506}
507
508/**
509 * e1000_read_mac_addr - Read device MAC address
510 * @hw: pointer to the HW structure
511 *
512 * Reads the device MAC address from the EEPROM and stores the value.
513 * Since devices with two ports use the same EEPROM, we flip the
514 * last bit in the MAC address for the second port.
515 **/
516s32 igb_read_mac_addr(struct e1000_hw *hw)
517{
518 s32 ret_val = 0;
519 u16 offset, nvm_data, i;
520
521 for (i = 0; i < ETH_ALEN; i += 2) {
522 offset = i >> 1;
523 ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data);
524 if (ret_val) {
525 hw_dbg(hw, "NVM Read Error\n");
526 goto out;
527 }
528 hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
529 hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
530 }
531
532 /* Flip last bit of mac address if we're on second port */
533 if (hw->bus.func == E1000_FUNC_1)
534 hw->mac.perm_addr[5] ^= 1;
535
536 for (i = 0; i < ETH_ALEN; i++)
537 hw->mac.addr[i] = hw->mac.perm_addr[i];
538
539out:
540 return ret_val;
541}
542
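The EEPROM stores the MAC address as three little-endian 16-bit words, and the second port's address differs only in the least significant bit of the last byte. A standalone sketch of the unpacking, with hypothetical names and example data:

#include <stdint.h>
#include <stdio.h>

/* Unpack three 16-bit EEPROM words into a 6-byte MAC address and, for
 * the second port (func 1), flip the last bit as the driver does. */
static void unpack_mac(const uint16_t words[3], int func, uint8_t mac[6])
{
	int i;

	for (i = 0; i < 3; i++) {
		mac[2 * i]     = (uint8_t)(words[i] & 0xFF);	/* low byte */
		mac[2 * i + 1] = (uint8_t)(words[i] >> 8);	/* high byte */
	}
	if (func == 1)
		mac[5] ^= 1;
}

int main(void)
{
	uint16_t words[3] = { 0x2200, 0x44BB, 0x6655 };	/* example data */
	uint8_t mac[6];
	int i;

	unpack_mac(words, 1, mac);
	for (i = 0; i < 6; i++)
		printf("%02X%c", mac[i], i < 5 ? ':' : '\n');
	return 0;
}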
543/**
544 * e1000_validate_nvm_checksum - Validate EEPROM checksum
545 * @hw: pointer to the HW structure
546 *
547 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
548 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
549 **/
550s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
551{
552 s32 ret_val = 0;
553 u16 checksum = 0;
554 u16 i, nvm_data;
555
556 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
557 ret_val = hw->nvm.ops.read_nvm(hw, i, 1, &nvm_data);
558 if (ret_val) {
559 hw_dbg(hw, "NVM Read Error\n");
560 goto out;
561 }
562 checksum += nvm_data;
563 }
564
565 if (checksum != (u16) NVM_SUM) {
566 hw_dbg(hw, "NVM Checksum Invalid\n");
567 ret_val = -E1000_ERR_NVM;
568 goto out;
569 }
570
571out:
572 return ret_val;
573}
574
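The checksum convention mirrored by the two functions above: the 16-bit sum of all words up to and including the checksum word must equal 0xBABA, so the update path stores 0xBABA minus the running sum. A tiny standalone sketch of both directions, assuming NVM_SUM is 0xBABA and the checksum lives at word 0x3F:

#include <stdint.h>

#define NVM_SUM   0xBABA	/* assumed, per the comment above */
#define NVM_WORDS 0x40		/* words 0x00..0x3F incl. checksum */

/* Compute the checksum word that makes all words sum to NVM_SUM. */
static uint16_t nvm_make_checksum(const uint16_t words[NVM_WORDS - 1])
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < NVM_WORDS - 1; i++)
		sum += words[i];
	return (uint16_t)(NVM_SUM - sum);
}

/* Validation: the total over all words, checksum included, is NVM_SUM. */
static int nvm_checksum_ok(const uint16_t words[NVM_WORDS])
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < NVM_WORDS; i++)
		sum += words[i];
	return sum == NVM_SUM;
}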
575/**
576 * e1000_update_nvm_checksum - Update EEPROM checksum
577 * @hw: pointer to the HW structure
578 *
579 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
580 * up to the checksum. Then calculates the EEPROM checksum and writes the
581 * value to the EEPROM.
582 **/
583s32 igb_update_nvm_checksum(struct e1000_hw *hw)
584{
585 s32 ret_val;
586 u16 checksum = 0;
587 u16 i, nvm_data;
588
589 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
590 ret_val = hw->nvm.ops.read_nvm(hw, i, 1, &nvm_data);
591 if (ret_val) {
592 hw_dbg(hw, "NVM Read Error while updating checksum.\n");
593 goto out;
594 }
595 checksum += nvm_data;
596 }
597 checksum = (u16) NVM_SUM - checksum;
598 ret_val = hw->nvm.ops.write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
599 if (ret_val)
600 hw_dbg(hw, "NVM Write Error while updating checksum.\n");
601
602out:
603 return ret_val;
604}
605
diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/igb/e1000_nvm.h
new file mode 100644
index 000000000000..1041c34dcbe1
--- /dev/null
+++ b/drivers/net/igb/e1000_nvm.h
@@ -0,0 +1,40 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_NVM_H_
29#define _E1000_NVM_H_
30
31s32 igb_acquire_nvm(struct e1000_hw *hw);
32void igb_release_nvm(struct e1000_hw *hw);
33s32 igb_read_mac_addr(struct e1000_hw *hw);
34s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
35s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
36s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
37s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
38s32 igb_update_nvm_checksum(struct e1000_hw *hw);
39
40#endif
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
new file mode 100644
index 000000000000..08a86b107229
--- /dev/null
+++ b/drivers/net/igb/e1000_phy.c
@@ -0,0 +1,1807 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/if_ether.h>
29#include <linux/delay.h>
30
31#include "e1000_mac.h"
32#include "e1000_phy.h"
33
34static s32 igb_get_phy_cfg_done(struct e1000_hw *hw);
35static void igb_release_phy(struct e1000_hw *hw);
36static s32 igb_acquire_phy(struct e1000_hw *hw);
37static s32 igb_phy_reset_dsp(struct e1000_hw *hw);
38static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
39static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
40 u16 *phy_ctrl);
41static s32 igb_wait_autoneg(struct e1000_hw *hw);
42
43/* Cable length tables */
44static const u16 e1000_m88_cable_length_table[] =
45 { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
46#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
47 (sizeof(e1000_m88_cable_length_table) / \
48 sizeof(e1000_m88_cable_length_table[0]))
49
50static const u16 e1000_igp_2_cable_length_table[] =
51 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
52 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
53 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
54 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
55 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
56 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
57 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
58 104, 109, 114, 118, 121, 124};
59#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
60 (sizeof(e1000_igp_2_cable_length_table) / \
61 sizeof(e1000_igp_2_cable_length_table[0]))
62
63/**
64 * e1000_check_reset_block - Check if PHY reset is blocked
65 * @hw: pointer to the HW structure
66 *
67 * Read the PHY management control register and check whether a PHY reset
68 * is blocked. If a reset is not blocked return 0, otherwise
69 * return E1000_BLK_PHY_RESET (12).
70 **/
71s32 igb_check_reset_block(struct e1000_hw *hw)
72{
73 u32 manc;
74
75 manc = rd32(E1000_MANC);
76
77 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
78 E1000_BLK_PHY_RESET : 0;
79}
80
81/**
82 * e1000_get_phy_id - Retrieve the PHY ID and revision
83 * @hw: pointer to the HW structure
84 *
85 * Reads the PHY registers and stores the PHY ID and possibly the PHY
86 * revision in the hardware structure.
87 **/
88s32 igb_get_phy_id(struct e1000_hw *hw)
89{
90 struct e1000_phy_info *phy = &hw->phy;
91 s32 ret_val = 0;
92 u16 phy_id;
93
94 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_ID1, &phy_id);
95 if (ret_val)
96 goto out;
97
98 phy->id = (u32)(phy_id << 16);
99 udelay(20);
100 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_ID2, &phy_id);
101 if (ret_val)
102 goto out;
103
104 phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
105 phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
106
107out:
108 return ret_val;
109}
110
111/**
112 * e1000_phy_reset_dsp - Reset PHY DSP
113 * @hw: pointer to the HW structure
114 *
115 * Reset the digital signal processor.
116 **/
117static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
118{
119 s32 ret_val;
120
121 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
122 if (ret_val)
123 goto out;
124
125 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
126
127out:
128 return ret_val;
129}
130
131/**
132 * e1000_read_phy_reg_mdic - Read MDI control register
133 * @hw: pointer to the HW structure
134 * @offset: register offset to be read
135 * @data: pointer to the read data
136 *
137 * Reads the MDI control register in the PHY at offset and stores the
138 * information read to data.
139 **/
140static s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
141{
142 struct e1000_phy_info *phy = &hw->phy;
143 u32 i, mdic = 0;
144 s32 ret_val = 0;
145
146 if (offset > MAX_PHY_REG_ADDRESS) {
147 hw_dbg(hw, "PHY Address %d is out of range\n", offset);
148 ret_val = -E1000_ERR_PARAM;
149 goto out;
150 }
151
152 /*
153 * Set up Op-code, Phy Address, and register offset in the MDI
154 * Control register. The MAC will take care of interfacing with the
155 * PHY to retrieve the desired data.
156 */
157 mdic = ((offset << E1000_MDIC_REG_SHIFT) |
158 (phy->addr << E1000_MDIC_PHY_SHIFT) |
159 (E1000_MDIC_OP_READ));
160
161 wr32(E1000_MDIC, mdic);
162
163 /*
164 * Poll the ready bit to see if the MDI read completed.
165 * The timeout was increased because testing showed failures
166 * with the lower timeout.
167 */
168 for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
169 udelay(50);
170 mdic = rd32(E1000_MDIC);
171 if (mdic & E1000_MDIC_READY)
172 break;
173 }
174 if (!(mdic & E1000_MDIC_READY)) {
175 hw_dbg(hw, "MDI Read did not complete\n");
176 ret_val = -E1000_ERR_PHY;
177 goto out;
178 }
179 if (mdic & E1000_MDIC_ERROR) {
180 hw_dbg(hw, "MDI Error\n");
181 ret_val = -E1000_ERR_PHY;
182 goto out;
183 }
184 *data = (u16) mdic;
185
186out:
187 return ret_val;
188}
189
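The MDIC register multiplexes the opcode, PHY address, register offset and, on completion, the data into one 32-bit word. A standalone sketch of the encoding; the field positions (register offset at bit 16, PHY address at bit 21, read opcode 0x08000000) are assumed values, so check e1000_defines.h before relying on them.

#include <stdint.h>

#define MDIC_REG_SHIFT 16		/* assumed E1000_MDIC_REG_SHIFT */
#define MDIC_PHY_SHIFT 21		/* assumed E1000_MDIC_PHY_SHIFT */
#define MDIC_OP_READ   0x08000000u	/* assumed E1000_MDIC_OP_READ */

/* Build the MDIC word that starts a PHY register read; the result
 * comes back in the low 16 bits once the READY bit is set. */
static uint32_t mdic_read_cmd(uint32_t offset, uint32_t phy_addr)
{
	return (offset << MDIC_REG_SHIFT) |
	       (phy_addr << MDIC_PHY_SHIFT) |
	       MDIC_OP_READ;
}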
190/**
191 * e1000_write_phy_reg_mdic - Write MDI control register
192 * @hw: pointer to the HW structure
193 * @offset: register offset to write to
194 * @data: data to write to register at offset
195 *
196 * Writes data to MDI control register in the PHY at offset.
197 **/
198static s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
199{
200 struct e1000_phy_info *phy = &hw->phy;
201 u32 i, mdic = 0;
202 s32 ret_val = 0;
203
204 if (offset > MAX_PHY_REG_ADDRESS) {
205 hw_dbg(hw, "PHY Address %d is out of range\n", offset);
206 ret_val = -E1000_ERR_PARAM;
207 goto out;
208 }
209
210 /*
211 * Set up Op-code, Phy Address, and register offset in the MDI
212 * Control register. The MAC will take care of interfacing with the
213 * PHY to retrieve the desired data.
214 */
215 mdic = (((u32)data) |
216 (offset << E1000_MDIC_REG_SHIFT) |
217 (phy->addr << E1000_MDIC_PHY_SHIFT) |
218 (E1000_MDIC_OP_WRITE));
219
220 wr32(E1000_MDIC, mdic);
221
222 /*
223 * Poll the ready bit to see if the MDI write completed.
224 * The timeout was increased because testing showed failures
225 * with a lower timeout.
226 */
227 for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
228 udelay(50);
229 mdic = rd32(E1000_MDIC);
230 if (mdic & E1000_MDIC_READY)
231 break;
232 }
233 if (!(mdic & E1000_MDIC_READY)) {
234 hw_dbg(hw, "MDI Write did not complete\n");
235 ret_val = -E1000_ERR_PHY;
236 goto out;
237 }
238 if (mdic & E1000_MDIC_ERROR) {
239 hw_dbg(hw, "MDI Error\n");
240 ret_val = -E1000_ERR_PHY;
241 goto out;
242 }
243
244out:
245 return ret_val;
246}
247
248/**
249 * e1000_read_phy_reg_igp - Read igp PHY register
250 * @hw: pointer to the HW structure
251 * @offset: register offset to be read
252 * @data: pointer to the read data
253 *
254 * Acquires the semaphore, if necessary, then reads the PHY register at
255 * offset and stores the retrieved information in data. Releases any
256 * acquired semaphores before exiting.
257 **/
258s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
259{
260 s32 ret_val;
261
262 ret_val = igb_acquire_phy(hw);
263 if (ret_val)
264 goto out;
265
266 if (offset > MAX_PHY_MULTI_PAGE_REG) {
267 ret_val = igb_write_phy_reg_mdic(hw,
268 IGP01E1000_PHY_PAGE_SELECT,
269 (u16)offset);
270 if (ret_val) {
271 igb_release_phy(hw);
272 goto out;
273 }
274 }
275
276 ret_val = igb_read_phy_reg_mdic(hw,
277 MAX_PHY_REG_ADDRESS & offset,
278 data);
279
280 igb_release_phy(hw);
281
282out:
283 return ret_val;
284}
285
286/**
287 * e1000_write_phy_reg_igp - Write igp PHY register
288 * @hw: pointer to the HW structure
289 * @offset: register offset to write to
290 * @data: data to write at register offset
291 *
292 * Acquires the semaphore, if necessary, then writes the data to the PHY
293 * register at the offset. Releases any acquired semaphores before exiting.
294 **/
295s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
296{
297 s32 ret_val;
298
299 ret_val = igb_acquire_phy(hw);
300 if (ret_val)
301 goto out;
302
303 if (offset > MAX_PHY_MULTI_PAGE_REG) {
304 ret_val = igb_write_phy_reg_mdic(hw,
305 IGP01E1000_PHY_PAGE_SELECT,
306 (u16)offset);
307 if (ret_val) {
308 igb_release_phy(hw);
309 goto out;
310 }
311 }
312
313 ret_val = igb_write_phy_reg_mdic(hw,
314 MAX_PHY_REG_ADDRESS & offset,
315 data);
316
317 igb_release_phy(hw);
318
319out:
320 return ret_val;
321}
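
A minimal caller-side sketch of the multi-page addressing handled above: offsets beyond MAX_PHY_MULTI_PAGE_REG trigger a page-select write first, and only the low address bits reach the MDIC transaction. The numeric values assume MAX_PHY_REG_ADDRESS is 0x1F; the helper is hypothetical and not part of the driver:

static s32 example_read_pcs_init(struct e1000_hw *hw, u16 *data)
{
	/*
	 * IGP01E1000_PHY_PCS_INIT_REG (0x00B4) exceeds
	 * MAX_PHY_MULTI_PAGE_REG, so igb_read_phy_reg_igp() first
	 * writes 0x00B4 to IGP01E1000_PHY_PAGE_SELECT, then issues
	 * the MDIC read at 0x00B4 & MAX_PHY_REG_ADDRESS == 0x14.
	 */
	return igb_read_phy_reg_igp(hw, IGP01E1000_PHY_PCS_INIT_REG, data);
}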
322
323/**
324 * e1000_copper_link_setup_m88 - Setup m88 PHYs for copper link
325 * @hw: pointer to the HW structure
326 *
327 * Sets up MDI/MDI-X and polarity for m88 PHYs. If necessary, the transmit
328 * clock and downshift values are also set.
329 **/
330s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
331{
332 struct e1000_phy_info *phy = &hw->phy;
333 s32 ret_val;
334 u16 phy_data;
335
336 if (phy->reset_disable) {
337 ret_val = 0;
338 goto out;
339 }
340
341 /* Enable CRS on TX. This must be set for half-duplex operation. */
342 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
343 &phy_data);
344 if (ret_val)
345 goto out;
346
347 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
348
349 /*
350 * Options:
351 * MDI/MDI-X = 0 (default)
352 * 0 - Auto for all speeds
353 * 1 - MDI mode
354 * 2 - MDI-X mode
355 * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
356 */
357 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
358
359 switch (phy->mdix) {
360 case 1:
361 phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
362 break;
363 case 2:
364 phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
365 break;
366 case 3:
367 phy_data |= M88E1000_PSCR_AUTO_X_1000T;
368 break;
369 case 0:
370 default:
371 phy_data |= M88E1000_PSCR_AUTO_X_MODE;
372 break;
373 }
374
375 /*
376 * Options:
377 * disable_polarity_correction = 0 (default)
378 * Automatic Correction for Reversed Cable Polarity
379 * 0 - Disabled
380 * 1 - Enabled
381 */
382 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
383 if (phy->disable_polarity_correction == 1)
384 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
385
386 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
387 phy_data);
388 if (ret_val)
389 goto out;
390
391 if (phy->revision < E1000_REVISION_4) {
392 /*
393 * Force TX_CLK in the Extended PHY Specific Control Register
394 * to 25MHz clock.
395 */
396 ret_val = hw->phy.ops.read_phy_reg(hw,
397 M88E1000_EXT_PHY_SPEC_CTRL,
398 &phy_data);
399 if (ret_val)
400 goto out;
401
402 phy_data |= M88E1000_EPSCR_TX_CLK_25;
403
404 if ((phy->revision == E1000_REVISION_2) &&
405 (phy->id == M88E1111_I_PHY_ID)) {
406 /* 82573L PHY - set the downshift counter to 5x. */
407 phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
408 phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
409 } else {
410 /* Configure Master and Slave downshift values */
411 phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
412 M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
413 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
414 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
415 }
416 ret_val = hw->phy.ops.write_phy_reg(hw,
417 M88E1000_EXT_PHY_SPEC_CTRL,
418 phy_data);
419 if (ret_val)
420 goto out;
421 }
422
423 /* Commit the changes. */
424 ret_val = igb_phy_sw_reset(hw);
425 if (ret_val) {
426 hw_dbg(hw, "Error committing the PHY changes\n");
427 goto out;
428 }
429
430out:
431 return ret_val;
432}
433
434/**
435 * e1000_copper_link_setup_igp - Setup igp PHYs for copper link
436 * @hw: pointer to the HW structure
437 *
438 * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
439 * igp PHYs.
440 **/
441s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
442{
443 struct e1000_phy_info *phy = &hw->phy;
444 s32 ret_val;
445 u16 data;
446
447 if (phy->reset_disable) {
448 ret_val = 0;
449 goto out;
450 }
451
452 ret_val = hw->phy.ops.reset_phy(hw);
453 if (ret_val) {
454 hw_dbg(hw, "Error resetting the PHY.\n");
455 goto out;
456 }
457
458 /* Wait 15ms for MAC to configure PHY from NVM settings. */
459 msleep(15);
460
461 /*
462 * The NVM settings will configure LPLU in D3 for
463 * non-IGP1 PHYs.
464 */
465 if (phy->type == e1000_phy_igp) {
466 /* disable lplu d3 during driver init */
467 if (hw->phy.ops.set_d3_lplu_state)
468 ret_val = hw->phy.ops.set_d3_lplu_state(hw, false);
469 if (ret_val) {
470 hw_dbg(hw, "Error Disabling LPLU D3\n");
471 goto out;
472 }
473 }
474
475 /* disable lplu d0 during driver init */
476 ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
477 if (ret_val) {
478 hw_dbg(hw, "Error Disabling LPLU D0\n");
479 goto out;
480 }
481 /* Configure mdi-mdix settings */
482 ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
483 if (ret_val)
484 goto out;
485
486 data &= ~IGP01E1000_PSCR_AUTO_MDIX;
487
488 switch (phy->mdix) {
489 case 1:
490 data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
491 break;
492 case 2:
493 data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
494 break;
495 case 0:
496 default:
497 data |= IGP01E1000_PSCR_AUTO_MDIX;
498 break;
499 }
500 ret_val = hw->phy.ops.write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
501 if (ret_val)
502 goto out;
503
504 /* set auto-master slave resolution settings */
505 if (hw->mac.autoneg) {
506 /*
507 * When the autonegotiation advertisement is 1000Mbps only, we
508 * should disable SmartSpeed and enable auto Master/Slave
509 * resolution as the hardware default.
510 */
511 if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
512 /* Disable SmartSpeed */
513 ret_val = hw->phy.ops.read_phy_reg(hw,
514 IGP01E1000_PHY_PORT_CONFIG,
515 &data);
516 if (ret_val)
517 goto out;
518
519 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
520 ret_val = hw->phy.ops.write_phy_reg(hw,
521 IGP01E1000_PHY_PORT_CONFIG,
522 data);
523 if (ret_val)
524 goto out;
525
526 /* Set auto Master/Slave resolution process */
527 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_CTRL,
528 &data);
529 if (ret_val)
530 goto out;
531
532 data &= ~CR_1000T_MS_ENABLE;
533 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_1000T_CTRL,
534 data);
535 if (ret_val)
536 goto out;
537 }
538
539 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_CTRL, &data);
540 if (ret_val)
541 goto out;
542
543 /* load defaults for future use */
544 phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
545 ((data & CR_1000T_MS_VALUE) ?
546 e1000_ms_force_master :
547 e1000_ms_force_slave) :
548 e1000_ms_auto;
549
550 switch (phy->ms_type) {
551 case e1000_ms_force_master:
552 data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
553 break;
554 case e1000_ms_force_slave:
555 data |= CR_1000T_MS_ENABLE;
556 data &= ~(CR_1000T_MS_VALUE);
557 break;
558 case e1000_ms_auto:
559 data &= ~CR_1000T_MS_ENABLE;
560 default:
561 break;
562 }
563 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_1000T_CTRL, data);
564 if (ret_val)
565 goto out;
566 }
567
568out:
569 return ret_val;
570}
571
572/**
573 * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
574 * @hw: pointer to the HW structure
575 *
576 * Performs initial bounds checking on the autoneg advertisement parameter,
577 * then configures the PHY to advertise the full capability. Sets up the PHY
578 * for autoneg and restarts negotiation with the link partner. If
579 * autoneg_wait_to_complete is set, waits for autoneg to complete first.
580 **/
581s32 igb_copper_link_autoneg(struct e1000_hw *hw)
582{
583 struct e1000_phy_info *phy = &hw->phy;
584 s32 ret_val;
585 u16 phy_ctrl;
586
587 /*
588 * Perform some bounds checking on the autoneg advertisement
589 * parameter.
590 */
591 phy->autoneg_advertised &= phy->autoneg_mask;
592
593 /*
594 * If autoneg_advertised is zero, we assume it was not defaulted
595 * by the calling code, so we set it to advertise the full capability.
596 */
597 if (phy->autoneg_advertised == 0)
598 phy->autoneg_advertised = phy->autoneg_mask;
599
600 hw_dbg(hw, "Reconfiguring auto-neg advertisement params\n");
601 ret_val = igb_phy_setup_autoneg(hw);
602 if (ret_val) {
603 hw_dbg(hw, "Error Setting up Auto-Negotiation\n");
604 goto out;
605 }
606 hw_dbg(hw, "Restarting Auto-Neg\n");
607
608 /*
609 * Restart auto-negotiation by setting the Auto Neg Enable bit and
610 * the Auto Neg Restart bit in the PHY control register.
611 */
612 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
613 if (ret_val)
614 goto out;
615
616 phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
617 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
618 if (ret_val)
619 goto out;
620
621 /*
622 * Does the user want to wait for Auto-Neg to complete here, or
623 * check at a later time (for example, from a callback routine)?
624 */
625 if (phy->autoneg_wait_to_complete) {
626 ret_val = igb_wait_autoneg(hw);
627 if (ret_val) {
628 hw_dbg(hw, "Error while waiting for "
629 "autoneg to complete\n");
630 goto out;
631 }
632 }
633
634 hw->mac.get_link_status = true;
635
636out:
637 return ret_val;
638}
639
640/**
641 * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
642 * @hw: pointer to the HW structure
643 *
644 * Reads the MII auto-neg advertisement register and/or the 1000T control
645 * register and, if the PHY is already set up for auto-negotiation,
646 * returns successfully. Otherwise, sets up the advertisement and flow
647 * control to the appropriate values for the desired auto-negotiation.
648 **/
649static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
650{
651 struct e1000_phy_info *phy = &hw->phy;
652 s32 ret_val;
653 u16 mii_autoneg_adv_reg;
654 u16 mii_1000t_ctrl_reg = 0;
655
656 phy->autoneg_advertised &= phy->autoneg_mask;
657
658 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
659 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_AUTONEG_ADV,
660 &mii_autoneg_adv_reg);
661 if (ret_val)
662 goto out;
663
664 if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
665 /* Read the MII 1000Base-T Control Register (Address 9). */
666 ret_val = hw->phy.ops.read_phy_reg(hw,
667 PHY_1000T_CTRL,
668 &mii_1000t_ctrl_reg);
669 if (ret_val)
670 goto out;
671 }
672
673 /*
674 * Need to parse both autoneg_advertised and fc and set up
675 * the appropriate PHY registers. First we will parse for
676 * autoneg_advertised software override. Since we can advertise
677 * a plethora of combinations, we need to check each bit
678 * individually.
679 */
680
681 /*
682 * First we clear all the 10/100 mb speed bits in the Auto-Neg
683 * Advertisement Register (Address 4) and the 1000 mb speed bits in
684 * the 1000Base-T Control Register (Address 9).
685 */
686 mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
687 NWAY_AR_100TX_HD_CAPS |
688 NWAY_AR_10T_FD_CAPS |
689 NWAY_AR_10T_HD_CAPS);
690 mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
691
692 hw_dbg(hw, "autoneg_advertised %x\n", phy->autoneg_advertised);
693
694 /* Do we want to advertise 10 Mb Half Duplex? */
695 if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
696 hw_dbg(hw, "Advertise 10mb Half duplex\n");
697 mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
698 }
699
700 /* Do we want to advertise 10 Mb Full Duplex? */
701 if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
702 hw_dbg(hw, "Advertise 10mb Full duplex\n");
703 mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
704 }
705
706 /* Do we want to advertise 100 Mb Half Duplex? */
707 if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
708 hw_dbg(hw, "Advertise 100mb Half duplex\n");
709 mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
710 }
711
712 /* Do we want to advertise 100 Mb Full Duplex? */
713 if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
714 hw_dbg(hw, "Advertise 100mb Full duplex\n");
715 mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
716 }
717
718 /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
719 if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
720 hw_dbg(hw, "Advertise 1000mb Half duplex request denied!\n");
721
722 /* Do we want to advertise 1000 Mb Full Duplex? */
723 if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
724 hw_dbg(hw, "Advertise 1000mb Full duplex\n");
725 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
726 }
727
728 /*
729 * Check for a software override of the flow control settings, and
730 * setup the PHY advertisement registers accordingly. If
731 * auto-negotiation is enabled, then software will have to set the
732 * "PAUSE" bits to the correct value in the Auto-Negotiation
733 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
734 * negotiation.
735 *
736 * The possible values of the "fc" parameter are:
737 * 0: Flow control is completely disabled
738 * 1: Rx flow control is enabled (we can receive pause frames
739 * but not send pause frames).
740 * 2: Tx flow control is enabled (we can send pause frames
741 * but we do not support receiving pause frames).
742 * 3: Both Rx and TX flow control (symmetric) are enabled.
743 * other: No software override. The flow control configuration
744 * in the EEPROM is used.
745 */
746 switch (hw->fc.type) {
747 case e1000_fc_none:
748 /*
749 * Flow control (RX & TX) is completely disabled by a
750 * software over-ride.
751 */
752 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
753 break;
754 case e1000_fc_rx_pause:
755 /*
756 * RX Flow control is enabled, and TX Flow control is
757 * disabled, by a software over-ride.
758 *
759 * Since there really isn't a way to advertise that we are
760 * capable of RX Pause ONLY, we will advertise that we
761 * support both symmetric and asymmetric RX PAUSE. Later
762 * (in e1000_config_fc_after_link_up) we will disable the
763 * hw's ability to send PAUSE frames.
764 */
765 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
766 break;
767 case e1000_fc_tx_pause:
768 /*
769 * TX Flow control is enabled, and RX Flow control is
770 * disabled, by a software over-ride.
771 */
772 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
773 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
774 break;
775 case e1000_fc_full:
776 /*
777 * Flow control (both RX and TX) is enabled by a software
778 * over-ride.
779 */
780 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
781 break;
782 default:
783 hw_dbg(hw, "Flow control param set incorrectly\n");
784 ret_val = -E1000_ERR_CONFIG;
785 goto out;
786 }
787
788 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_AUTONEG_ADV,
789 mii_autoneg_adv_reg);
790 if (ret_val)
791 goto out;
792
793 hw_dbg(hw, "Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
794
795 if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
796 ret_val = hw->phy.ops.write_phy_reg(hw,
797 PHY_1000T_CTRL,
798 mii_1000t_ctrl_reg);
799 if (ret_val)
800 goto out;
801 }
802
803out:
804 return ret_val;
805}
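
The flow-control switch above reduces to two advertisement bits. This summary assumes the IEEE 802.3 annex 28B encoding, where NWAY_AR_PAUSE is bit 10 and NWAY_AR_ASM_DIR is bit 11 of PHY_AUTONEG_ADV:

/*
 *   hw->fc.type         PAUSE  ASM_DIR  effect advertised
 *   e1000_fc_none         0      0      no PAUSE in either direction
 *   e1000_fc_rx_pause     1      1      symmetric + asymmetric RX
 *                                       (TX masked after link-up)
 *   e1000_fc_tx_pause     0      1      asymmetric TX PAUSE only
 *   e1000_fc_full         1      1      symmetric PAUSE
 */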
806
807/**
808 * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
809 * @hw: pointer to the HW structure
810 *
811 * Calls the PHY setup function to force speed and duplex. Clears the
812 * auto-crossover to force MDI manually. Waits for link and returns
813 * success if link is established, else -E1000_ERR_PHY (-2).
814 **/
815s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
816{
817 struct e1000_phy_info *phy = &hw->phy;
818 s32 ret_val;
819 u16 phy_data;
820 bool link;
821
822 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_data);
823 if (ret_val)
824 goto out;
825
826 igb_phy_force_speed_duplex_setup(hw, &phy_data);
827
828 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_data);
829 if (ret_val)
830 goto out;
831
832 /*
833 * Clear Auto-Crossover to force MDI manually. IGP requires MDI
834 * forced whenever speed and duplex are forced.
835 */
836 ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL,
837 &phy_data);
838 if (ret_val)
839 goto out;
840
841 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
842 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
843
844 ret_val = hw->phy.ops.write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL,
845 phy_data);
846 if (ret_val)
847 goto out;
848
849 hw_dbg(hw, "IGP PSCR: %X\n", phy_data);
850
851 udelay(1);
852
853 if (phy->autoneg_wait_to_complete) {
854 hw_dbg(hw,
855 "Waiting for forced speed/duplex link on IGP phy.\n");
856
857 ret_val = igb_phy_has_link(hw,
858 PHY_FORCE_LIMIT,
859 100000,
860 &link);
861 if (ret_val)
862 goto out;
863
864 if (!link)
865 hw_dbg(hw, "Link taking longer than expected.\n");
866
867 /* Try once more */
868 ret_val = igb_phy_has_link(hw,
869 PHY_FORCE_LIMIT,
870 100000,
871 &link);
872 if (ret_val)
873 goto out;
874 }
875
876out:
877 return ret_val;
878}
879
880/**
881 * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
882 * @hw: pointer to the HW structure
883 *
884 * Calls the PHY setup function to force speed and duplex. Clears the
885 * auto-crossover to force MDI manually. Resets the PHY to commit the
886 * changes. If time expires while waiting for link up, we reset the DSP.
887 * After reset, TX_CLK and CRS on TX must be set. Return successful upon
888 * successful completion, else return corresponding error code.
889 **/
890s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
891{
892 struct e1000_phy_info *phy = &hw->phy;
893 s32 ret_val;
894 u16 phy_data;
895 bool link;
896
897 /*
898 * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
899 * forced whenever speed and duplex are forced.
900 */
901 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
902 &phy_data);
903 if (ret_val)
904 goto out;
905
906 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
907 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
908 phy_data);
909 if (ret_val)
910 goto out;
911
912 hw_dbg(hw, "M88E1000 PSCR: %X\n", phy_data);
913
914 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_data);
915 if (ret_val)
916 goto out;
917
918 igb_phy_force_speed_duplex_setup(hw, &phy_data);
919
920 /* Reset the phy to commit changes. */
921 phy_data |= MII_CR_RESET;
922
923 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_data);
924 if (ret_val)
925 goto out;
926
927 udelay(1);
928
929 if (phy->autoneg_wait_to_complete) {
930 hw_dbg(hw,
931 "Waiting for forced speed/duplex link on M88 phy.\n");
932
933 ret_val = igb_phy_has_link(hw,
934 PHY_FORCE_LIMIT,
935 100000,
936 &link);
937 if (ret_val)
938 goto out;
939
940 if (!link) {
941 /*
942 * We didn't get link.
943 * Reset the DSP and cross our fingers.
944 */
945 ret_val = hw->phy.ops.write_phy_reg(hw,
946 M88E1000_PHY_PAGE_SELECT,
947 0x001d);
948 if (ret_val)
949 goto out;
950 ret_val = igb_phy_reset_dsp(hw);
951 if (ret_val)
952 goto out;
953 }
954
955 /* Try once more */
956 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT,
957 100000, &link);
958 if (ret_val)
959 goto out;
960 }
961
962 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
963 &phy_data);
964 if (ret_val)
965 goto out;
966
967 /*
968 * Resetting the phy means we need to re-force TX_CLK in the
969 * Extended PHY Specific Control Register to 25MHz clock from
970 * the reset value of 2.5MHz.
971 */
972 phy_data |= M88E1000_EPSCR_TX_CLK_25;
973 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
974 phy_data);
975 if (ret_val)
976 goto out;
977
978 /*
979 * In addition, we must re-enable CRS on Tx for both half and full
980 * duplex.
981 */
982 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
983 &phy_data);
984 if (ret_val)
985 goto out;
986
987 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
988 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
989 phy_data);
990
991out:
992 return ret_val;
993}
994
995/**
996 * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
997 * @hw: pointer to the HW structure
998 * @phy_ctrl: pointer to current value of PHY_CONTROL
999 *
1000 * Forces speed and duplex on the PHY by doing the following: disable flow
1001 * control, force speed/duplex on the MAC, disable auto speed detection,
1002 * disable auto-negotiation, configure duplex, configure speed, configure
1003 * the collision distance, write configuration to CTRL register. The
1004 * caller must write to the PHY_CONTROL register for these settings to
1005 * take effect.
1006 **/
1007static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
1008 u16 *phy_ctrl)
1009{
1010 struct e1000_mac_info *mac = &hw->mac;
1011 u32 ctrl;
1012
1013 /* Turn off flow control when forcing speed/duplex */
1014 hw->fc.type = e1000_fc_none;
1015
1016 /* Force speed/duplex on the mac */
1017 ctrl = rd32(E1000_CTRL);
1018 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1019 ctrl &= ~E1000_CTRL_SPD_SEL;
1020
1021 /* Disable Auto Speed Detection */
1022 ctrl &= ~E1000_CTRL_ASDE;
1023
1024 /* Disable autoneg on the phy */
1025 *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
1026
1027 /* Forcing Full or Half Duplex? */
1028 if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
1029 ctrl &= ~E1000_CTRL_FD;
1030 *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
1031 hw_dbg(hw, "Half Duplex\n");
1032 } else {
1033 ctrl |= E1000_CTRL_FD;
1034 *phy_ctrl |= MII_CR_FULL_DUPLEX;
1035 hw_dbg(hw, "Full Duplex\n");
1036 }
1037
1038 /* Forcing 10mb or 100mb? */
1039 if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
1040 ctrl |= E1000_CTRL_SPD_100;
1041 *phy_ctrl |= MII_CR_SPEED_100;
1042 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
1043 hw_dbg(hw, "Forcing 100mb\n");
1044 } else {
1045 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1046 *phy_ctrl |= MII_CR_SPEED_10;
1047 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
1048 hw_dbg(hw, "Forcing 10mb\n");
1049 }
1050
1051 igb_config_collision_dist(hw);
1052
1053 wr32(E1000_CTRL, ctrl);
1054}
1055
1056/**
1057 * e1000_set_d3_lplu_state - Sets low power link up state for D3
1058 * @hw: pointer to the HW structure
1059 * @active: boolean used to enable/disable lplu
1060 *
1061 * Success returns 0, failure returns a negative error code
1062 *
1063 * The low power link up (lplu) state is set to the power management level D3
1064 * and SmartSpeed is disabled when active is true, else clear lplu for D3
1065 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
1066 * is used during Dx states where the power conservation is most important.
1067 * During driver activity, SmartSpeed should be enabled so performance is
1068 * maintained.
1069 **/
1070s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1071{
1072 struct e1000_phy_info *phy = &hw->phy;
1073 s32 ret_val;
1074 u16 data;
1075
1076 ret_val = hw->phy.ops.read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
1077 &data);
1078 if (ret_val)
1079 goto out;
1080
1081 if (!active) {
1082 data &= ~IGP02E1000_PM_D3_LPLU;
1083 ret_val = hw->phy.ops.write_phy_reg(hw,
1084 IGP02E1000_PHY_POWER_MGMT,
1085 data);
1086 if (ret_val)
1087 goto out;
1088 /*
1089 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
1090 * during Dx states where the power conservation is most
1091 * important. During driver activity we should enable
1092 * SmartSpeed, so performance is maintained.
1093 */
1094 if (phy->smart_speed == e1000_smart_speed_on) {
1095 ret_val = hw->phy.ops.read_phy_reg(hw,
1096 IGP01E1000_PHY_PORT_CONFIG,
1097 &data);
1098 if (ret_val)
1099 goto out;
1100
1101 data |= IGP01E1000_PSCFR_SMART_SPEED;
1102 ret_val = hw->phy.ops.write_phy_reg(hw,
1103 IGP01E1000_PHY_PORT_CONFIG,
1104 data);
1105 if (ret_val)
1106 goto out;
1107 } else if (phy->smart_speed == e1000_smart_speed_off) {
1108 ret_val = hw->phy.ops.read_phy_reg(hw,
1109 IGP01E1000_PHY_PORT_CONFIG,
1110 &data);
1111 if (ret_val)
1112 goto out;
1113
1114 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1115 ret_val = hw->phy.ops.write_phy_reg(hw,
1116 IGP01E1000_PHY_PORT_CONFIG,
1117 data);
1118 if (ret_val)
1119 goto out;
1120 }
1121 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
1122 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
1123 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
1124 data |= IGP02E1000_PM_D3_LPLU;
1125 ret_val = hw->phy.ops.write_phy_reg(hw,
1126 IGP02E1000_PHY_POWER_MGMT,
1127 data);
1128 if (ret_val)
1129 goto out;
1130
1131 /* When LPLU is enabled, we should disable SmartSpeed */
1132 ret_val = hw->phy.ops.read_phy_reg(hw,
1133 IGP01E1000_PHY_PORT_CONFIG,
1134 &data);
1135 if (ret_val)
1136 goto out;
1137
1138 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1139 ret_val = hw->phy.ops.write_phy_reg(hw,
1140 IGP01E1000_PHY_PORT_CONFIG,
1141 data);
1142 }
1143
1144out:
1145 return ret_val;
1146}
1147
1148/**
1149 * e1000_check_downshift - Checks whether a downshift in speed occurred
1150 * @hw: pointer to the HW structure
1151 *
1152 * Success returns 0, failure returns a negative error code
1153 *
1154 * A downshift is detected by querying the PHY link health.
1155 **/
1156s32 igb_check_downshift(struct e1000_hw *hw)
1157{
1158 struct e1000_phy_info *phy = &hw->phy;
1159 s32 ret_val;
1160 u16 phy_data, offset, mask;
1161
1162 switch (phy->type) {
1163 case e1000_phy_m88:
1164 case e1000_phy_gg82563:
1165 offset = M88E1000_PHY_SPEC_STATUS;
1166 mask = M88E1000_PSSR_DOWNSHIFT;
1167 break;
1168 case e1000_phy_igp_2:
1169 case e1000_phy_igp:
1170 case e1000_phy_igp_3:
1171 offset = IGP01E1000_PHY_LINK_HEALTH;
1172 mask = IGP01E1000_PLHR_SS_DOWNGRADE;
1173 break;
1174 default:
1175 /* speed downshift not supported */
1176 phy->speed_downgraded = false;
1177 ret_val = 0;
1178 goto out;
1179 }
1180
1181 ret_val = hw->phy.ops.read_phy_reg(hw, offset, &phy_data);
1182
1183 if (!ret_val)
1184 phy->speed_downgraded = (phy_data & mask) ? true : false;
1185
1186out:
1187 return ret_val;
1188}
1189
1190/**
1191 * e1000_check_polarity_m88 - Checks the polarity.
1192 * @hw: pointer to the HW structure
1193 *
1194 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1195 *
1196 * Polarity is determined based on the PHY specific status register.
1197 **/
1198static s32 igb_check_polarity_m88(struct e1000_hw *hw)
1199{
1200 struct e1000_phy_info *phy = &hw->phy;
1201 s32 ret_val;
1202 u16 data;
1203
1204 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
1205
1206 if (!ret_val)
1207 phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
1208 ? e1000_rev_polarity_reversed
1209 : e1000_rev_polarity_normal;
1210
1211 return ret_val;
1212}
1213
1214/**
1215 * e1000_check_polarity_igp - Checks the polarity.
1216 * @hw: pointer to the HW structure
1217 *
1218 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1219 *
1220 * Polarity is determined based on the PHY port status register, and the
1221 * current speed (since there is no polarity at 100Mbps).
1222 **/
1223static s32 igb_check_polarity_igp(struct e1000_hw *hw)
1224{
1225 struct e1000_phy_info *phy = &hw->phy;
1226 s32 ret_val;
1227 u16 data, offset, mask;
1228
1229 /*
1230 * Polarity is determined based on the speed of
1231 * our connection.
1232 */
1233 ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
1234 &data);
1235 if (ret_val)
1236 goto out;
1237
1238 if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
1239 IGP01E1000_PSSR_SPEED_1000MBPS) {
1240 offset = IGP01E1000_PHY_PCS_INIT_REG;
1241 mask = IGP01E1000_PHY_POLARITY_MASK;
1242 } else {
1243 /*
1244 * This really only applies to 10Mbps since
1245 * there is no polarity for 100Mbps (always 0).
1246 */
1247 offset = IGP01E1000_PHY_PORT_STATUS;
1248 mask = IGP01E1000_PSSR_POLARITY_REVERSED;
1249 }
1250
1251 ret_val = hw->phy.ops.read_phy_reg(hw, offset, &data);
1252
1253 if (!ret_val)
1254 phy->cable_polarity = (data & mask)
1255 ? e1000_rev_polarity_reversed
1256 : e1000_rev_polarity_normal;
1257
1258out:
1259 return ret_val;
1260}
1261
1262/**
1263 * e1000_wait_autoneg - Wait for auto-neg completion
1264 * @hw: pointer to the HW structure
1265 *
1266 * Waits for auto-negotiation to complete or for the auto-negotiation time
1267 * limit to expire, whichever happens first.
1268 **/
1269static s32 igb_wait_autoneg(struct e1000_hw *hw)
1270{
1271 s32 ret_val = 0;
1272 u16 i, phy_status;
1273
1274 /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
1275 for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
1276 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status);
1277 if (ret_val)
1278 break;
1279 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status);
1280 if (ret_val)
1281 break;
1282 if (phy_status & MII_SR_AUTONEG_COMPLETE)
1283 break;
1284 msleep(100);
1285 }
1286
1287 /*
1288 * PHY_AUTO_NEG_LIMIT expiration doesn't guarantee auto-negotiation
1289 * has completed.
1290 */
1291 return ret_val;
1292}
1293
1294/**
1295 * e1000_phy_has_link - Polls PHY for link
1296 * @hw: pointer to the HW structure
1297 * @iterations: number of times to poll for link
1298 * @usec_interval: delay between polling attempts
1299 * @success: pointer to whether polling was successful or not
1300 *
1301 * Polls the PHY status register for link, 'iterations' number of times.
1302 **/
1303s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
1304 u32 usec_interval, bool *success)
1305{
1306 s32 ret_val = 0;
1307 u16 i, phy_status;
1308
1309 for (i = 0; i < iterations; i++) {
1310 /*
1311 * Some PHYs require the PHY_STATUS register to be read
1312 * twice due to the link bit being sticky. No harm doing
1313 * it across the board.
1314 */
1315 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status);
1316 if (ret_val)
1317 break;
1318 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status);
1319 if (ret_val)
1320 break;
1321 if (phy_status & MII_SR_LINK_STATUS)
1322 break;
1323 if (usec_interval >= 1000)
1324 mdelay(usec_interval/1000);
1325 else
1326 udelay(usec_interval);
1327 }
1328
1329 *success = (i < iterations) ? true : false;
1330
1331 return ret_val;
1332}
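
A minimal usage sketch, mirroring the forced speed/duplex callers above (PHY_FORCE_LIMIT attempts, 100000 us between polls, i.e. 100 ms via mdelay). The wrapper is hypothetical and not part of the driver:

static s32 example_wait_for_link(struct e1000_hw *hw)
{
	bool link;
	s32 ret_val;

	/* Poll up to PHY_FORCE_LIMIT times, 100 ms apart. */
	ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
	if (!ret_val && !link)
		hw_dbg(hw, "Link taking longer than expected.\n");

	return ret_val;
}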
1333
1334/**
1335 * e1000_get_cable_length_m88 - Determine cable length for m88 PHY
1336 * @hw: pointer to the HW structure
1337 *
1338 * Reads the PHY specific status register to retrieve the cable length
1339 * information. The cable length is determined by averaging the minimum and
1340 * maximum values to get the "average" cable length. The m88 PHY has five
1341 * possible cable length values, which are:
1342 * Register Value Cable Length
1343 * 0 < 50 meters
1344 * 1 50 - 80 meters
1345 * 2 80 - 110 meters
1346 * 3 110 - 140 meters
1347 * 4 > 140 meters
1348 **/
1349s32 igb_get_cable_length_m88(struct e1000_hw *hw)
1350{
1351 struct e1000_phy_info *phy = &hw->phy;
1352 s32 ret_val;
1353 u16 phy_data, index;
1354
1355 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
1356 &phy_data);
1357 if (ret_val)
1358 goto out;
1359
1360 index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
1361 M88E1000_PSSR_CABLE_LENGTH_SHIFT;
1362 phy->min_cable_length = e1000_m88_cable_length_table[index];
1363 phy->max_cable_length = e1000_m88_cable_length_table[index+1];
1364
1365 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
1366
1367out:
1368 return ret_val;
1369}
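
A worked example of the averaging above, assuming the driver's lookup table reads { 0, 50, 80, 110, 140, 140 } meters:

/*
 *   register value 2  ->  min = table[2] = 80 m
 *                         max = table[3] = 110 m
 *   cable_length = (80 + 110) / 2 = 95 m
 */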
1370
1371/**
1372 * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
1373 * @hw: pointer to the HW structure
1374 *
1375 * The automatic gain control (agc) normalizes the amplitude of the
1376 * received signal, adjusting for the attenuation produced by the
1377 * cable. By reading the AGC registers, which reperesent the
1378 * cobination of course and fine gain value, the value can be put
1379 * into a lookup table to obtain the approximate cable length
1380 * for each channel.
1381 **/
1382s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
1383{
1384 struct e1000_phy_info *phy = &hw->phy;
1385 s32 ret_val = 0;
1386 u16 phy_data, i, agc_value = 0;
1387 u16 cur_agc_index, max_agc_index = 0;
1388 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
1389 u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
1390 {IGP02E1000_PHY_AGC_A,
1391 IGP02E1000_PHY_AGC_B,
1392 IGP02E1000_PHY_AGC_C,
1393 IGP02E1000_PHY_AGC_D};
1394
1395 /* Read the AGC registers for all channels */
1396 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
1397 ret_val = hw->phy.ops.read_phy_reg(hw, agc_reg_array[i],
1398 &phy_data);
1399 if (ret_val)
1400 goto out;
1401
1402 /*
1403 * Getting bits 15:9, which represent the combination of
1404 * coarse and fine gain values. The result is a number
1405 * that can be put into the lookup table to obtain the
1406 * approximate cable length.
1407 */
1408 cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
1409 IGP02E1000_AGC_LENGTH_MASK;
1410
1411 /* Array index bound check. */
1412 if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
1413 (cur_agc_index == 0)) {
1414 ret_val = -E1000_ERR_PHY;
1415 goto out;
1416 }
1417
1418 /* Remove min & max AGC values from calculation. */
1419 if (e1000_igp_2_cable_length_table[min_agc_index] >
1420 e1000_igp_2_cable_length_table[cur_agc_index])
1421 min_agc_index = cur_agc_index;
1422 if (e1000_igp_2_cable_length_table[max_agc_index] <
1423 e1000_igp_2_cable_length_table[cur_agc_index])
1424 max_agc_index = cur_agc_index;
1425
1426 agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
1427 }
1428
1429 agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
1430 e1000_igp_2_cable_length_table[max_agc_index]);
1431 agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
1432
1433 /* Calculate cable length with the error range of +/- 10 meters. */
1434 phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
1435 (agc_value - IGP02E1000_AGC_RANGE) : 0;
1436 phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
1437
1438 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
1439
1440out:
1441 return ret_val;
1442}
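
The trimmed mean above drops the shortest and longest channel estimates before averaging. A worked example with assumed per-channel table lookups of 40, 50, 60 and 90 meters:

/*
 *   sum of channels   = 40 + 50 + 60 + 90 = 240
 *   drop min and max  = 240 - (40 + 90)   = 110
 *   trimmed mean      = 110 / (4 - 2)     = 55
 *   with +/- IGP02E1000_AGC_RANGE (15):   min = 40 m, max = 70 m
 *   cable_length      = (40 + 70) / 2     = 55 m
 */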
1443
1444/**
1445 * e1000_get_phy_info_m88 - Retrieve PHY information
1446 * @hw: pointer to the HW structure
1447 *
1448 * Valid for only copper links. Read the PHY status register (sticky read)
1449 * to verify that link is up. Read the PHY special control register to
1450 * determine the polarity and 10base-T extended distance. Read the PHY
1451 * special status register to determine MDI/MDIx and current speed. If
1452 * speed is 1000, then determine cable length, local and remote receiver.
1453 **/
1454s32 igb_get_phy_info_m88(struct e1000_hw *hw)
1455{
1456 struct e1000_phy_info *phy = &hw->phy;
1457 s32 ret_val;
1458 u16 phy_data;
1459 bool link;
1460
1461 if (hw->phy.media_type != e1000_media_type_copper) {
1462 hw_dbg(hw, "Phy info is only valid for copper media\n");
1463 ret_val = -E1000_ERR_CONFIG;
1464 goto out;
1465 }
1466
1467 ret_val = igb_phy_has_link(hw, 1, 0, &link);
1468 if (ret_val)
1469 goto out;
1470
1471 if (!link) {
1472 hw_dbg(hw, "Phy info is only valid if link is up\n");
1473 ret_val = -E1000_ERR_CONFIG;
1474 goto out;
1475 }
1476
1477 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
1478 &phy_data);
1479 if (ret_val)
1480 goto out;
1481
1482 phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
1483 ? true
1484 : false;
1485
1486 ret_val = igb_check_polarity_m88(hw);
1487 if (ret_val)
1488 goto out;
1489
1490 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
1491 &phy_data);
1492 if (ret_val)
1493 goto out;
1494
1495 phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false;
1496
1497 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
1498 ret_val = hw->phy.ops.get_cable_length(hw);
1499 if (ret_val)
1500 goto out;
1501
1502 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS,
1503 &phy_data);
1504 if (ret_val)
1505 goto out;
1506
1507 phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
1508 ? e1000_1000t_rx_status_ok
1509 : e1000_1000t_rx_status_not_ok;
1510
1511 phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
1512 ? e1000_1000t_rx_status_ok
1513 : e1000_1000t_rx_status_not_ok;
1514 } else {
1515 /* Set values to "undefined" */
1516 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
1517 phy->local_rx = e1000_1000t_rx_status_undefined;
1518 phy->remote_rx = e1000_1000t_rx_status_undefined;
1519 }
1520
1521out:
1522 return ret_val;
1523}
1524
1525/**
1526 * e1000_get_phy_info_igp - Retrieve igp PHY information
1527 * @hw: pointer to the HW structure
1528 *
1529 * Read PHY status to determine if link is up. If link is up, then
1530 * set/determine 10base-T extended distance and polarity correction. Read
1531 * PHY port status to determine MDI/MDIx and speed. Based on the speed,
1532 * determine the cable length, local and remote receiver.
1533 **/
1534s32 igb_get_phy_info_igp(struct e1000_hw *hw)
1535{
1536 struct e1000_phy_info *phy = &hw->phy;
1537 s32 ret_val;
1538 u16 data;
1539 bool link;
1540
1541 ret_val = igb_phy_has_link(hw, 1, 0, &link);
1542 if (ret_val)
1543 goto out;
1544
1545 if (!link) {
1546 hw_dbg(hw, "Phy info is only valid if link is up\n");
1547 ret_val = -E1000_ERR_CONFIG;
1548 goto out;
1549 }
1550
1551 phy->polarity_correction = true;
1552
1553 ret_val = igb_check_polarity_igp(hw);
1554 if (ret_val)
1555 goto out;
1556
1557 ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
1558 &data);
1559 if (ret_val)
1560 goto out;
1561
1562 phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false;
1563
1564 if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
1565 IGP01E1000_PSSR_SPEED_1000MBPS) {
1566 ret_val = hw->phy.ops.get_cable_length(hw);
1567 if (ret_val)
1568 goto out;
1569
1570 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS,
1571 &data);
1572 if (ret_val)
1573 goto out;
1574
1575 phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
1576 ? e1000_1000t_rx_status_ok
1577 : e1000_1000t_rx_status_not_ok;
1578
1579 phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
1580 ? e1000_1000t_rx_status_ok
1581 : e1000_1000t_rx_status_not_ok;
1582 } else {
1583 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
1584 phy->local_rx = e1000_1000t_rx_status_undefined;
1585 phy->remote_rx = e1000_1000t_rx_status_undefined;
1586 }
1587
1588out:
1589 return ret_val;
1590}
1591
1592/**
1593 * e1000_phy_sw_reset - PHY software reset
1594 * @hw: pointer to the HW structure
1595 *
1596 * Does a software reset of the PHY by reading the PHY control register and
1597 * setting the reset bit, then writing the control register back to the PHY.
1598 **/
1599s32 igb_phy_sw_reset(struct e1000_hw *hw)
1600{
1601 s32 ret_val;
1602 u16 phy_ctrl;
1603
1604 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
1605 if (ret_val)
1606 goto out;
1607
1608 phy_ctrl |= MII_CR_RESET;
1609 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
1610 if (ret_val)
1611 goto out;
1612
1613 udelay(1);
1614
1615out:
1616 return ret_val;
1617}
1618
1619/**
1620 * e1000_phy_hw_reset - PHY hardware reset
1621 * @hw: pointer to the HW structure
1622 *
1623 * Verify the reset block is not blocking us from resetting. Acquire
1624 * semaphore (if necessary) and read/set/write the device control reset
1625 * bit in the PHY. Wait the appropriate delay time for the device to
1626 * reset and release the semaphore (if necessary).
1627 **/
1628s32 igb_phy_hw_reset(struct e1000_hw *hw)
1629{
1630 struct e1000_phy_info *phy = &hw->phy;
1631 s32 ret_val;
1632 u32 ctrl;
1633
1634 ret_val = igb_check_reset_block(hw);
1635 if (ret_val) {
1636 ret_val = 0;
1637 goto out;
1638 }
1639
1640 ret_val = igb_acquire_phy(hw);
1641 if (ret_val)
1642 goto out;
1643
1644 ctrl = rd32(E1000_CTRL);
1645 wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
1646 wrfl();
1647
1648 udelay(phy->reset_delay_us);
1649
1650 wr32(E1000_CTRL, ctrl);
1651 wrfl();
1652
1653 udelay(150);
1654
1655 igb_release_phy(hw);
1656
1657 ret_val = igb_get_phy_cfg_done(hw);
1658
1659out:
1660 return ret_val;
1661}
1662
1663/* Internal function pointers */
1664
1665/**
1666 * e1000_get_phy_cfg_done - Generic PHY configuration done
1667 * @hw: pointer to the HW structure
1668 *
1669 * Return success if the silicon family did not implement a family-specific
1670 * get_cfg_done function.
1671 **/
1672static s32 igb_get_phy_cfg_done(struct e1000_hw *hw)
1673{
1674 if (hw->phy.ops.get_cfg_done)
1675 return hw->phy.ops.get_cfg_done(hw);
1676
1677 return 0;
1678}
1679
1680/**
1681 * e1000_release_phy - Generic release PHY
1682 * @hw: pointer to the HW structure
1683 *
1684 * Return if silicon family does not require a semaphore when accessing the
1685 * PHY.
1686 **/
1687static void igb_release_phy(struct e1000_hw *hw)
1688{
1689 if (hw->phy.ops.release_phy)
1690 hw->phy.ops.release_phy(hw);
1691}
1692
1693/**
1694 * e1000_acquire_phy - Generic acquire PHY
1695 * @hw: pointer to the HW structure
1696 *
1697 * Return success if silicon family does not require a semaphore when
1698 * accessing the PHY.
1699 **/
1700static s32 igb_acquire_phy(struct e1000_hw *hw)
1701{
1702 if (hw->phy.ops.acquire_phy)
1703 return hw->phy.ops.acquire_phy(hw);
1704
1705 return 0;
1706}
1707
1708/**
1709 * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
1710 * @hw: pointer to the HW structure
1711 *
1712 * When the silicon family has not implemented a forced speed/duplex
1713 * function for the PHY, simply return 0.
1714 **/
1715s32 igb_phy_force_speed_duplex(struct e1000_hw *hw)
1716{
1717 if (hw->phy.ops.force_speed_duplex)
1718 return hw->phy.ops.force_speed_duplex(hw);
1719
1720 return 0;
1721}
1722
1723/**
1724 * e1000_phy_init_script_igp3 - Inits the IGP3 PHY
1725 * @hw: pointer to the HW structure
1726 *
1727 * Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
1728 **/
1729s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
1730{
1731 hw_dbg(hw, "Running IGP 3 PHY init script\n");
1732
1733 /* PHY init IGP 3 */
1734 /* Enable rise/fall, 10-mode work in class-A */
1735 hw->phy.ops.write_phy_reg(hw, 0x2F5B, 0x9018);
1736 /* Remove all caps from Replica path filter */
1737 hw->phy.ops.write_phy_reg(hw, 0x2F52, 0x0000);
1738 /* Bias trimming for ADC, AFE and Driver (Default) */
1739 hw->phy.ops.write_phy_reg(hw, 0x2FB1, 0x8B24);
1740 /* Increase Hybrid poly bias */
1741 hw->phy.ops.write_phy_reg(hw, 0x2FB2, 0xF8F0);
1742 /* Add 4% to TX amplitude in Giga mode */
1743 hw->phy.ops.write_phy_reg(hw, 0x2010, 0x10B0);
1744 /* Disable trimming (TTT) */
1745 hw->phy.ops.write_phy_reg(hw, 0x2011, 0x0000);
1746 /* Poly DC correction to 94.6% + 2% for all channels */
1747 hw->phy.ops.write_phy_reg(hw, 0x20DD, 0x249A);
1748 /* ABS DC correction to 95.9% */
1749 hw->phy.ops.write_phy_reg(hw, 0x20DE, 0x00D3);
1750 /* BG temp curve trim */
1751 hw->phy.ops.write_phy_reg(hw, 0x28B4, 0x04CE);
1752 /* Increasing ADC OPAMP stage 1 currents to max */
1753 hw->phy.ops.write_phy_reg(hw, 0x2F70, 0x29E4);
1754 /* Force 1000 (required for enabling PHY regs configuration) */
1755 hw->phy.ops.write_phy_reg(hw, 0x0000, 0x0140);
1756 /* Set upd_freq to 6 */
1757 hw->phy.ops.write_phy_reg(hw, 0x1F30, 0x1606);
1758 /* Disable NPDFE */
1759 hw->phy.ops.write_phy_reg(hw, 0x1F31, 0xB814);
1760 /* Disable adaptive fixed FFE (Default) */
1761 hw->phy.ops.write_phy_reg(hw, 0x1F35, 0x002A);
1762 /* Enable FFE hysteresis */
1763 hw->phy.ops.write_phy_reg(hw, 0x1F3E, 0x0067);
1764 /* Fixed FFE for short cable lengths */
1765 hw->phy.ops.write_phy_reg(hw, 0x1F54, 0x0065);
1766 /* Fixed FFE for medium cable lengths */
1767 hw->phy.ops.write_phy_reg(hw, 0x1F55, 0x002A);
1768 /* Fixed FFE for long cable lengths */
1769 hw->phy.ops.write_phy_reg(hw, 0x1F56, 0x002A);
1770 /* Enable Adaptive Clip Threshold */
1771 hw->phy.ops.write_phy_reg(hw, 0x1F72, 0x3FB0);
1772 /* AHT reset limit to 1 */
1773 hw->phy.ops.write_phy_reg(hw, 0x1F76, 0xC0FF);
1774 /* Set AHT master delay to 127 msec */
1775 hw->phy.ops.write_phy_reg(hw, 0x1F77, 0x1DEC);
1776 /* Set scan bits for AHT */
1777 hw->phy.ops.write_phy_reg(hw, 0x1F78, 0xF9EF);
1778 /* Set AHT Preset bits */
1779 hw->phy.ops.write_phy_reg(hw, 0x1F79, 0x0210);
1780 /* Change integ_factor of channel A to 3 */
1781 hw->phy.ops.write_phy_reg(hw, 0x1895, 0x0003);
1782 /* Change prop_factor of channels BCD to 8 */
1783 hw->phy.ops.write_phy_reg(hw, 0x1796, 0x0008);
1784 /* Change cg_icount + enable integbp for channels BCD */
1785 hw->phy.ops.write_phy_reg(hw, 0x1798, 0xD008);
1786 /*
1787 * Change cg_icount + enable integbp + change prop_factor_master
1788 * to 8 for channel A
1789 */
1790 hw->phy.ops.write_phy_reg(hw, 0x1898, 0xD918);
1791 /* Disable AHT in Slave mode on channel A */
1792 hw->phy.ops.write_phy_reg(hw, 0x187A, 0x0800);
1793 /*
1794 * Enable LPLU and disable AN to 1000 in non-D0a states,
1795 * Enable SPD+B2B
1796 */
1797 hw->phy.ops.write_phy_reg(hw, 0x0019, 0x008D);
1798 /* Enable restart AN on an1000_dis change */
1799 hw->phy.ops.write_phy_reg(hw, 0x001B, 0x2080);
1800 /* Enable wh_fifo read clock in 10/100 modes */
1801 hw->phy.ops.write_phy_reg(hw, 0x0014, 0x0045);
1802 /* Restart AN, Speed selection is 1000 */
1803 hw->phy.ops.write_phy_reg(hw, 0x0000, 0x1340);
1804
1805 return 0;
1806}
1807
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
new file mode 100644
index 000000000000..8f8fe0a780d1
--- /dev/null
+++ b/drivers/net/igb/e1000_phy.h
@@ -0,0 +1,98 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_PHY_H_
29#define _E1000_PHY_H_
30
31enum e1000_ms_type {
32 e1000_ms_hw_default = 0,
33 e1000_ms_force_master,
34 e1000_ms_force_slave,
35 e1000_ms_auto
36};
37
38enum e1000_smart_speed {
39 e1000_smart_speed_default = 0,
40 e1000_smart_speed_on,
41 e1000_smart_speed_off
42};
43
44s32 igb_check_downshift(struct e1000_hw *hw);
45s32 igb_check_reset_block(struct e1000_hw *hw);
46s32 igb_copper_link_autoneg(struct e1000_hw *hw);
47s32 igb_phy_force_speed_duplex(struct e1000_hw *hw);
48s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
49s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
50s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
51s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw);
52s32 igb_get_cable_length_m88(struct e1000_hw *hw);
53s32 igb_get_cable_length_igp_2(struct e1000_hw *hw);
54s32 igb_get_phy_id(struct e1000_hw *hw);
55s32 igb_get_phy_info_igp(struct e1000_hw *hw);
56s32 igb_get_phy_info_m88(struct e1000_hw *hw);
57s32 igb_phy_sw_reset(struct e1000_hw *hw);
58s32 igb_phy_hw_reset(struct e1000_hw *hw);
59s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
60s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active);
61s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
62s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
63 u32 usec_interval, bool *success);
64s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
65
66/* IGP01E1000 Specific Registers */
67#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
68#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
69#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
70#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
71#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
72#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
73#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
74#define IGP01E1000_PHY_POLARITY_MASK 0x0078
75#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
76#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
77#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
78
79/* Enable flexible speed on link-up */
80#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
81#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
82#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
83#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
84#define IGP01E1000_PSSR_MDIX 0x0008
85#define IGP01E1000_PSSR_SPEED_MASK 0xC000
86#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
87#define IGP02E1000_PHY_CHANNEL_NUM 4
88#define IGP02E1000_PHY_AGC_A 0x11B1
89#define IGP02E1000_PHY_AGC_B 0x12B1
90#define IGP02E1000_PHY_AGC_C 0x14B1
91#define IGP02E1000_PHY_AGC_D 0x18B1
92#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
93#define IGP02E1000_AGC_LENGTH_MASK 0x7F
94#define IGP02E1000_AGC_RANGE 15
95
96#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
97
98#endif
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
new file mode 100644
index 000000000000..ff187b73c69e
--- /dev/null
+++ b/drivers/net/igb/e1000_regs.h
@@ -0,0 +1,270 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_REGS_H_
29#define _E1000_REGS_H_
30
31#define E1000_CTRL 0x00000 /* Device Control - RW */
32#define E1000_STATUS 0x00008 /* Device Status - RO */
33#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
34#define E1000_EERD 0x00014 /* EEPROM Read - RW */
35#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
36#define E1000_MDIC 0x00020 /* MDI Control - RW */
37#define E1000_SCTL 0x00024 /* SerDes Control - RW */
38#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
39#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
40#define E1000_FCT 0x00030 /* Flow Control Type - RW */
41#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
42#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
43#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
44#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
45#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
46#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
47#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
48#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
49#define E1000_RCTL 0x00100 /* RX Control - RW */
50#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
51#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
52#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
53#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
54#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
55#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
56#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
57#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
58#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
59#define E1000_TCTL 0x00400 /* TX Control - RW */
60#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
61#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
62#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
63#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
64#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
65#define E1000_PBS 0x01008 /* Packet Buffer Size */
66#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
67#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
68#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
69#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
70#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
71#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
72#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
73#define E1000_RDFPCQ(_n) (0x02430 + (0x4 * (_n)))
74#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
75/* Split and Replication RX Control - RW */
76/*
77 * Convenience macros
78 *
79 * Note: "_n" is the queue number of the register to be written to.
80 *
81 * Example usage:
82 * E1000_RDBAL(current_rx_queue)
83 */
84#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \
85 : (0x0C000 + ((_n) * 0x40)))
86#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \
87 : (0x0C004 + ((_n) * 0x40)))
88#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \
89 : (0x0C008 + ((_n) * 0x40)))
90#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \
91 : (0x0C00C + ((_n) * 0x40)))
92#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \
93 : (0x0C010 + ((_n) * 0x40)))
94#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \
95 : (0x0C018 + ((_n) * 0x40)))
96#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \
97 : (0x0C028 + ((_n) * 0x40)))
98#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \
99 : (0x0E000 + ((_n) * 0x40)))
100#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \
101 : (0x0E004 + ((_n) * 0x40)))
102#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \
103 : (0x0E008 + ((_n) * 0x40)))
104#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \
105 : (0x0E010 + ((_n) * 0x40)))
106#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \
107 : (0x0E018 + ((_n) * 0x40)))
108#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
109 : (0x0E028 + ((_n) * 0x40)))
110#define E1000_TARC(_n) (0x03840 + ((_n) << 8))
111#define E1000_DCA_TXCTRL(_n) (0x03814 + ((_n) << 8))
112#define E1000_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
113#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
114 : (0x0E038 + ((_n) * 0x40)))
115#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
116 : (0x0E03C + ((_n) * 0x40)))
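/* Worked expansion of the split addressing above: the first four queues
 * sit in the legacy block on a 0x100-byte stride, queues 4 and up in the
 * high block on a 0x40-byte stride:
 *   E1000_RDBAL(2) == 0x02800 + (2 * 0x100) == 0x02A00
 *   E1000_RDBAL(5) == 0x0C000 + (5 * 0x40)  == 0x0C140
 */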
117#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
118#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
119#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
120#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
121#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */
122#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
123#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
124#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
125#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
126#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
127#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
128#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
129#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
130#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
131#define E1000_COLC 0x04028 /* Collision Count - R/clr */
132#define E1000_DC 0x04030 /* Defer Count - R/clr */
133#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
134#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
135#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
136#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
137#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
138#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
139#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
140#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
141#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */
142#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
143#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
144#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
145#define E1000_PRC511 0x04068 /* Packets RX (256-511 bytes) - R/clr */
146#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
147#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
148#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
149#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
150#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
151#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
152#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
153#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
154#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
155#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
156#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
157#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
158#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
159#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
160#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
161#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
162#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
163#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
164#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
165#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
166#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
167#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
168#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
169#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
170#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
171#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
172#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
173#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
174#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
175#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 bytes) - R/clr */
176#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
177#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
178#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
179#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
180#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
181/* Interrupt Cause Rx Packet Timer Expire Count */
182#define E1000_ICRXPTC 0x04104
183/* Interrupt Cause Rx Absolute Timer Expire Count */
184#define E1000_ICRXATC 0x04108
185/* Interrupt Cause Tx Packet Timer Expire Count */
186#define E1000_ICTXPTC 0x0410C
187/* Interrupt Cause Tx Absolute Timer Expire Count */
188#define E1000_ICTXATC 0x04110
189/* Interrupt Cause Tx Queue Empty Count */
190#define E1000_ICTXQEC 0x04118
191/* Interrupt Cause Tx Queue Minimum Threshold Count */
192#define E1000_ICTXQMTC 0x0411C
193/* Interrupt Cause Rx Descriptor Minimum Threshold Count */
194#define E1000_ICRXDMTC 0x04120
195#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
196#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */
197#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
198#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
199#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */
200#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
201#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */
202#define E1000_RPTHC 0x04104 /* Rx Packets To Host */
203#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */
204#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */
205#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */
206#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */
207#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
208#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
209#define E1000_LENERRS 0x04138 /* Length Errors Count */
210#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
211#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
212#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
213#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */
214#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */
215#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
216#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */
217#define E1000_RFCTL 0x05008 /* Receive Filter Control */
218#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
219#define E1000_RA 0x05400 /* Receive Address - RW Array */
220#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
221#define E1000_VMD_CTL 0x0581C /* VMDq Control - RW */
222#define E1000_WUC 0x05800 /* Wakeup Control - RW */
223#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
224#define E1000_WUS 0x05810 /* Wakeup Status - RO */
225#define E1000_MANC 0x05820 /* Management Control - RW */
226#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
227#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
228#define E1000_HOST_IF 0x08800 /* Host Interface */
229
230#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
231#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
232#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
233#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
234#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */
235#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
236#define E1000_SWSM 0x05B50 /* SW Semaphore */
237#define E1000_FWSM 0x05B54 /* FW Semaphore */
238#define E1000_HICR 0x08F00 /* Host Interface Control */
239
240/* RSS registers */
241#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
242#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
243#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/
244#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */
245/* MSI-X Allocation Register (_i) - RW */
246#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4))
247/* MSI-X Table entry addr low reg 0 - RW */
248#define E1000_MSIXTADD(_i) (0x0C000 + ((_i) * 0x10))
249/* MSI-X Table entry addr upper reg 0 - RW */
250#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10))
251/* MSI-X Table entry message reg 0 - RW */
252#define E1000_MSIXTMSG(_i) (0x0C008 + ((_i) * 0x10))
253/* MSI-X Table entry vector ctrl reg 0 - RW */
254#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10))
255/* Redirection Table - RW Array */
256#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))
257#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
258
259#define E1000_REGISTER(a, reg) reg
260
261#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
262#define rd32(reg) (readl(hw->hw_addr + reg))
263#define wrfl() ((void)rd32(E1000_STATUS))
264
265#define array_wr32(reg, offset, value) \
266 (writel(value, hw->hw_addr + reg + ((offset) << 2)))
267#define array_rd32(reg, offset) \
268 (readl(hw->hw_addr + reg + ((offset) << 2)))
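/* Usage sketch (variable names illustrative): these helpers assume a local
 * "struct e1000_hw *hw" is in scope, since they dereference hw->hw_addr
 * directly:
 *
 *   wr32(E1000_RDT(0), ring->next_to_use);    bump RX queue 0 tail
 *   u32 icr = rd32(E1000_ICR);                read the cause register
 *   wrfl();                                   flush posted MMIO writes
 *
 * array_wr32()/array_rd32() address 4-byte-stride register arrays; the
 * receive address table, whose entries are RAL/RAH pairs, is written as
 *   array_wr32(E1000_RA, (index << 1), rar_low);
 *   array_wr32(E1000_RA, (index << 1) + 1, rar_high);
 */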
269
270#endif
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
new file mode 100644
index 000000000000..6b2e7d351d65
--- /dev/null
+++ b/drivers/net/igb/igb.h
@@ -0,0 +1,300 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29/* Linux PRO/1000 Ethernet Driver main header file */
30
31#ifndef _IGB_H_
32#define _IGB_H_
33
34#include "e1000_mac.h"
35#include "e1000_82575.h"
36
37struct igb_adapter;
38
39/* Interrupt defines */
40#define IGB_MAX_TX_CLEAN 72
41
42#define IGB_MIN_DYN_ITR 3000
43#define IGB_MAX_DYN_ITR 96000
44#define IGB_START_ITR 6000
45
46#define IGB_DYN_ITR_PACKET_THRESHOLD 2
47#define IGB_DYN_ITR_LENGTH_LOW 200
48#define IGB_DYN_ITR_LENGTH_HIGH 1000
49
50/* TX/RX descriptor defines */
51#define IGB_DEFAULT_TXD 256
52#define IGB_MIN_TXD 80
53#define IGB_MAX_TXD 4096
54
55#define IGB_DEFAULT_RXD 256
56#define IGB_MIN_RXD 80
57#define IGB_MAX_RXD 4096
58
59#define IGB_DEFAULT_ITR 3 /* dynamic */
60#define IGB_MAX_ITR_USECS 10000
61#define IGB_MIN_ITR_USECS 10
62
63/* Transmit and receive queues */
64#define IGB_MAX_RX_QUEUES 4
65
66/* RX descriptor control thresholds.
67 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
68 * descriptors available in its onboard memory.
69 * Setting this to 0 disables RX descriptor prefetch.
70 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
71 * available in host memory.
72 * If PTHRESH is 0, this should also be 0.
73 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
74 * descriptors until either it has this many to write back, or the
75 * ITR timer expires.
76 */
77#define IGB_RX_PTHRESH 16
78#define IGB_RX_HTHRESH 8
79#define IGB_RX_WTHRESH 1
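/* A sketch of how these thresholds end up in RXDCTL for queue i (see the
 * ring setup in igb_main.c; field positions follow the 82575 RXDCTL
 * layout):
 *
 *   u32 rxdctl = rd32(E1000_RXDCTL(i));
 *   rxdctl &= 0xFFF00000;            zero the old threshold fields
 *   rxdctl |= IGB_RX_PTHRESH;
 *   rxdctl |= IGB_RX_HTHRESH << 8;
 *   rxdctl |= IGB_RX_WTHRESH << 16;
 *   wr32(E1000_RXDCTL(i), rxdctl);
 */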
80
81/* this is the size past which hardware will drop packets when setting LPE=0 */
82#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
83
84/* Supported Rx Buffer Sizes */
85#define IGB_RXBUFFER_128 128 /* Used for packet split */
86#define IGB_RXBUFFER_256 256 /* Used for packet split */
87#define IGB_RXBUFFER_512 512
88#define IGB_RXBUFFER_1024 1024
89#define IGB_RXBUFFER_2048 2048
90#define IGB_RXBUFFER_4096 4096
91#define IGB_RXBUFFER_8192 8192
92#define IGB_RXBUFFER_16384 16384
93
94/* Packet Buffer allocations */
95
96
97/* How many Tx Descriptors do we need to call netif_wake_queue? */
98#define IGB_TX_QUEUE_WAKE 16
99/* How many Rx Buffers do we bundle into one write to the hardware? */
100#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
101
102#define AUTO_ALL_MODES 0
103#define IGB_EEPROM_APME 0x0400
104
105#ifndef IGB_MASTER_SLAVE
106/* Switch to override PHY master/slave setting */
107#define IGB_MASTER_SLAVE e1000_ms_hw_default
108#endif
109
110#define IGB_MNG_VLAN_NONE -1
111
112/* wrapper around a pointer to a socket buffer,
113 * so a DMA handle can be stored along with the buffer */
114struct igb_buffer {
115 struct sk_buff *skb;
116 dma_addr_t dma;
117 union {
118 /* TX */
119 struct {
120 unsigned long time_stamp;
121 u32 length;
122 };
123 /* RX */
124 struct {
125 struct page *page;
126 u64 page_dma;
127 };
128 };
129};
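/* Note: the union lets the TX bookkeeping (time_stamp/length) and the RX
 * bookkeeping (page/page_dma) share storage; a given igb_buffer only ever
 * belongs to one side, so the two sets of fields never coexist. */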
130
131struct igb_queue_stats {
132 u64 packets;
133 u64 bytes;
134};
135
136struct igb_ring {
137 struct igb_adapter *adapter; /* backlink */
138 void *desc; /* descriptor ring memory */
139 dma_addr_t dma; /* phys address of the ring */
140 unsigned int size; /* length of desc. ring in bytes */
141 unsigned int count; /* number of desc. in the ring */
142 u16 next_to_use;
143 u16 next_to_clean;
144 u16 head;
145 u16 tail;
146 struct igb_buffer *buffer_info; /* array of buffer info structs */
147
148 u32 eims_value;
149 u32 itr_val;
150 u16 itr_register;
151 u16 cpu;
152
153 unsigned int total_bytes;
154 unsigned int total_packets;
155
156 union {
157 /* TX */
158 struct {
159 spinlock_t tx_clean_lock;
160 spinlock_t tx_lock;
161 bool detect_tx_hung;
162 };
163 /* RX */
164 struct {
165 /* arrays of page information for packet split */
166 struct sk_buff *pending_skb;
167 int pending_skb_page;
168 int no_itr_adjust;
169 struct igb_queue_stats rx_stats;
170 struct napi_struct napi;
171 };
172 };
173
174 char name[IFNAMSIZ + 5];
175};
176
177#define IGB_DESC_UNUSED(R) \
178 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
179 (R)->next_to_clean - (R)->next_to_use - 1)
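/* Worked example: with count = 256, next_to_use = 10, next_to_clean = 5,
 * clean <= use, so the count is added back in:
 *   256 + 5 - 10 - 1 = 250 descriptors free.
 * With next_to_use = 5, next_to_clean = 10 (clean ahead of use):
 *   0 + 10 - 5 - 1 = 4 descriptors free. One slot is always kept empty
 * so a full ring can be told apart from an empty one. */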
180
181#define E1000_RX_DESC_ADV(R, i) \
182 (&(((union e1000_adv_rx_desc *)((R).desc))[i]))
183#define E1000_TX_DESC_ADV(R, i) \
184 (&(((union e1000_adv_tx_desc *)((R).desc))[i]))
185#define E1000_TX_CTXTDESC_ADV(R, i) \
186 (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
187#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
188#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
189#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
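/* Usage sketch: callers pass the ring (dereferenced) plus an index, and
 * get back a typed pointer into the descriptor memory, e.g.:
 *   union e1000_adv_rx_desc *rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
 *   struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
 */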
190
191/* board specific private data structure */
192
193struct igb_adapter {
194 struct timer_list watchdog_timer;
195 struct timer_list phy_info_timer;
196 struct vlan_group *vlgrp;
197 u16 mng_vlan_id;
198 u32 bd_number;
199 u32 rx_buffer_len;
200 u32 wol;
201 u32 en_mng_pt;
202 u16 link_speed;
203 u16 link_duplex;
204 unsigned int total_tx_bytes;
205 unsigned int total_tx_packets;
206 unsigned int total_rx_bytes;
207 unsigned int total_rx_packets;
208 /* Interrupt Throttle Rate */
209 u32 itr;
210 u32 itr_setting;
211 u16 tx_itr;
212 u16 rx_itr;
213 int set_itr;
214
215 struct work_struct reset_task;
216 struct work_struct watchdog_task;
217 bool fc_autoneg;
218 u8 tx_timeout_factor;
219 struct timer_list blink_timer;
220 unsigned long led_status;
221
222 /* TX */
223 struct igb_ring *tx_ring; /* One per active queue */
224 unsigned int restart_queue;
225 unsigned long tx_queue_len;
226 u32 txd_cmd;
227 u32 gotc;
228 u64 gotc_old;
229 u64 tpt_old;
230 u64 colc_old;
231 u32 tx_timeout_count;
232
233 /* RX */
234 struct igb_ring *rx_ring; /* One per active queue */
235 int num_tx_queues;
236 int num_rx_queues;
237
238 u64 hw_csum_err;
239 u64 hw_csum_good;
240 u64 rx_hdr_split;
241 u32 alloc_rx_buff_failed;
242 bool rx_csum;
243 u32 gorc;
244 u64 gorc_old;
245 u16 rx_ps_hdr_size;
246 u32 max_frame_size;
247 u32 min_frame_size;
248
249 /* OS defined structs */
250 struct net_device *netdev;
251 struct napi_struct napi;
252 struct pci_dev *pdev;
253 struct net_device_stats net_stats;
254
255 /* structs defined in e1000_hw.h */
256 struct e1000_hw hw;
257 struct e1000_hw_stats stats;
258 struct e1000_phy_info phy_info;
259 struct e1000_phy_stats phy_stats;
260
261 u32 test_icr;
262 struct igb_ring test_tx_ring;
263 struct igb_ring test_rx_ring;
264
265 int msg_enable;
266 struct msix_entry *msix_entries;
267 u32 eims_enable_mask;
268
269 /* to not mess up cache alignment, always add to the bottom */
270 unsigned long state;
271 unsigned int msi_enabled;
272
273 u32 eeprom_wol;
274};
275
276enum e1000_state_t {
277 __IGB_TESTING,
278 __IGB_RESETTING,
279 __IGB_DOWN
280};
281
282enum igb_boards {
283 board_82575,
284};
285
286extern char igb_driver_name[];
287extern char igb_driver_version[];
288
289extern char *igb_get_hw_dev_name(struct e1000_hw *hw);
290extern int igb_up(struct igb_adapter *);
291extern void igb_down(struct igb_adapter *);
292extern void igb_reinit_locked(struct igb_adapter *);
293extern void igb_reset(struct igb_adapter *);
294extern int igb_set_spd_dplx(struct igb_adapter *, u16);
295extern int igb_setup_tx_resources(struct igb_adapter *, struct igb_ring *);
296extern int igb_setup_rx_resources(struct igb_adapter *, struct igb_ring *);
297extern void igb_update_stats(struct igb_adapter *);
298extern void igb_set_ethtool_ops(struct net_device *);
299
300#endif /* _IGB_H_ */
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
new file mode 100644
index 000000000000..f69721e4eaa1
--- /dev/null
+++ b/drivers/net/igb/igb_ethtool.c
@@ -0,0 +1,1927 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28/* ethtool support for igb */
29
30#include <linux/vmalloc.h>
31#include <linux/netdevice.h>
32#include <linux/pci.h>
33#include <linux/delay.h>
34#include <linux/interrupt.h>
35#include <linux/if_ether.h>
36#include <linux/ethtool.h>
37
38#include "igb.h"
39
40struct igb_stats {
41 char stat_string[ETH_GSTRING_LEN];
42 int sizeof_stat;
43 int stat_offset;
44};
45
46#define IGB_STAT(m) sizeof(((struct igb_adapter *)0)->m), \
47 offsetof(struct igb_adapter, m)
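/* The (size, offset) pairs are consumed generically by the
 * get_ethtool_stats path; a minimal sketch of that loop (names
 * illustrative):
 *
 *   char *p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
 *   data[i] = (igb_gstrings_stats[i].sizeof_stat == sizeof(u64)) ?
 *             *(u64 *)p : *(u32 *)p;
 */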
48static const struct igb_stats igb_gstrings_stats[] = {
49 { "rx_packets", IGB_STAT(stats.gprc) },
50 { "tx_packets", IGB_STAT(stats.gptc) },
51 { "rx_bytes", IGB_STAT(stats.gorc) },
52 { "tx_bytes", IGB_STAT(stats.gotc) },
53 { "rx_broadcast", IGB_STAT(stats.bprc) },
54 { "tx_broadcast", IGB_STAT(stats.bptc) },
55 { "rx_multicast", IGB_STAT(stats.mprc) },
56 { "tx_multicast", IGB_STAT(stats.mptc) },
57 { "rx_errors", IGB_STAT(net_stats.rx_errors) },
58 { "tx_errors", IGB_STAT(net_stats.tx_errors) },
59 { "tx_dropped", IGB_STAT(net_stats.tx_dropped) },
60 { "multicast", IGB_STAT(stats.mprc) },
61 { "collisions", IGB_STAT(stats.colc) },
62 { "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) },
63 { "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) },
64 { "rx_crc_errors", IGB_STAT(stats.crcerrs) },
65 { "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) },
66 { "rx_no_buffer_count", IGB_STAT(stats.rnbc) },
67 { "rx_missed_errors", IGB_STAT(stats.mpc) },
68 { "tx_aborted_errors", IGB_STAT(stats.ecol) },
69 { "tx_carrier_errors", IGB_STAT(stats.tncrs) },
70 { "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) },
71 { "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) },
72 { "tx_window_errors", IGB_STAT(stats.latecol) },
73 { "tx_abort_late_coll", IGB_STAT(stats.latecol) },
74 { "tx_deferred_ok", IGB_STAT(stats.dc) },
75 { "tx_single_coll_ok", IGB_STAT(stats.scc) },
76 { "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
77 { "tx_timeout_count", IGB_STAT(tx_timeout_count) },
78 { "tx_restart_queue", IGB_STAT(restart_queue) },
79 { "rx_long_length_errors", IGB_STAT(stats.roc) },
80 { "rx_short_length_errors", IGB_STAT(stats.ruc) },
81 { "rx_align_errors", IGB_STAT(stats.algnerrc) },
82 { "tx_tcp_seg_good", IGB_STAT(stats.tsctc) },
83 { "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) },
84 { "rx_flow_control_xon", IGB_STAT(stats.xonrxc) },
85 { "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) },
86 { "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
87 { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
88 { "rx_long_byte_count", IGB_STAT(stats.gorc) },
89 { "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
90 { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
91 { "rx_header_split", IGB_STAT(rx_hdr_split) },
92 { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
93 { "tx_smbus", IGB_STAT(stats.mgptc) },
94 { "rx_smbus", IGB_STAT(stats.mgprc) },
95 { "dropped_smbus", IGB_STAT(stats.mgpdc) },
96};
97
98#define IGB_QUEUE_STATS_LEN \
99 ((((((struct igb_adapter *)netdev->priv)->num_rx_queues > 1) ? \
100 ((struct igb_adapter *)netdev->priv)->num_rx_queues : 0) + \
101 (((((struct igb_adapter *)netdev->priv)->num_tx_queues > 1) ? \
102 ((struct igb_adapter *)netdev->priv)->num_tx_queues : 0))) * \
103 (sizeof(struct igb_queue_stats) / sizeof(u64)))
104#define IGB_GLOBAL_STATS_LEN \
105 sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)
106#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
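/* Worked example: with 4 RX and 4 TX queues (both > 1, so both sets are
 * counted), IGB_QUEUE_STATS_LEN = (4 + 4) *
 * (sizeof(struct igb_queue_stats) / sizeof(u64)) = 8 * 2 = 16 extra u64
 * slots on top of IGB_GLOBAL_STATS_LEN; with one queue each it is 0. */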
107static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
108 "Register test (offline)", "Eeprom test (offline)",
109 "Interrupt test (offline)", "Loopback test (offline)",
110 "Link test (on/offline)"
111};
112#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN
113
114static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
115{
116 struct igb_adapter *adapter = netdev_priv(netdev);
117 struct e1000_hw *hw = &adapter->hw;
118
119 if (hw->phy.media_type == e1000_media_type_copper) {
120
121 ecmd->supported = (SUPPORTED_10baseT_Half |
122 SUPPORTED_10baseT_Full |
123 SUPPORTED_100baseT_Half |
124 SUPPORTED_100baseT_Full |
125 SUPPORTED_1000baseT_Full|
126 SUPPORTED_Autoneg |
127 SUPPORTED_TP);
128 ecmd->advertising = ADVERTISED_TP;
129
130 if (hw->mac.autoneg == 1) {
131 ecmd->advertising |= ADVERTISED_Autoneg;
132 /* the e1000 autoneg seems to match ethtool nicely */
133 ecmd->advertising |= hw->phy.autoneg_advertised;
134 }
135
136 ecmd->port = PORT_TP;
137 ecmd->phy_address = hw->phy.addr;
138 } else {
139 ecmd->supported = (SUPPORTED_1000baseT_Full |
140 SUPPORTED_FIBRE |
141 SUPPORTED_Autoneg);
142
143 ecmd->advertising = (ADVERTISED_1000baseT_Full |
144 ADVERTISED_FIBRE |
145 ADVERTISED_Autoneg);
146
147 ecmd->port = PORT_FIBRE;
148 }
149
150 ecmd->transceiver = XCVR_INTERNAL;
151
152 if (rd32(E1000_STATUS) & E1000_STATUS_LU) {
153
154 adapter->hw.mac.ops.get_speed_and_duplex(hw,
155 &adapter->link_speed,
156 &adapter->link_duplex);
157 ecmd->speed = adapter->link_speed;
158
159 /* unfortunately FULL_DUPLEX != DUPLEX_FULL
160 * and HALF_DUPLEX != DUPLEX_HALF */
161
162 if (adapter->link_duplex == FULL_DUPLEX)
163 ecmd->duplex = DUPLEX_FULL;
164 else
165 ecmd->duplex = DUPLEX_HALF;
166 } else {
167 ecmd->speed = -1;
168 ecmd->duplex = -1;
169 }
170
171 ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
172 hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
173 return 0;
174}
175
176static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
177{
178 struct igb_adapter *adapter = netdev_priv(netdev);
179 struct e1000_hw *hw = &adapter->hw;
180
181 /* When SoL/IDER sessions are active, autoneg/speed/duplex
182 * cannot be changed */
183 if (igb_check_reset_block(hw)) {
184 dev_err(&adapter->pdev->dev, "Cannot change link "
185 "characteristics when SoL/IDER is active.\n");
186 return -EINVAL;
187 }
188
189 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
190 msleep(1);
191
192 if (ecmd->autoneg == AUTONEG_ENABLE) {
193 hw->mac.autoneg = 1;
194 if (hw->phy.media_type == e1000_media_type_fiber)
195 hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
196 ADVERTISED_FIBRE |
197 ADVERTISED_Autoneg;
198 else
199 hw->phy.autoneg_advertised = ecmd->advertising |
200 ADVERTISED_TP |
201 ADVERTISED_Autoneg;
202 ecmd->advertising = hw->phy.autoneg_advertised;
203 } else
204 if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
205 clear_bit(__IGB_RESETTING, &adapter->state);
206 return -EINVAL;
207 }
208
209 /* reset the link */
210
211 if (netif_running(adapter->netdev)) {
212 igb_down(adapter);
213 igb_up(adapter);
214 } else
215 igb_reset(adapter);
216
217 clear_bit(__IGB_RESETTING, &adapter->state);
218 return 0;
219}
220
221static void igb_get_pauseparam(struct net_device *netdev,
222 struct ethtool_pauseparam *pause)
223{
224 struct igb_adapter *adapter = netdev_priv(netdev);
225 struct e1000_hw *hw = &adapter->hw;
226
227 pause->autoneg =
228 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
229
230 if (hw->fc.type == e1000_fc_rx_pause)
231 pause->rx_pause = 1;
232 else if (hw->fc.type == e1000_fc_tx_pause)
233 pause->tx_pause = 1;
234 else if (hw->fc.type == e1000_fc_full) {
235 pause->rx_pause = 1;
236 pause->tx_pause = 1;
237 }
238}
239
240static int igb_set_pauseparam(struct net_device *netdev,
241 struct ethtool_pauseparam *pause)
242{
243 struct igb_adapter *adapter = netdev_priv(netdev);
244 struct e1000_hw *hw = &adapter->hw;
245 int retval = 0;
246
247 adapter->fc_autoneg = pause->autoneg;
248
249 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
250 msleep(1);
251
252 if (pause->rx_pause && pause->tx_pause)
253 hw->fc.type = e1000_fc_full;
254 else if (pause->rx_pause && !pause->tx_pause)
255 hw->fc.type = e1000_fc_rx_pause;
256 else if (!pause->rx_pause && pause->tx_pause)
257 hw->fc.type = e1000_fc_tx_pause;
258 else if (!pause->rx_pause && !pause->tx_pause)
259 hw->fc.type = e1000_fc_none;
260
261 hw->fc.original_type = hw->fc.type;
262
263 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
264 if (netif_running(adapter->netdev)) {
265 igb_down(adapter);
266 igb_up(adapter);
267 } else
268 igb_reset(adapter);
269 } else
270 retval = ((hw->phy.media_type == e1000_media_type_fiber) ?
271 igb_setup_link(hw) : igb_force_mac_fc(hw));
272
273 clear_bit(__IGB_RESETTING, &adapter->state);
274 return retval;
275}
276
277static u32 igb_get_rx_csum(struct net_device *netdev)
278{
279 struct igb_adapter *adapter = netdev_priv(netdev);
280 return adapter->rx_csum;
281}
282
283static int igb_set_rx_csum(struct net_device *netdev, u32 data)
284{
285 struct igb_adapter *adapter = netdev_priv(netdev);
286 adapter->rx_csum = data;
287
288 return 0;
289}
290
291static u32 igb_get_tx_csum(struct net_device *netdev)
292{
293 return (netdev->features & NETIF_F_HW_CSUM) != 0;
294}
295
296static int igb_set_tx_csum(struct net_device *netdev, u32 data)
297{
298 if (data)
299 netdev->features |= NETIF_F_HW_CSUM;
300 else
301 netdev->features &= ~NETIF_F_HW_CSUM;
302
303 return 0;
304}
305
306static int igb_set_tso(struct net_device *netdev, u32 data)
307{
308 struct igb_adapter *adapter = netdev_priv(netdev);
309
310 if (data)
311 netdev->features |= NETIF_F_TSO;
312 else
313 netdev->features &= ~NETIF_F_TSO;
314
315 if (data)
316 netdev->features |= NETIF_F_TSO6;
317 else
318 netdev->features &= ~NETIF_F_TSO6;
319
320 dev_info(&adapter->pdev->dev, "TSO is %s\n",
321 data ? "Enabled" : "Disabled");
322 return 0;
323}
324
325static u32 igb_get_msglevel(struct net_device *netdev)
326{
327 struct igb_adapter *adapter = netdev_priv(netdev);
328 return adapter->msg_enable;
329}
330
331static void igb_set_msglevel(struct net_device *netdev, u32 data)
332{
333 struct igb_adapter *adapter = netdev_priv(netdev);
334 adapter->msg_enable = data;
335}
336
337static int igb_get_regs_len(struct net_device *netdev)
338{
339#define IGB_REGS_LEN 551
340 return IGB_REGS_LEN * sizeof(u32);
341}
342
343static void igb_get_regs(struct net_device *netdev,
344 struct ethtool_regs *regs, void *p)
345{
346 struct igb_adapter *adapter = netdev_priv(netdev);
347 struct e1000_hw *hw = &adapter->hw;
348 u32 *regs_buff = p;
349 u8 i;
350
351 memset(p, 0, IGB_REGS_LEN * sizeof(u32));
352
353 regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
354
355 /* General Registers */
356 regs_buff[0] = rd32(E1000_CTRL);
357 regs_buff[1] = rd32(E1000_STATUS);
358 regs_buff[2] = rd32(E1000_CTRL_EXT);
359 regs_buff[3] = rd32(E1000_MDIC);
360 regs_buff[4] = rd32(E1000_SCTL);
361 regs_buff[5] = rd32(E1000_CONNSW);
362 regs_buff[6] = rd32(E1000_VET);
363 regs_buff[7] = rd32(E1000_LEDCTL);
364 regs_buff[8] = rd32(E1000_PBA);
365 regs_buff[9] = rd32(E1000_PBS);
366 regs_buff[10] = rd32(E1000_FRTIMER);
367 regs_buff[11] = rd32(E1000_TCPTIMER);
368
369 /* NVM Register */
370 regs_buff[12] = rd32(E1000_EECD);
371
372 /* Interrupt */
373 regs_buff[13] = rd32(E1000_EICR);
374 regs_buff[14] = rd32(E1000_EICS);
375 regs_buff[15] = rd32(E1000_EIMS);
376 regs_buff[16] = rd32(E1000_EIMC);
377 regs_buff[17] = rd32(E1000_EIAC);
378 regs_buff[18] = rd32(E1000_EIAM);
379 regs_buff[19] = rd32(E1000_ICR);
380 regs_buff[20] = rd32(E1000_ICS);
381 regs_buff[21] = rd32(E1000_IMS);
382 regs_buff[22] = rd32(E1000_IMC);
383 regs_buff[23] = rd32(E1000_IAC);
384 regs_buff[24] = rd32(E1000_IAM);
385 regs_buff[25] = rd32(E1000_IMIRVP);
386
387 /* Flow Control */
388 regs_buff[26] = rd32(E1000_FCAL);
389 regs_buff[27] = rd32(E1000_FCAH);
390 regs_buff[28] = rd32(E1000_FCTTV);
391 regs_buff[29] = rd32(E1000_FCRTL);
392 regs_buff[30] = rd32(E1000_FCRTH);
393 regs_buff[31] = rd32(E1000_FCRTV);
394
395 /* Receive */
396 regs_buff[32] = rd32(E1000_RCTL);
397 regs_buff[33] = rd32(E1000_RXCSUM);
398 regs_buff[34] = rd32(E1000_RLPML);
399 regs_buff[35] = rd32(E1000_RFCTL);
400 regs_buff[36] = rd32(E1000_MRQC);
401 regs_buff[37] = rd32(E1000_VMD_CTL);
402
403 /* Transmit */
404 regs_buff[38] = rd32(E1000_TCTL);
405 regs_buff[39] = rd32(E1000_TCTL_EXT);
406 regs_buff[40] = rd32(E1000_TIPG);
407 regs_buff[41] = rd32(E1000_DTXCTL);
408
409 /* Wake Up */
410 regs_buff[42] = rd32(E1000_WUC);
411 regs_buff[43] = rd32(E1000_WUFC);
412 regs_buff[44] = rd32(E1000_WUS);
413 regs_buff[45] = rd32(E1000_IPAV);
414 regs_buff[46] = rd32(E1000_WUPL);
415
416 /* MAC */
417 regs_buff[47] = rd32(E1000_PCS_CFG0);
418 regs_buff[48] = rd32(E1000_PCS_LCTL);
419 regs_buff[49] = rd32(E1000_PCS_LSTAT);
420 regs_buff[50] = rd32(E1000_PCS_ANADV);
421 regs_buff[51] = rd32(E1000_PCS_LPAB);
422 regs_buff[52] = rd32(E1000_PCS_NPTX);
423 regs_buff[53] = rd32(E1000_PCS_LPABNP);
424
425 /* Statistics */
426 regs_buff[54] = adapter->stats.crcerrs;
427 regs_buff[55] = adapter->stats.algnerrc;
428 regs_buff[56] = adapter->stats.symerrs;
429 regs_buff[57] = adapter->stats.rxerrc;
430 regs_buff[58] = adapter->stats.mpc;
431 regs_buff[59] = adapter->stats.scc;
432 regs_buff[60] = adapter->stats.ecol;
433 regs_buff[61] = adapter->stats.mcc;
434 regs_buff[62] = adapter->stats.latecol;
435 regs_buff[63] = adapter->stats.colc;
436 regs_buff[64] = adapter->stats.dc;
437 regs_buff[65] = adapter->stats.tncrs;
438 regs_buff[66] = adapter->stats.sec;
439 regs_buff[67] = adapter->stats.htdpmc;
440 regs_buff[68] = adapter->stats.rlec;
441 regs_buff[69] = adapter->stats.xonrxc;
442 regs_buff[70] = adapter->stats.xontxc;
443 regs_buff[71] = adapter->stats.xoffrxc;
444 regs_buff[72] = adapter->stats.xofftxc;
445 regs_buff[73] = adapter->stats.fcruc;
446 regs_buff[74] = adapter->stats.prc64;
447 regs_buff[75] = adapter->stats.prc127;
448 regs_buff[76] = adapter->stats.prc255;
449 regs_buff[77] = adapter->stats.prc511;
450 regs_buff[78] = adapter->stats.prc1023;
451 regs_buff[79] = adapter->stats.prc1522;
452 regs_buff[80] = adapter->stats.gprc;
453 regs_buff[81] = adapter->stats.bprc;
454 regs_buff[82] = adapter->stats.mprc;
455 regs_buff[83] = adapter->stats.gptc;
456 regs_buff[84] = adapter->stats.gorc;
457 regs_buff[86] = adapter->stats.gotc;
458 regs_buff[88] = adapter->stats.rnbc;
459 regs_buff[89] = adapter->stats.ruc;
460 regs_buff[90] = adapter->stats.rfc;
461 regs_buff[91] = adapter->stats.roc;
462 regs_buff[92] = adapter->stats.rjc;
463 regs_buff[93] = adapter->stats.mgprc;
464 regs_buff[94] = adapter->stats.mgpdc;
465 regs_buff[95] = adapter->stats.mgptc;
466 regs_buff[96] = adapter->stats.tor;
467 regs_buff[98] = adapter->stats.tot;
468 regs_buff[100] = adapter->stats.tpr;
469 regs_buff[101] = adapter->stats.tpt;
470 regs_buff[102] = adapter->stats.ptc64;
471 regs_buff[103] = adapter->stats.ptc127;
472 regs_buff[104] = adapter->stats.ptc255;
473 regs_buff[105] = adapter->stats.ptc511;
474 regs_buff[106] = adapter->stats.ptc1023;
475 regs_buff[107] = adapter->stats.ptc1522;
476 regs_buff[108] = adapter->stats.mptc;
477 regs_buff[109] = adapter->stats.bptc;
478 regs_buff[110] = adapter->stats.tsctc;
479 regs_buff[111] = adapter->stats.iac;
480 regs_buff[112] = adapter->stats.rpthc;
481 regs_buff[113] = adapter->stats.hgptc;
482 regs_buff[114] = adapter->stats.hgorc;
483 regs_buff[116] = adapter->stats.hgotc;
484 regs_buff[118] = adapter->stats.lenerrs;
485 regs_buff[119] = adapter->stats.scvpc;
486 regs_buff[120] = adapter->stats.hrmpc;
487
488 /* These should probably be added to e1000_regs.h instead */
489 #define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4))
490 #define E1000_RAL(_i) (0x05400 + ((_i) * 8))
491 #define E1000_RAH(_i) (0x05404 + ((_i) * 8))
492 #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
493 #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
494 #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
495 #define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
496 #define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
497 #define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
498
499 for (i = 0; i < 4; i++)
500 regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
501 for (i = 0; i < 4; i++)
502 regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i));
503 for (i = 0; i < 4; i++)
504 regs_buff[129 + i] = rd32(E1000_RDBAL(i));
505 for (i = 0; i < 4; i++)
506 regs_buff[133 + i] = rd32(E1000_RDBAH(i));
507 for (i = 0; i < 4; i++)
508 regs_buff[137 + i] = rd32(E1000_RDLEN(i));
509 for (i = 0; i < 4; i++)
510 regs_buff[141 + i] = rd32(E1000_RDH(i));
511 for (i = 0; i < 4; i++)
512 regs_buff[145 + i] = rd32(E1000_RDT(i));
513 for (i = 0; i < 4; i++)
514 regs_buff[149 + i] = rd32(E1000_RXDCTL(i));
515
516 for (i = 0; i < 10; i++)
517 regs_buff[153 + i] = rd32(E1000_EITR(i));
518 for (i = 0; i < 8; i++)
519 regs_buff[163 + i] = rd32(E1000_IMIR(i));
520 for (i = 0; i < 8; i++)
521 regs_buff[171 + i] = rd32(E1000_IMIREXT(i));
522 for (i = 0; i < 16; i++)
523 regs_buff[179 + i] = rd32(E1000_RAL(i));
524 for (i = 0; i < 16; i++)
525 regs_buff[195 + i] = rd32(E1000_RAH(i));
526
527 for (i = 0; i < 4; i++)
528 regs_buff[211 + i] = rd32(E1000_TDBAL(i));
529 for (i = 0; i < 4; i++)
530 regs_buff[215 + i] = rd32(E1000_TDBAH(i));
531 for (i = 0; i < 4; i++)
532 regs_buff[219 + i] = rd32(E1000_TDLEN(i));
533 for (i = 0; i < 4; i++)
534 regs_buff[223 + i] = rd32(E1000_TDH(i));
535 for (i = 0; i < 4; i++)
536 regs_buff[227 + i] = rd32(E1000_TDT(i));
537 for (i = 0; i < 4; i++)
538 regs_buff[231 + i] = rd32(E1000_TXDCTL(i));
539 for (i = 0; i < 4; i++)
540 regs_buff[235 + i] = rd32(E1000_TDWBAL(i));
541 for (i = 0; i < 4; i++)
542 regs_buff[239 + i] = rd32(E1000_TDWBAH(i));
543 for (i = 0; i < 4; i++)
544 regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i));
545
546 for (i = 0; i < 4; i++)
547 regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i));
548 for (i = 0; i < 4; i++)
549 regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i));
550 for (i = 0; i < 32; i++)
551 regs_buff[255 + i] = rd32(E1000_WUPM_REG(i));
552 for (i = 0; i < 128; i++)
553 regs_buff[287 + i] = rd32(E1000_FFMT_REG(i));
554 for (i = 0; i < 128; i++)
555 regs_buff[415 + i] = rd32(E1000_FFVT_REG(i));
556 for (i = 0; i < 4; i++)
557 regs_buff[543 + i] = rd32(E1000_FFLT_REG(i));
558
559 regs_buff[547] = rd32(E1000_TDFH);
560 regs_buff[548] = rd32(E1000_TDFT);
561 regs_buff[549] = rd32(E1000_TDFHS);
562 regs_buff[550] = rd32(E1000_TDFPC);
563
564}
565
566static int igb_get_eeprom_len(struct net_device *netdev)
567{
568 struct igb_adapter *adapter = netdev_priv(netdev);
569 return adapter->hw.nvm.word_size * 2;
570}
571
572static int igb_get_eeprom(struct net_device *netdev,
573 struct ethtool_eeprom *eeprom, u8 *bytes)
574{
575 struct igb_adapter *adapter = netdev_priv(netdev);
576 struct e1000_hw *hw = &adapter->hw;
577 u16 *eeprom_buff;
578 int first_word, last_word;
579 int ret_val = 0;
580 u16 i;
581
582 if (eeprom->len == 0)
583 return -EINVAL;
584
585 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
586
587 first_word = eeprom->offset >> 1;
588 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
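	/* Example: offset = 3, len = 4 touches bytes 3..6, i.e. 16-bit
	 * words 1..3: first_word = 3 >> 1 = 1, last_word = 6 >> 1 = 3,
	 * so three words are read and the memcpy below re-applies the
	 * odd byte offset. */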
589
590 eeprom_buff = kmalloc(sizeof(u16) *
591 (last_word - first_word + 1), GFP_KERNEL);
592 if (!eeprom_buff)
593 return -ENOMEM;
594
595 if (hw->nvm.type == e1000_nvm_eeprom_spi)
596 ret_val = hw->nvm.ops.read_nvm(hw, first_word,
597 last_word - first_word + 1,
598 eeprom_buff);
599 else {
600 for (i = 0; i < last_word - first_word + 1; i++) {
601 ret_val = hw->nvm.ops.read_nvm(hw, first_word + i, 1,
602 &eeprom_buff[i]);
603 if (ret_val)
604 break;
605 }
606 }
607
608 /* Device's eeprom is always little-endian, word addressable */
609 for (i = 0; i < last_word - first_word + 1; i++)
610 le16_to_cpus(&eeprom_buff[i]);
611
612 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
613 eeprom->len);
614 kfree(eeprom_buff);
615
616 return ret_val;
617}
618
619static int igb_set_eeprom(struct net_device *netdev,
620 struct ethtool_eeprom *eeprom, u8 *bytes)
621{
622 struct igb_adapter *adapter = netdev_priv(netdev);
623 struct e1000_hw *hw = &adapter->hw;
624 u16 *eeprom_buff;
625 void *ptr;
626 int max_len, first_word, last_word, ret_val = 0;
627 u16 i;
628
629 if (eeprom->len == 0)
630 return -EOPNOTSUPP;
631
632 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
633 return -EFAULT;
634
635 max_len = hw->nvm.word_size * 2;
636
637 first_word = eeprom->offset >> 1;
638 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
639 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
640 if (!eeprom_buff)
641 return -ENOMEM;
642
643 ptr = (void *)eeprom_buff;
644
645 if (eeprom->offset & 1) {
646 /* need read/modify/write of first changed EEPROM word */
647 /* only the second byte of the word is being modified */
648 ret_val = hw->nvm.ops.read_nvm(hw, first_word, 1,
649 &eeprom_buff[0]);
650 ptr++;
651 }
652 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
653 /* need read/modify/write of last changed EEPROM word */
654 /* only the first byte of the word is being modified */
655 ret_val = hw->nvm.ops.read_nvm(hw, last_word, 1,
656 &eeprom_buff[last_word - first_word]);
657 }
658
659 /* Device's eeprom is always little-endian, word addressable */
660 for (i = 0; i < last_word - first_word + 1; i++)
661 le16_to_cpus(&eeprom_buff[i]);
662
663 memcpy(ptr, bytes, eeprom->len);
664
665 for (i = 0; i < last_word - first_word + 1; i++)
666 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
667
668 ret_val = hw->nvm.ops.write_nvm(hw, first_word,
669 last_word - first_word + 1, eeprom_buff);
670
671	/* Update the checksum if the write touched the checksummed
672	 * region at the start of the EEPROM */
673 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
674 igb_update_nvm_checksum(hw);
675
676 kfree(eeprom_buff);
677 return ret_val;
678}
679
680static void igb_get_drvinfo(struct net_device *netdev,
681 struct ethtool_drvinfo *drvinfo)
682{
683 struct igb_adapter *adapter = netdev_priv(netdev);
684 char firmware_version[32];
685 u16 eeprom_data;
686
687 strncpy(drvinfo->driver, igb_driver_name, 32);
688 strncpy(drvinfo->version, igb_driver_version, 32);
689
690 /* EEPROM image version # is reported as firmware version # for
691 * 82575 controllers */
692 adapter->hw.nvm.ops.read_nvm(&adapter->hw, 5, 1, &eeprom_data);
693 sprintf(firmware_version, "%d.%d-%d",
694 (eeprom_data & 0xF000) >> 12,
695 (eeprom_data & 0x0FF0) >> 4,
696 eeprom_data & 0x000F);
697
698 strncpy(drvinfo->fw_version, firmware_version, 32);
699 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
700 drvinfo->n_stats = IGB_STATS_LEN;
701 drvinfo->testinfo_len = IGB_TEST_LEN;
702 drvinfo->regdump_len = igb_get_regs_len(netdev);
703 drvinfo->eedump_len = igb_get_eeprom_len(netdev);
704}
705
706static void igb_get_ringparam(struct net_device *netdev,
707 struct ethtool_ringparam *ring)
708{
709 struct igb_adapter *adapter = netdev_priv(netdev);
710 struct igb_ring *tx_ring = adapter->tx_ring;
711 struct igb_ring *rx_ring = adapter->rx_ring;
712
713 ring->rx_max_pending = IGB_MAX_RXD;
714 ring->tx_max_pending = IGB_MAX_TXD;
715 ring->rx_mini_max_pending = 0;
716 ring->rx_jumbo_max_pending = 0;
717 ring->rx_pending = rx_ring->count;
718 ring->tx_pending = tx_ring->count;
719 ring->rx_mini_pending = 0;
720 ring->rx_jumbo_pending = 0;
721}
722
723static int igb_set_ringparam(struct net_device *netdev,
724 struct ethtool_ringparam *ring)
725{
726 struct igb_adapter *adapter = netdev_priv(netdev);
727 struct igb_buffer *old_buf;
728 struct igb_buffer *old_rx_buf;
729 void *old_desc;
730 int i, err;
731 u32 new_rx_count, new_tx_count, old_size;
732 dma_addr_t old_dma;
733
734 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
735 return -EINVAL;
736
737 new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD);
738 new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD);
739 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
740
741 new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD);
742 new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD);
743 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
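	/* Example: a request of tx_pending = 100 is clamped to
	 * [IGB_MIN_TXD, IGB_MAX_TXD] and then rounded up by ALIGN() to
	 * the hardware's descriptor multiple (assuming the customary
	 * REQ_TX_DESCRIPTOR_MULTIPLE of 8, 100 becomes 104). */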
744
745 if ((new_tx_count == adapter->tx_ring->count) &&
746 (new_rx_count == adapter->rx_ring->count)) {
747 /* nothing to do */
748 return 0;
749 }
750
751 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
752 msleep(1);
753
754 if (netif_running(adapter->netdev))
755 igb_down(adapter);
756
757 /*
758 * We can't just free everything and then setup again,
759 * because the ISRs in MSI-X mode get passed pointers
760 * to the tx and rx ring structs.
761 */
762 if (new_tx_count != adapter->tx_ring->count) {
763 for (i = 0; i < adapter->num_tx_queues; i++) {
764 /* Save existing descriptor ring */
765 old_buf = adapter->tx_ring[i].buffer_info;
766 old_desc = adapter->tx_ring[i].desc;
767 old_size = adapter->tx_ring[i].size;
768 old_dma = adapter->tx_ring[i].dma;
769 /* Try to allocate a new one */
770 adapter->tx_ring[i].buffer_info = NULL;
771 adapter->tx_ring[i].desc = NULL;
772 adapter->tx_ring[i].count = new_tx_count;
773 err = igb_setup_tx_resources(adapter,
774 &adapter->tx_ring[i]);
775 if (err) {
776 /* Restore the old one so at least
777 the adapter still works, even if
778 we failed the request */
779 adapter->tx_ring[i].buffer_info = old_buf;
780 adapter->tx_ring[i].desc = old_desc;
781 adapter->tx_ring[i].size = old_size;
782 adapter->tx_ring[i].dma = old_dma;
783 goto err_setup;
784 }
785 /* Free the old buffer manually */
786 vfree(old_buf);
787 pci_free_consistent(adapter->pdev, old_size,
788 old_desc, old_dma);
789 }
790 }
791
792 if (new_rx_count != adapter->rx_ring->count) {
793 for (i = 0; i < adapter->num_rx_queues; i++) {
794
795 old_rx_buf = adapter->rx_ring[i].buffer_info;
796 old_desc = adapter->rx_ring[i].desc;
797 old_size = adapter->rx_ring[i].size;
798 old_dma = adapter->rx_ring[i].dma;
799
800 adapter->rx_ring[i].buffer_info = NULL;
801 adapter->rx_ring[i].desc = NULL;
802 adapter->rx_ring[i].dma = 0;
803 adapter->rx_ring[i].count = new_rx_count;
804 err = igb_setup_rx_resources(adapter,
805 &adapter->rx_ring[i]);
806 if (err) {
807 adapter->rx_ring[i].buffer_info = old_rx_buf;
808 adapter->rx_ring[i].desc = old_desc;
809 adapter->rx_ring[i].size = old_size;
810 adapter->rx_ring[i].dma = old_dma;
811 goto err_setup;
812 }
813
814 vfree(old_rx_buf);
815 pci_free_consistent(adapter->pdev, old_size, old_desc,
816 old_dma);
817 }
818 }
819
820 err = 0;
821err_setup:
822 if (netif_running(adapter->netdev))
823 igb_up(adapter);
824
825 clear_bit(__IGB_RESETTING, &adapter->state);
826 return err;
827}
828
829/* ethtool register test data */
830struct igb_reg_test {
831 u16 reg;
832 u8 array_len;
833 u8 test_type;
834 u32 mask;
835 u32 write;
836};
837
838/* In the hardware, registers are laid out either singly, in arrays
839 * spaced 0x100 bytes apart, or in contiguous tables. We assume
840 * most tests take place on arrays or single registers (handled
841 * as a single-element array) and special-case the tables.
842 * Table tests are always pattern tests.
843 *
844 * We also make provision for some required setup steps by specifying
845 * registers to be written without any read-back testing.
846 */
847
848#define PATTERN_TEST 1
849#define SET_READ_TEST 2
850#define WRITE_NO_TEST 3
851#define TABLE32_TEST 4
852#define TABLE64_TEST_LO 5
853#define TABLE64_TEST_HI 6
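/* Example: a TABLE64 entry walks 8-byte strides over array_len entries;
 * for E1000_RA (array_len 16) the low-word test touches reg + i*8
 * (RAL[i]) and the high-word test touches reg + 4 + i*8 (RAH[i]),
 * matching the TABLE64_TEST_LO/HI cases in igb_reg_test() below. */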
854
855/* default register test */
856static struct igb_reg_test reg_test_82575[] = {
857 { E1000_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
858 { E1000_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
859 { E1000_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
860 { E1000_VET, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
861 { E1000_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
862 { E1000_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
863 { E1000_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
864 /* Enable all four RX queues before testing. */
865 { E1000_RXDCTL(0), 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
866 /* RDH is read-only for 82575, only test RDT. */
867 { E1000_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
868 { E1000_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
869 { E1000_FCRTH, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
870 { E1000_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
871 { E1000_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
872 { E1000_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
873 { E1000_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
874 { E1000_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
875 { E1000_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
876 { E1000_RCTL, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
877 { E1000_RCTL, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
878 { E1000_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
879 { E1000_TXCW, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
880 { E1000_RA, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
881 { E1000_RA, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF },
882 { E1000_MTA, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
883 { 0, 0, 0, 0 }
884};
885
886static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
887 int reg, u32 mask, u32 write)
888{
889 u32 pat, val;
890 u32 _test[] =
891 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
892 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
893 writel((_test[pat] & write), (adapter->hw.hw_addr + reg));
894 val = readl(adapter->hw.hw_addr + reg);
895 if (val != (_test[pat] & write & mask)) {
896 dev_err(&adapter->pdev->dev, "pattern test reg %04X "
897 "failed: got 0x%08X expected 0x%08X\n",
898 reg, val, (_test[pat] & write & mask));
899 *data = reg;
900 return 1;
901 }
902 }
903 return 0;
904}
905
906static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
907 int reg, u32 mask, u32 write)
908{
909 u32 val;
910 writel((write & mask), (adapter->hw.hw_addr + reg));
911 val = readl(adapter->hw.hw_addr + reg);
912 if ((write & mask) != (val & mask)) {
913 dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
914 " got 0x%08X expected 0x%08X\n", reg,
915 (val & mask), (write & mask));
916 *data = reg;
917 return 1;
918 }
919 return 0;
920}
921
922#define REG_PATTERN_TEST(reg, mask, write) \
923 do { \
924 if (reg_pattern_test(adapter, data, reg, mask, write)) \
925 return 1; \
926 } while (0)
927
928#define REG_SET_AND_CHECK(reg, mask, write) \
929 do { \
930 if (reg_set_and_check(adapter, data, reg, mask, write)) \
931 return 1; \
932 } while (0)
933
934static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
935{
936 struct e1000_hw *hw = &adapter->hw;
937 struct igb_reg_test *test;
938 u32 value, before, after;
939 u32 i, toggle;
940
941 toggle = 0x7FFFF3FF;
942 test = reg_test_82575;
943
944 /* Because the status register is such a special case,
945 * we handle it separately from the rest of the register
946 * tests. Some bits are read-only, some toggle, and some
947 * are writable on newer MACs.
948 */
949 before = rd32(E1000_STATUS);
950 value = (rd32(E1000_STATUS) & toggle);
951 wr32(E1000_STATUS, toggle);
952 after = rd32(E1000_STATUS) & toggle;
953 if (value != after) {
954 dev_err(&adapter->pdev->dev, "failed STATUS register test "
955 "got: 0x%08X expected: 0x%08X\n", after, value);
956 *data = 1;
957 return 1;
958 }
959 /* restore previous status */
960 wr32(E1000_STATUS, before);
961
962 /* Perform the remainder of the register test, looping through
963 * the test table until we either fail or reach the null entry.
964 */
965 while (test->reg) {
966 for (i = 0; i < test->array_len; i++) {
967 switch (test->test_type) {
968 case PATTERN_TEST:
969 REG_PATTERN_TEST(test->reg + (i * 0x100),
970 test->mask,
971 test->write);
972 break;
973 case SET_READ_TEST:
974 REG_SET_AND_CHECK(test->reg + (i * 0x100),
975 test->mask,
976 test->write);
977 break;
978 case WRITE_NO_TEST:
979 writel(test->write,
980 (adapter->hw.hw_addr + test->reg)
981 + (i * 0x100));
982 break;
983 case TABLE32_TEST:
984 REG_PATTERN_TEST(test->reg + (i * 4),
985 test->mask,
986 test->write);
987 break;
988 case TABLE64_TEST_LO:
989 REG_PATTERN_TEST(test->reg + (i * 8),
990 test->mask,
991 test->write);
992 break;
993 case TABLE64_TEST_HI:
994 REG_PATTERN_TEST((test->reg + 4) + (i * 8),
995 test->mask,
996 test->write);
997 break;
998 }
999 }
1000 test++;
1001 }
1002
1003 *data = 0;
1004 return 0;
1005}
1006
1007static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1008{
1009 u16 temp;
1010 u16 checksum = 0;
1011 u16 i;
1012
1013 *data = 0;
1014 /* Read and add up the contents of the EEPROM */
1015 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
1016 if ((adapter->hw.nvm.ops.read_nvm(&adapter->hw, i, 1, &temp))
1017 < 0) {
1018 *data = 1;
1019 break;
1020 }
1021 checksum += temp;
1022 }
1023
1024 /* If Checksum is not Correct return error else test passed */
1025 if ((checksum != (u16) NVM_SUM) && !(*data))
1026 *data = 2;
1027
1028 return *data;
1029}
1030
1031static irqreturn_t igb_test_intr(int irq, void *data)
1032{
1033 struct net_device *netdev = (struct net_device *) data;
1034 struct igb_adapter *adapter = netdev_priv(netdev);
1035 struct e1000_hw *hw = &adapter->hw;
1036
1037 adapter->test_icr |= rd32(E1000_ICR);
1038
1039 return IRQ_HANDLED;
1040}
1041
1042static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1043{
1044 struct e1000_hw *hw = &adapter->hw;
1045 struct net_device *netdev = adapter->netdev;
1046 u32 mask, i = 0, shared_int = true;
1047 u32 irq = adapter->pdev->irq;
1048
1049 *data = 0;
1050
1051 /* Hook up test interrupt handler just for this test */
1052 if (adapter->msix_entries) {
1053 /* NOTE: we don't test MSI-X interrupts here, yet */
1054 return 0;
1055 } else if (adapter->msi_enabled) {
1056 shared_int = false;
1057 if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) {
1058 *data = 1;
1059 return -1;
1060 }
1061 } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
1062 netdev->name, netdev)) {
1063 shared_int = false;
1064 } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
1065 netdev->name, netdev)) {
1066 *data = 1;
1067 return -1;
1068 }
1069 dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
1070 (shared_int ? "shared" : "unshared"));
1071
1072 /* Disable all the interrupts */
1073 wr32(E1000_IMC, 0xFFFFFFFF);
1074 msleep(10);
1075
1076 /* Test each interrupt */
1077 for (; i < 10; i++) {
1078 /* Interrupt to test */
1079 mask = 1 << i;
1080
1081 if (!shared_int) {
1082 /* Disable the interrupt to be reported in
1083 * the cause register and then force the same
1084 * interrupt and see if one gets posted. If
1085 * an interrupt was posted to the bus, the
1086 * test failed.
1087 */
1088 adapter->test_icr = 0;
1089 wr32(E1000_IMC, ~mask & 0x00007FFF);
1090 wr32(E1000_ICS, ~mask & 0x00007FFF);
1091 msleep(10);
1092
1093 if (adapter->test_icr & mask) {
1094 *data = 3;
1095 break;
1096 }
1097 }
1098
1099 /* Enable the interrupt to be reported in
1100 * the cause register and then force the same
1101 * interrupt and see if one gets posted. If
1102 * an interrupt was not posted to the bus, the
1103 * test failed.
1104 */
1105 adapter->test_icr = 0;
1106 wr32(E1000_IMS, mask);
1107 wr32(E1000_ICS, mask);
1108 msleep(10);
1109
1110 if (!(adapter->test_icr & mask)) {
1111 *data = 4;
1112 break;
1113 }
1114
1115 if (!shared_int) {
1116 /* Disable the other interrupts to be reported in
1117 * the cause register and then force the other
1118 * interrupts and see if any get posted. If
1119 * an interrupt was posted to the bus, the
1120 * test failed.
1121 */
1122 adapter->test_icr = 0;
1123 wr32(E1000_IMC, ~mask & 0x00007FFF);
1124 wr32(E1000_ICS, ~mask & 0x00007FFF);
1125 msleep(10);
1126
1127 if (adapter->test_icr) {
1128 *data = 5;
1129 break;
1130 }
1131 }
1132 }
1133
1134 /* Disable all the interrupts */
1135 wr32(E1000_IMC, 0xFFFFFFFF);
1136 msleep(10);
1137
1138 /* Unhook test interrupt handler */
1139 free_irq(irq, netdev);
1140
1141 return *data;
1142}
1143
1144static void igb_free_desc_rings(struct igb_adapter *adapter)
1145{
1146 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1147 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1148 struct pci_dev *pdev = adapter->pdev;
1149 int i;
1150
1151 if (tx_ring->desc && tx_ring->buffer_info) {
1152 for (i = 0; i < tx_ring->count; i++) {
1153 struct igb_buffer *buf = &(tx_ring->buffer_info[i]);
1154 if (buf->dma)
1155 pci_unmap_single(pdev, buf->dma, buf->length,
1156 PCI_DMA_TODEVICE);
1157 if (buf->skb)
1158 dev_kfree_skb(buf->skb);
1159 }
1160 }
1161
1162 if (rx_ring->desc && rx_ring->buffer_info) {
1163 for (i = 0; i < rx_ring->count; i++) {
1164 struct igb_buffer *buf = &(rx_ring->buffer_info[i]);
1165 if (buf->dma)
1166 pci_unmap_single(pdev, buf->dma,
1167 IGB_RXBUFFER_2048,
1168 PCI_DMA_FROMDEVICE);
1169 if (buf->skb)
1170 dev_kfree_skb(buf->skb);
1171 }
1172 }
1173
1174 if (tx_ring->desc) {
1175 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
1176 tx_ring->dma);
1177 tx_ring->desc = NULL;
1178 }
1179 if (rx_ring->desc) {
1180 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
1181 rx_ring->dma);
1182 rx_ring->desc = NULL;
1183 }
1184
1185 kfree(tx_ring->buffer_info);
1186 tx_ring->buffer_info = NULL;
1187 kfree(rx_ring->buffer_info);
1188 rx_ring->buffer_info = NULL;
1189
1190 return;
1191}
1192
1193static int igb_setup_desc_rings(struct igb_adapter *adapter)
1194{
1195 struct e1000_hw *hw = &adapter->hw;
1196 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1197 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1198 struct pci_dev *pdev = adapter->pdev;
1199 u32 rctl;
1200 int i, ret_val;
1201
1202 /* Setup Tx descriptor ring and Tx buffers */
1203
1204 if (!tx_ring->count)
1205 tx_ring->count = IGB_DEFAULT_TXD;
1206
1207 tx_ring->buffer_info = kcalloc(tx_ring->count,
1208 sizeof(struct igb_buffer),
1209 GFP_KERNEL);
1210 if (!tx_ring->buffer_info) {
1211 ret_val = 1;
1212 goto err_nomem;
1213 }
1214
1215 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
1216 tx_ring->size = ALIGN(tx_ring->size, 4096);
1217 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
1218 &tx_ring->dma);
1219 if (!tx_ring->desc) {
1220 ret_val = 2;
1221 goto err_nomem;
1222 }
1223 tx_ring->next_to_use = tx_ring->next_to_clean = 0;
1224
1225 wr32(E1000_TDBAL(0),
1226 ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1227 wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
1228 wr32(E1000_TDLEN(0),
1229 tx_ring->count * sizeof(struct e1000_tx_desc));
1230 wr32(E1000_TDH(0), 0);
1231 wr32(E1000_TDT(0), 0);
1232 wr32(E1000_TCTL,
1233 E1000_TCTL_PSP | E1000_TCTL_EN |
1234 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1235 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1236
1237 for (i = 0; i < tx_ring->count; i++) {
1238 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
1239 struct sk_buff *skb;
1240 unsigned int size = 1024;
1241
1242 skb = alloc_skb(size, GFP_KERNEL);
1243 if (!skb) {
1244 ret_val = 3;
1245 goto err_nomem;
1246 }
1247 skb_put(skb, size);
1248 tx_ring->buffer_info[i].skb = skb;
1249 tx_ring->buffer_info[i].length = skb->len;
1250 tx_ring->buffer_info[i].dma =
1251 pci_map_single(pdev, skb->data, skb->len,
1252 PCI_DMA_TODEVICE);
1253 tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
1254 tx_desc->lower.data = cpu_to_le32(skb->len);
1255 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
1256 E1000_TXD_CMD_IFCS |
1257 E1000_TXD_CMD_RS);
1258 tx_desc->upper.data = 0;
1259 }
1260
1261 /* Setup Rx descriptor ring and Rx buffers */
1262
1263 if (!rx_ring->count)
1264 rx_ring->count = IGB_DEFAULT_RXD;
1265
1266 rx_ring->buffer_info = kcalloc(rx_ring->count,
1267 sizeof(struct igb_buffer),
1268 GFP_KERNEL);
1269 if (!rx_ring->buffer_info) {
1270 ret_val = 4;
1271 goto err_nomem;
1272 }
1273
1274 rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
1275 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
1276 &rx_ring->dma);
1277 if (!rx_ring->desc) {
1278 ret_val = 5;
1279 goto err_nomem;
1280 }
1281 rx_ring->next_to_use = rx_ring->next_to_clean = 0;
1282
1283 rctl = rd32(E1000_RCTL);
1284 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1285 wr32(E1000_RDBAL(0),
1286 ((u64) rx_ring->dma & 0xFFFFFFFF));
1287 wr32(E1000_RDBAH(0),
1288 ((u64) rx_ring->dma >> 32));
1289 wr32(E1000_RDLEN(0), rx_ring->size);
1290 wr32(E1000_RDH(0), 0);
1291 wr32(E1000_RDT(0), 0);
1292 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
1293 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1294 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1295 wr32(E1000_RCTL, rctl);
1296 wr32(E1000_SRRCTL(0), 0);
1297
1298 for (i = 0; i < rx_ring->count; i++) {
1299 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
1300 struct sk_buff *skb;
1301
1302 skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN,
1303 GFP_KERNEL);
1304 if (!skb) {
1305 ret_val = 6;
1306 goto err_nomem;
1307 }
1308 skb_reserve(skb, NET_IP_ALIGN);
1309 rx_ring->buffer_info[i].skb = skb;
1310 rx_ring->buffer_info[i].dma =
1311 pci_map_single(pdev, skb->data, IGB_RXBUFFER_2048,
1312 PCI_DMA_FROMDEVICE);
1313 rx_desc->buffer_addr = cpu_to_le64(rx_ring->buffer_info[i].dma);
1314 memset(skb->data, 0x00, skb->len);
1315 }
1316
1317 return 0;
1318
1319err_nomem:
1320 igb_free_desc_rings(adapter);
1321 return ret_val;
1322}
1323
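Sizing note for the rings above: descriptor memory is count * sizeof(struct e1000_tx_desc), rounded up to a 4 KB boundary with ALIGN(). A quick sketch of that arithmetic, assuming the 16-byte legacy descriptor layout and a 256-entry test ring:

	#include <stdio.h>

	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned int count = 256; /* assumed IGB_DEFAULT_TXD-sized test ring */
		unsigned int desc = 16;   /* sizeof(struct e1000_tx_desc), legacy format */

		printf("%u descriptors -> %u bytes of descriptor memory\n",
		       count, ALIGN(count * desc, 4096u));
		return 0;
	}
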
1324static void igb_phy_disable_receiver(struct igb_adapter *adapter)
1325{
1326 struct e1000_hw *hw = &adapter->hw;
1327
1328 /* Write out to PHY registers 29 and 30 to disable the Receiver. */
1329 hw->phy.ops.write_phy_reg(hw, 29, 0x001F);
1330 hw->phy.ops.write_phy_reg(hw, 30, 0x8FFC);
1331 hw->phy.ops.write_phy_reg(hw, 29, 0x001A);
1332 hw->phy.ops.write_phy_reg(hw, 30, 0x8FF0);
1333}
1334
1335static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1336{
1337 struct e1000_hw *hw = &adapter->hw;
1338 u32 ctrl_reg = 0;
1339 u32 stat_reg = 0;
1340
1341 hw->mac.autoneg = false;
1342
1343 if (hw->phy.type == e1000_phy_m88) {
1344 /* Auto-MDI/MDIX Off */
1345 hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
1346 /* reset to update Auto-MDI/MDIX */
1347 hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x9140);
1348 /* autoneg off */
1349 hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x8140);
1350 }
1351
1352 ctrl_reg = rd32(E1000_CTRL);
1353
1354 /* force 1000, set loopback */
1355 hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x4140);
1356
1357 /* Now set up the MAC to the same speed/duplex as the PHY. */
1358 ctrl_reg = rd32(E1000_CTRL);
1359 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1360 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1361 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1362 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1363 E1000_CTRL_FD); /* Force Duplex to FULL */
1364
1365 if (hw->phy.media_type == e1000_media_type_copper &&
1366 hw->phy.type == e1000_phy_m88)
1367 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1368 else {
1369 /* Set the ILOS bit on the fiber Nic if half duplex link is
1370 * detected. */
1371 stat_reg = rd32(E1000_STATUS);
1372 if ((stat_reg & E1000_STATUS_FD) == 0)
1373 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
1374 }
1375
1376 wr32(E1000_CTRL, ctrl_reg);
1377
 1378	/* Disable the receiver on the PHY so that the PHY does not begin
 1379	 * to autoneg when a cable is reconnected to the NIC.
 1380	 */
1381 if (hw->phy.type == e1000_phy_m88)
1382 igb_phy_disable_receiver(adapter);
1383
1384 udelay(500);
1385
1386 return 0;
1387}
1388
1389static int igb_set_phy_loopback(struct igb_adapter *adapter)
1390{
1391 return igb_integrated_phy_loopback(adapter);
1392}
1393
1394static int igb_setup_loopback_test(struct igb_adapter *adapter)
1395{
1396 struct e1000_hw *hw = &adapter->hw;
1397 u32 rctl;
1398
1399 if (hw->phy.media_type == e1000_media_type_fiber ||
1400 hw->phy.media_type == e1000_media_type_internal_serdes) {
1401 rctl = rd32(E1000_RCTL);
1402 rctl |= E1000_RCTL_LBM_TCVR;
1403 wr32(E1000_RCTL, rctl);
1404 return 0;
1405 } else if (hw->phy.media_type == e1000_media_type_copper) {
1406 return igb_set_phy_loopback(adapter);
1407 }
1408
1409 return 7;
1410}
1411
1412static void igb_loopback_cleanup(struct igb_adapter *adapter)
1413{
1414 struct e1000_hw *hw = &adapter->hw;
1415 u32 rctl;
1416 u16 phy_reg;
1417
1418 rctl = rd32(E1000_RCTL);
1419 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1420 wr32(E1000_RCTL, rctl);
1421
1422 hw->mac.autoneg = true;
1423 hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_reg);
1424 if (phy_reg & MII_CR_LOOPBACK) {
1425 phy_reg &= ~MII_CR_LOOPBACK;
1426 hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_reg);
1427 igb_phy_sw_reset(hw);
1428 }
1429}
1430
1431static void igb_create_lbtest_frame(struct sk_buff *skb,
1432 unsigned int frame_size)
1433{
1434 memset(skb->data, 0xFF, frame_size);
1435 frame_size &= ~1;
1436 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
1437 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
1438 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
1439}
1440
1441static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1442{
1443 frame_size &= ~1;
1444 if (*(skb->data + 3) == 0xFF)
1445 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
1446 (*(skb->data + frame_size / 2 + 12) == 0xAF))
1447 return 0;
1448 return 13;
1449}
1450
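The two helpers above define the loopback payload: 0xFF in the first half, 0xAA through most of the second half, and 0xBE/0xAF marker bytes at fixed offsets past the midpoint. A self-contained user-space sketch of the same pattern and check:

	#include <stdio.h>
	#include <string.h>

	static void create_frame(unsigned char *data, unsigned int frame_size)
	{
		memset(data, 0xFF, frame_size);
		frame_size &= ~1u;
		memset(&data[frame_size / 2], 0xAA, frame_size / 2 - 1);
		data[frame_size / 2 + 10] = 0xBE;	/* markers checked on receive */
		data[frame_size / 2 + 12] = 0xAF;
	}

	static int check_frame(const unsigned char *data, unsigned int frame_size)
	{
		frame_size &= ~1u;
		if (data[3] == 0xFF &&
		    data[frame_size / 2 + 10] == 0xBE &&
		    data[frame_size / 2 + 12] == 0xAF)
			return 0;
		return 13;	/* same miscompare code the driver uses */
	}

	int main(void)
	{
		unsigned char buf[1024];

		create_frame(buf, sizeof(buf));
		printf("frame check: %d\n", check_frame(buf, sizeof(buf)));
		return 0;
	}
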
1451static int igb_run_loopback_test(struct igb_adapter *adapter)
1452{
1453 struct e1000_hw *hw = &adapter->hw;
1454 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1455 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1456 struct pci_dev *pdev = adapter->pdev;
1457 int i, j, k, l, lc, good_cnt;
1458 int ret_val = 0;
1459 unsigned long time;
1460
1461 wr32(E1000_RDT(0), rx_ring->count - 1);
1462
 1463	/* Calculate the loop count based on the largest descriptor ring.
 1464	 * The idea is to wrap the largest ring a number of times using 64
 1465	 * send/receive pairs during each loop.
 1466	 */
1467
1468 if (rx_ring->count <= tx_ring->count)
1469 lc = ((tx_ring->count / 64) * 2) + 1;
1470 else
1471 lc = ((rx_ring->count / 64) * 2) + 1;
1472
1473 k = l = 0;
1474 for (j = 0; j <= lc; j++) { /* loop count loop */
1475 for (i = 0; i < 64; i++) { /* send the packets */
1476 igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1477 1024);
1478 pci_dma_sync_single_for_device(pdev,
1479 tx_ring->buffer_info[k].dma,
1480 tx_ring->buffer_info[k].length,
1481 PCI_DMA_TODEVICE);
1482 k++;
1483 if (k == tx_ring->count)
1484 k = 0;
1485 }
1486 wr32(E1000_TDT(0), k);
1487 msleep(200);
1488 time = jiffies; /* set the start time for the receive */
1489 good_cnt = 0;
1490 do { /* receive the sent packets */
1491 pci_dma_sync_single_for_cpu(pdev,
1492 rx_ring->buffer_info[l].dma,
1493 IGB_RXBUFFER_2048,
1494 PCI_DMA_FROMDEVICE);
1495
1496 ret_val = igb_check_lbtest_frame(
1497 rx_ring->buffer_info[l].skb, 1024);
1498 if (!ret_val)
1499 good_cnt++;
1500 l++;
1501 if (l == rx_ring->count)
1502 l = 0;
 1503			/* time + 20 msecs (200 msecs on 2.4) is more than
 1504			 * enough time to complete the receives; if it's
 1505			 * exceeded, break out and flag an error
 1506			 */
1507 } while (good_cnt < 64 && jiffies < (time + 20));
1508 if (good_cnt != 64) {
1509 ret_val = 13; /* ret_val is the same as mis-compare */
1510 break;
1511 }
1512 if (jiffies >= (time + 20)) {
1513 ret_val = 14; /* error code for time out error */
1514 break;
1515 }
1516 } /* end loop count loop */
1517 return ret_val;
1518}
1519
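The loop count above wraps the larger ring roughly twice, 64 frames at a time; since j runs from 0 through lc inclusive, (lc + 1) * 64 frames are sent in total. Worked through for 256-descriptor rings (the assumed defaults):

	#include <stdio.h>

	int main(void)
	{
		int tx_count = 256, rx_count = 256;	/* assumed default ring sizes */
		int larger = rx_count > tx_count ? rx_count : tx_count;
		int lc = ((larger / 64) * 2) + 1;

		printf("lc=%d -> %d frames sent\n", lc, (lc + 1) * 64);
		return 0;
	}
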
1520static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
1521{
1522 /* PHY loopback cannot be performed if SoL/IDER
1523 * sessions are active */
1524 if (igb_check_reset_block(&adapter->hw)) {
1525 dev_err(&adapter->pdev->dev,
1526 "Cannot do PHY loopback test "
1527 "when SoL/IDER is active.\n");
1528 *data = 0;
1529 goto out;
1530 }
1531 *data = igb_setup_desc_rings(adapter);
1532 if (*data)
1533 goto out;
1534 *data = igb_setup_loopback_test(adapter);
1535 if (*data)
1536 goto err_loopback;
1537 *data = igb_run_loopback_test(adapter);
1538 igb_loopback_cleanup(adapter);
1539
1540err_loopback:
1541 igb_free_desc_rings(adapter);
1542out:
1543 return *data;
1544}
1545
1546static int igb_link_test(struct igb_adapter *adapter, u64 *data)
1547{
1548 struct e1000_hw *hw = &adapter->hw;
1549 *data = 0;
1550 if (hw->phy.media_type == e1000_media_type_internal_serdes) {
1551 int i = 0;
1552 hw->mac.serdes_has_link = false;
1553
1554 /* On some blade server designs, link establishment
1555 * could take as long as 2-3 minutes */
1556 do {
1557 hw->mac.ops.check_for_link(&adapter->hw);
1558 if (hw->mac.serdes_has_link)
1559 return *data;
1560 msleep(20);
1561 } while (i++ < 3750);
1562
1563 *data = 1;
1564 } else {
1565 hw->mac.ops.check_for_link(&adapter->hw);
1566 if (hw->mac.autoneg)
1567 msleep(4000);
1568
1569 if (!(rd32(E1000_STATUS) &
1570 E1000_STATUS_LU))
1571 *data = 1;
1572 }
1573 return *data;
1574}
1575
1576static void igb_diag_test(struct net_device *netdev,
1577 struct ethtool_test *eth_test, u64 *data)
1578{
1579 struct igb_adapter *adapter = netdev_priv(netdev);
1580 u16 autoneg_advertised;
1581 u8 forced_speed_duplex, autoneg;
1582 bool if_running = netif_running(netdev);
1583
1584 set_bit(__IGB_TESTING, &adapter->state);
1585 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1586 /* Offline tests */
1587
1588 /* save speed, duplex, autoneg settings */
1589 autoneg_advertised = adapter->hw.phy.autoneg_advertised;
1590 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
1591 autoneg = adapter->hw.mac.autoneg;
1592
1593 dev_info(&adapter->pdev->dev, "offline testing starting\n");
1594
 1595		/* Link test performed before hardware reset so that autoneg
 1596		 * doesn't interfere with the test result */
1597 if (igb_link_test(adapter, &data[4]))
1598 eth_test->flags |= ETH_TEST_FL_FAILED;
1599
1600 if (if_running)
1601 /* indicate we're in test mode */
1602 dev_close(netdev);
1603 else
1604 igb_reset(adapter);
1605
1606 if (igb_reg_test(adapter, &data[0]))
1607 eth_test->flags |= ETH_TEST_FL_FAILED;
1608
1609 igb_reset(adapter);
1610 if (igb_eeprom_test(adapter, &data[1]))
1611 eth_test->flags |= ETH_TEST_FL_FAILED;
1612
1613 igb_reset(adapter);
1614 if (igb_intr_test(adapter, &data[2]))
1615 eth_test->flags |= ETH_TEST_FL_FAILED;
1616
1617 igb_reset(adapter);
1618 if (igb_loopback_test(adapter, &data[3]))
1619 eth_test->flags |= ETH_TEST_FL_FAILED;
1620
1621 /* restore speed, duplex, autoneg settings */
1622 adapter->hw.phy.autoneg_advertised = autoneg_advertised;
1623 adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
1624 adapter->hw.mac.autoneg = autoneg;
1625
 1626		/* force this routine to wait until autoneg completes or times out */
1627 adapter->hw.phy.autoneg_wait_to_complete = true;
1628 igb_reset(adapter);
1629 adapter->hw.phy.autoneg_wait_to_complete = false;
1630
1631 clear_bit(__IGB_TESTING, &adapter->state);
1632 if (if_running)
1633 dev_open(netdev);
1634 } else {
1635 dev_info(&adapter->pdev->dev, "online testing starting\n");
1636 /* Online tests */
1637 if (igb_link_test(adapter, &data[4]))
1638 eth_test->flags |= ETH_TEST_FL_FAILED;
1639
1640 /* Online tests aren't run; pass by default */
1641 data[0] = 0;
1642 data[1] = 0;
1643 data[2] = 0;
1644 data[3] = 0;
1645
1646 clear_bit(__IGB_TESTING, &adapter->state);
1647 }
1648 msleep_interruptible(4 * 1000);
1649}
1650
1651static int igb_wol_exclusion(struct igb_adapter *adapter,
1652 struct ethtool_wolinfo *wol)
1653{
1654 struct e1000_hw *hw = &adapter->hw;
1655 int retval = 1; /* fail by default */
1656
1657 switch (hw->device_id) {
1658 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1659 /* WoL not supported */
1660 wol->supported = 0;
1661 break;
1662 case E1000_DEV_ID_82575EB_FIBER_SERDES:
1663 /* Wake events not supported on port B */
1664 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) {
1665 wol->supported = 0;
1666 break;
1667 }
1668 /* return success for non excluded adapter ports */
1669 retval = 0;
1670 break;
1671 default:
 1672		/* dual port cards only support WoL on port A from now on,
 1673		 * unless it was enabled in the eeprom for port B,
 1674		 * so exclude FUNC_1 ports from having WoL enabled */
1675 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 &&
1676 !adapter->eeprom_wol) {
1677 wol->supported = 0;
1678 break;
1679 }
1680
1681 retval = 0;
1682 }
1683
1684 return retval;
1685}
1686
1687static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1688{
1689 struct igb_adapter *adapter = netdev_priv(netdev);
1690
1691 wol->supported = WAKE_UCAST | WAKE_MCAST |
1692 WAKE_BCAST | WAKE_MAGIC;
1693 wol->wolopts = 0;
1694
1695 /* this function will set ->supported = 0 and return 1 if wol is not
1696 * supported by this hardware */
1697 if (igb_wol_exclusion(adapter, wol))
1698 return;
1699
1700 /* apply any specific unsupported masks here */
1701 switch (adapter->hw.device_id) {
1702 default:
1703 break;
1704 }
1705
1706 if (adapter->wol & E1000_WUFC_EX)
1707 wol->wolopts |= WAKE_UCAST;
1708 if (adapter->wol & E1000_WUFC_MC)
1709 wol->wolopts |= WAKE_MCAST;
1710 if (adapter->wol & E1000_WUFC_BC)
1711 wol->wolopts |= WAKE_BCAST;
1712 if (adapter->wol & E1000_WUFC_MAG)
1713 wol->wolopts |= WAKE_MAGIC;
1714
1715 return;
1716}
1717
1718static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1719{
1720 struct igb_adapter *adapter = netdev_priv(netdev);
1721 struct e1000_hw *hw = &adapter->hw;
1722
1723 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1724 return -EOPNOTSUPP;
1725
1726 if (igb_wol_exclusion(adapter, wol))
1727 return wol->wolopts ? -EOPNOTSUPP : 0;
1728
1729 switch (hw->device_id) {
1730 default:
1731 break;
1732 }
1733
1734 /* these settings will always override what we currently have */
1735 adapter->wol = 0;
1736
1737 if (wol->wolopts & WAKE_UCAST)
1738 adapter->wol |= E1000_WUFC_EX;
1739 if (wol->wolopts & WAKE_MCAST)
1740 adapter->wol |= E1000_WUFC_MC;
1741 if (wol->wolopts & WAKE_BCAST)
1742 adapter->wol |= E1000_WUFC_BC;
1743 if (wol->wolopts & WAKE_MAGIC)
1744 adapter->wol |= E1000_WUFC_MAG;
1745
1746 return 0;
1747}
1748
1749/* toggle LED 4 times per second = 2 "blinks" per second */
1750#define IGB_ID_INTERVAL (HZ/4)
1751
1752/* bit defines for adapter->led_status */
1753#define IGB_LED_ON 0
1754
1755static int igb_phys_id(struct net_device *netdev, u32 data)
1756{
1757 struct igb_adapter *adapter = netdev_priv(netdev);
1758 struct e1000_hw *hw = &adapter->hw;
1759
1760 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
1761 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
1762
1763 igb_blink_led(hw);
1764 msleep_interruptible(data * 1000);
1765
1766 igb_led_off(hw);
1767 clear_bit(IGB_LED_ON, &adapter->led_status);
1768 igb_cleanup_led(hw);
1769
1770 return 0;
1771}
1772
1773static int igb_set_coalesce(struct net_device *netdev,
1774 struct ethtool_coalesce *ec)
1775{
1776 struct igb_adapter *adapter = netdev_priv(netdev);
1777
1778 if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
1779 ((ec->rx_coalesce_usecs > 3) &&
1780 (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
1781 (ec->rx_coalesce_usecs == 2))
1782 return -EINVAL;
1783
 1784	/* convert to a rate of irqs per second */
1785 if (ec->rx_coalesce_usecs <= 3)
1786 adapter->itr_setting = ec->rx_coalesce_usecs;
1787 else
1788 adapter->itr_setting = (1000000 / ec->rx_coalesce_usecs);
1789
1790 if (netif_running(netdev))
1791 igb_reinit_locked(adapter);
1792
1793 return 0;
1794}
1795
1796static int igb_get_coalesce(struct net_device *netdev,
1797 struct ethtool_coalesce *ec)
1798{
1799 struct igb_adapter *adapter = netdev_priv(netdev);
1800
1801 if (adapter->itr_setting <= 3)
1802 ec->rx_coalesce_usecs = adapter->itr_setting;
1803 else
1804 ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
1805
1806 return 0;
1807}
1808
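adapter->itr_setting holds either one of the special modes 0-3 or an interrupt rate, so the set/get pair above converts in both directions with 1000000 / x. For example, a requested rx-usecs of 125 is stored as 8000 interrupts per second and reported back as 125:

	#include <stdio.h>

	int main(void)
	{
		unsigned int usecs = 125;	/* hypothetical ethtool rx-usecs request */
		unsigned int itr = usecs <= 3 ? usecs : 1000000 / usecs;
		unsigned int back = itr <= 3 ? itr : 1000000 / itr;

		printf("rx-usecs %u -> itr %u irqs/s -> reported %u usecs\n",
		       usecs, itr, back);
		return 0;
	}
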
1809
1810static int igb_nway_reset(struct net_device *netdev)
1811{
1812 struct igb_adapter *adapter = netdev_priv(netdev);
1813 if (netif_running(netdev))
1814 igb_reinit_locked(adapter);
1815 return 0;
1816}
1817
1818static int igb_get_sset_count(struct net_device *netdev, int sset)
1819{
1820 switch (sset) {
1821 case ETH_SS_STATS:
1822 return IGB_STATS_LEN;
1823 case ETH_SS_TEST:
1824 return IGB_TEST_LEN;
1825 default:
1826 return -ENOTSUPP;
1827 }
1828}
1829
1830static void igb_get_ethtool_stats(struct net_device *netdev,
1831 struct ethtool_stats *stats, u64 *data)
1832{
1833 struct igb_adapter *adapter = netdev_priv(netdev);
1834 u64 *queue_stat;
1835 int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
1836 int j;
1837 int i;
1838
1839 igb_update_stats(adapter);
1840 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
1841 char *p = (char *)adapter+igb_gstrings_stats[i].stat_offset;
1842 data[i] = (igb_gstrings_stats[i].sizeof_stat ==
1843 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1844 }
1845 for (j = 0; j < adapter->num_rx_queues; j++) {
1846 int k;
1847 queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
1848 for (k = 0; k < stat_count; k++)
1849 data[i + k] = queue_stat[k];
1850 i += k;
1851 }
1852}
1853
1854static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1855{
1856 struct igb_adapter *adapter = netdev_priv(netdev);
1857 u8 *p = data;
1858 int i;
1859
1860 switch (stringset) {
1861 case ETH_SS_TEST:
1862 memcpy(data, *igb_gstrings_test,
1863 IGB_TEST_LEN*ETH_GSTRING_LEN);
1864 break;
1865 case ETH_SS_STATS:
1866 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
1867 memcpy(p, igb_gstrings_stats[i].stat_string,
1868 ETH_GSTRING_LEN);
1869 p += ETH_GSTRING_LEN;
1870 }
1871 for (i = 0; i < adapter->num_tx_queues; i++) {
1872 sprintf(p, "tx_queue_%u_packets", i);
1873 p += ETH_GSTRING_LEN;
1874 sprintf(p, "tx_queue_%u_bytes", i);
1875 p += ETH_GSTRING_LEN;
1876 }
1877 for (i = 0; i < adapter->num_rx_queues; i++) {
1878 sprintf(p, "rx_queue_%u_packets", i);
1879 p += ETH_GSTRING_LEN;
1880 sprintf(p, "rx_queue_%u_bytes", i);
1881 p += ETH_GSTRING_LEN;
1882 }
1883/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
1884 break;
1885 }
1886}
1887
1888static struct ethtool_ops igb_ethtool_ops = {
1889 .get_settings = igb_get_settings,
1890 .set_settings = igb_set_settings,
1891 .get_drvinfo = igb_get_drvinfo,
1892 .get_regs_len = igb_get_regs_len,
1893 .get_regs = igb_get_regs,
1894 .get_wol = igb_get_wol,
1895 .set_wol = igb_set_wol,
1896 .get_msglevel = igb_get_msglevel,
1897 .set_msglevel = igb_set_msglevel,
1898 .nway_reset = igb_nway_reset,
1899 .get_link = ethtool_op_get_link,
1900 .get_eeprom_len = igb_get_eeprom_len,
1901 .get_eeprom = igb_get_eeprom,
1902 .set_eeprom = igb_set_eeprom,
1903 .get_ringparam = igb_get_ringparam,
1904 .set_ringparam = igb_set_ringparam,
1905 .get_pauseparam = igb_get_pauseparam,
1906 .set_pauseparam = igb_set_pauseparam,
1907 .get_rx_csum = igb_get_rx_csum,
1908 .set_rx_csum = igb_set_rx_csum,
1909 .get_tx_csum = igb_get_tx_csum,
1910 .set_tx_csum = igb_set_tx_csum,
1911 .get_sg = ethtool_op_get_sg,
1912 .set_sg = ethtool_op_set_sg,
1913 .get_tso = ethtool_op_get_tso,
1914 .set_tso = igb_set_tso,
1915 .self_test = igb_diag_test,
1916 .get_strings = igb_get_strings,
1917 .phys_id = igb_phys_id,
1918 .get_sset_count = igb_get_sset_count,
1919 .get_ethtool_stats = igb_get_ethtool_stats,
1920 .get_coalesce = igb_get_coalesce,
1921 .set_coalesce = igb_set_coalesce,
1922};
1923
1924void igb_set_ethtool_ops(struct net_device *netdev)
1925{
1926 SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
1927}
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
new file mode 100644
index 000000000000..f3c144d5d72f
--- /dev/null
+++ b/drivers/net/igb/igb_main.c
@@ -0,0 +1,4138 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/module.h>
29#include <linux/types.h>
30#include <linux/init.h>
31#include <linux/vmalloc.h>
32#include <linux/pagemap.h>
33#include <linux/netdevice.h>
34#include <linux/tcp.h>
35#include <linux/ipv6.h>
36#include <net/checksum.h>
37#include <net/ip6_checksum.h>
38#include <linux/mii.h>
39#include <linux/ethtool.h>
40#include <linux/if_vlan.h>
41#include <linux/pci.h>
42#include <linux/delay.h>
43#include <linux/interrupt.h>
44#include <linux/if_ether.h>
45
46#include "igb.h"
47
48#define DRV_VERSION "1.0.8-k2"
49char igb_driver_name[] = "igb";
50char igb_driver_version[] = DRV_VERSION;
51static const char igb_driver_string[] =
52 "Intel(R) Gigabit Ethernet Network Driver";
53static const char igb_copyright[] = "Copyright (c) 2007 Intel Corporation.";
54
55
56static const struct e1000_info *igb_info_tbl[] = {
57 [board_82575] = &e1000_82575_info,
58};
59
60static struct pci_device_id igb_pci_tbl[] = {
61 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
62 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
63 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
64 /* required last entry */
65 {0, }
66};
67
68MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
69
70void igb_reset(struct igb_adapter *);
71static int igb_setup_all_tx_resources(struct igb_adapter *);
72static int igb_setup_all_rx_resources(struct igb_adapter *);
73static void igb_free_all_tx_resources(struct igb_adapter *);
74static void igb_free_all_rx_resources(struct igb_adapter *);
75static void igb_free_tx_resources(struct igb_adapter *, struct igb_ring *);
76static void igb_free_rx_resources(struct igb_adapter *, struct igb_ring *);
77void igb_update_stats(struct igb_adapter *);
78static int igb_probe(struct pci_dev *, const struct pci_device_id *);
79static void __devexit igb_remove(struct pci_dev *pdev);
80static int igb_sw_init(struct igb_adapter *);
81static int igb_open(struct net_device *);
82static int igb_close(struct net_device *);
83static void igb_configure_tx(struct igb_adapter *);
84static void igb_configure_rx(struct igb_adapter *);
85static void igb_setup_rctl(struct igb_adapter *);
86static void igb_clean_all_tx_rings(struct igb_adapter *);
87static void igb_clean_all_rx_rings(struct igb_adapter *);
88static void igb_clean_tx_ring(struct igb_adapter *, struct igb_ring *);
89static void igb_clean_rx_ring(struct igb_adapter *, struct igb_ring *);
90static void igb_set_multi(struct net_device *);
91static void igb_update_phy_info(unsigned long);
92static void igb_watchdog(unsigned long);
93static void igb_watchdog_task(struct work_struct *);
94static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
95 struct igb_ring *);
96static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
97static struct net_device_stats *igb_get_stats(struct net_device *);
98static int igb_change_mtu(struct net_device *, int);
99static int igb_set_mac(struct net_device *, void *);
100static irqreturn_t igb_intr(int irq, void *);
101static irqreturn_t igb_intr_msi(int irq, void *);
102static irqreturn_t igb_msix_other(int irq, void *);
103static irqreturn_t igb_msix_rx(int irq, void *);
104static irqreturn_t igb_msix_tx(int irq, void *);
105static int igb_clean_rx_ring_msix(struct napi_struct *, int);
106static bool igb_clean_tx_irq(struct igb_adapter *, struct igb_ring *);
107static int igb_clean(struct napi_struct *, int);
108static bool igb_clean_rx_irq_adv(struct igb_adapter *,
109 struct igb_ring *, int *, int);
110static void igb_alloc_rx_buffers_adv(struct igb_adapter *,
111 struct igb_ring *, int);
112static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
113static void igb_tx_timeout(struct net_device *);
114static void igb_reset_task(struct work_struct *);
115static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
116static void igb_vlan_rx_add_vid(struct net_device *, u16);
117static void igb_vlan_rx_kill_vid(struct net_device *, u16);
118static void igb_restore_vlan(struct igb_adapter *);
119
120static int igb_suspend(struct pci_dev *, pm_message_t);
121#ifdef CONFIG_PM
122static int igb_resume(struct pci_dev *);
123#endif
124static void igb_shutdown(struct pci_dev *);
125
126#ifdef CONFIG_NET_POLL_CONTROLLER
127/* for netdump / net console */
128static void igb_netpoll(struct net_device *);
129#endif
130
131static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
132 pci_channel_state_t);
133static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
134static void igb_io_resume(struct pci_dev *);
135
136static struct pci_error_handlers igb_err_handler = {
137 .error_detected = igb_io_error_detected,
138 .slot_reset = igb_io_slot_reset,
139 .resume = igb_io_resume,
140};
141
142
143static struct pci_driver igb_driver = {
144 .name = igb_driver_name,
145 .id_table = igb_pci_tbl,
146 .probe = igb_probe,
147 .remove = __devexit_p(igb_remove),
148#ifdef CONFIG_PM
 149	/* Power Management Hooks */
150 .suspend = igb_suspend,
151 .resume = igb_resume,
152#endif
153 .shutdown = igb_shutdown,
154 .err_handler = &igb_err_handler
155};
156
157MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
158MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
159MODULE_LICENSE("GPL");
160MODULE_VERSION(DRV_VERSION);
161
162#ifdef DEBUG
163/**
164 * igb_get_hw_dev_name - return device name string
165 * used by hardware layer to print debugging information
166 **/
167char *igb_get_hw_dev_name(struct e1000_hw *hw)
168{
169 struct igb_adapter *adapter = hw->back;
170 return adapter->netdev->name;
171}
172#endif
173
174/**
175 * igb_init_module - Driver Registration Routine
176 *
177 * igb_init_module is the first routine called when the driver is
178 * loaded. All it does is register with the PCI subsystem.
179 **/
180static int __init igb_init_module(void)
181{
182 int ret;
183 printk(KERN_INFO "%s - version %s\n",
184 igb_driver_string, igb_driver_version);
185
186 printk(KERN_INFO "%s\n", igb_copyright);
187
188 ret = pci_register_driver(&igb_driver);
189 return ret;
190}
191
192module_init(igb_init_module);
193
194/**
195 * igb_exit_module - Driver Exit Cleanup Routine
196 *
197 * igb_exit_module is called just before the driver is removed
198 * from memory.
199 **/
200static void __exit igb_exit_module(void)
201{
202 pci_unregister_driver(&igb_driver);
203}
204
205module_exit(igb_exit_module);
206
207/**
208 * igb_alloc_queues - Allocate memory for all rings
209 * @adapter: board private structure to initialize
210 *
211 * We allocate one ring per queue at run-time since we don't know the
212 * number of queues at compile-time.
213 **/
214static int igb_alloc_queues(struct igb_adapter *adapter)
215{
216 int i;
217
218 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
219 sizeof(struct igb_ring), GFP_KERNEL);
220 if (!adapter->tx_ring)
221 return -ENOMEM;
222
223 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
224 sizeof(struct igb_ring), GFP_KERNEL);
225 if (!adapter->rx_ring) {
226 kfree(adapter->tx_ring);
227 return -ENOMEM;
228 }
229
230 for (i = 0; i < adapter->num_rx_queues; i++) {
231 struct igb_ring *ring = &(adapter->rx_ring[i]);
232 ring->adapter = adapter;
233 ring->itr_register = E1000_ITR;
234
235 if (!ring->napi.poll)
236 netif_napi_add(adapter->netdev, &ring->napi, igb_clean,
237 adapter->napi.weight /
238 adapter->num_rx_queues);
239 }
240 return 0;
241}
242
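Each Rx ring's NAPI context receives an equal share of the adapter-wide budget (adapter->napi.weight, registered as 64 in igb_probe later in this file). For instance:

	#include <stdio.h>

	int main(void)
	{
		int weight = 64;	/* adapter->napi.weight from igb_probe */
		int queues;

		for (queues = 1; queues <= 4; queues *= 2)
			printf("%d rx queue(s) -> per-ring NAPI weight %d\n",
			       queues, weight / queues);
		return 0;
	}
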
243#define IGB_N0_QUEUE -1
244static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
245 int tx_queue, int msix_vector)
246{
247 u32 msixbm = 0;
248 struct e1000_hw *hw = &adapter->hw;
249 /* The 82575 assigns vectors using a bitmask, which matches the
250 bitmask for the EICR/EIMS/EIMC registers. To assign one
251 or more queues to a vector, we write the appropriate bits
252 into the MSIXBM register for that vector. */
253 if (rx_queue > IGB_N0_QUEUE) {
254 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
255 adapter->rx_ring[rx_queue].eims_value = msixbm;
256 }
257 if (tx_queue > IGB_N0_QUEUE) {
258 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
259 adapter->tx_ring[tx_queue].eims_value =
260 E1000_EICR_TX_QUEUE0 << tx_queue;
261 }
262 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
263}
264
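The shifts above compose the per-vector MSIXBM value. A sketch using the EICR queue-bit layout from e1000_defines.h (assumed here: Rx queue 0 at bit 0, Tx queue 0 at bit 8):

	#include <stdio.h>

	#define E1000_EICR_RX_QUEUE0 0x00000001	/* assumed values */
	#define E1000_EICR_TX_QUEUE0 0x00000100

	int main(void)
	{
		int rx_queue = 2, tx_queue = 0;
		unsigned int msixbm;

		/* same composition the function above writes to MSIXBM(vector) */
		msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		printf("MSIXBM value: 0x%08x\n", msixbm);
		return 0;
	}
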
265/**
266 * igb_configure_msix - Configure MSI-X hardware
267 *
268 * igb_configure_msix sets up the hardware to properly
269 * generate MSI-X interrupts.
270 **/
271static void igb_configure_msix(struct igb_adapter *adapter)
272{
273 u32 tmp;
274 int i, vector = 0;
275 struct e1000_hw *hw = &adapter->hw;
276
277 adapter->eims_enable_mask = 0;
278
279 for (i = 0; i < adapter->num_tx_queues; i++) {
280 struct igb_ring *tx_ring = &adapter->tx_ring[i];
281 igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
282 adapter->eims_enable_mask |= tx_ring->eims_value;
283 if (tx_ring->itr_val)
284 writel(1000000000 / (tx_ring->itr_val * 256),
285 hw->hw_addr + tx_ring->itr_register);
286 else
287 writel(1, hw->hw_addr + tx_ring->itr_register);
288 }
289
290 for (i = 0; i < adapter->num_rx_queues; i++) {
291 struct igb_ring *rx_ring = &adapter->rx_ring[i];
292 igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
293 adapter->eims_enable_mask |= rx_ring->eims_value;
294 if (rx_ring->itr_val)
295 writel(1000000000 / (rx_ring->itr_val * 256),
296 hw->hw_addr + rx_ring->itr_register);
297 else
298 writel(1, hw->hw_addr + rx_ring->itr_register);
299 }
300
301
302 /* set vector for other causes, i.e. link changes */
303 array_wr32(E1000_MSIXBM(0), vector++,
304 E1000_EIMS_OTHER);
305
306 /* disable IAM for ICR interrupt bits */
307 wr32(E1000_IAM, 0);
308
309 tmp = rd32(E1000_CTRL_EXT);
 310	/* enable MSI-X PBA support */
311 tmp |= E1000_CTRL_EXT_PBA_CLR;
312
313 /* Auto-Mask interrupts upon ICR read. */
314 tmp |= E1000_CTRL_EXT_EIAME;
315 tmp |= E1000_CTRL_EXT_IRCA;
316
317 wr32(E1000_CTRL_EXT, tmp);
318 adapter->eims_enable_mask |= E1000_EIMS_OTHER;
319
320 wrfl();
321}
322
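The writel() calls above translate an interrupt rate into the EITR interval, which the hardware appears to count in 256 ns units, hence 1000000000 / (itr_val * 256) units between interrupts. For a target of 8000 interrupts per second:

	#include <stdio.h>

	int main(void)
	{
		unsigned int itr_val = 8000;	/* target interrupts per second */
		unsigned int eitr = 1000000000u / (itr_val * 256);

		printf("EITR=%u -> interval %u ns (~%u us)\n",
		       eitr, eitr * 256, eitr * 256 / 1000);
		return 0;
	}
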
323/**
324 * igb_request_msix - Initialize MSI-X interrupts
325 *
326 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
327 * kernel.
328 **/
329static int igb_request_msix(struct igb_adapter *adapter)
330{
331 struct net_device *netdev = adapter->netdev;
332 int i, err = 0, vector = 0;
333
334 vector = 0;
335
336 for (i = 0; i < adapter->num_tx_queues; i++) {
337 struct igb_ring *ring = &(adapter->tx_ring[i]);
338 sprintf(ring->name, "%s-tx%d", netdev->name, i);
339 err = request_irq(adapter->msix_entries[vector].vector,
340 &igb_msix_tx, 0, ring->name,
341 &(adapter->tx_ring[i]));
342 if (err)
343 goto out;
344 ring->itr_register = E1000_EITR(0) + (vector << 2);
345 ring->itr_val = adapter->itr;
346 vector++;
347 }
348 for (i = 0; i < adapter->num_rx_queues; i++) {
349 struct igb_ring *ring = &(adapter->rx_ring[i]);
350 if (strlen(netdev->name) < (IFNAMSIZ - 5))
351 sprintf(ring->name, "%s-rx%d", netdev->name, i);
352 else
353 memcpy(ring->name, netdev->name, IFNAMSIZ);
354 err = request_irq(adapter->msix_entries[vector].vector,
355 &igb_msix_rx, 0, ring->name,
356 &(adapter->rx_ring[i]));
357 if (err)
358 goto out;
359 ring->itr_register = E1000_EITR(0) + (vector << 2);
360 ring->itr_val = adapter->itr;
361 vector++;
362 }
363
364 err = request_irq(adapter->msix_entries[vector].vector,
365 &igb_msix_other, 0, netdev->name, netdev);
366 if (err)
367 goto out;
368
369 adapter->napi.poll = igb_clean_rx_ring_msix;
370 for (i = 0; i < adapter->num_rx_queues; i++)
371 adapter->rx_ring[i].napi.poll = adapter->napi.poll;
372 igb_configure_msix(adapter);
373 return 0;
374out:
375 return err;
376}
377
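The strlen() guard above keeps "%s-rxN" within the ring-name buffer: a base name shorter than IFNAMSIZ - 5 leaves room for "-rx", a single digit, and the terminating NUL. A sketch (IFNAMSIZ is 16 on Linux; the buffer sizes here are chosen for the sketch):

	#include <stdio.h>
	#include <string.h>

	#define IFNAMSIZ 16

	int main(void)
	{
		char name[IFNAMSIZ + 8];		/* generously sized for the sketch */
		char netdev_name[IFNAMSIZ] = "eth0";	/* hypothetical interface */

		if (strlen(netdev_name) < IFNAMSIZ - 5)
			sprintf(name, "%s-rx%d", netdev_name, 0);
		else
			memcpy(name, netdev_name, IFNAMSIZ);
		printf("irq name: %s\n", name);
		return 0;
	}
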
378static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
379{
380 if (adapter->msix_entries) {
381 pci_disable_msix(adapter->pdev);
382 kfree(adapter->msix_entries);
383 adapter->msix_entries = NULL;
384 } else if (adapter->msi_enabled)
385 pci_disable_msi(adapter->pdev);
386 return;
387}
388
389
390/**
391 * igb_set_interrupt_capability - set MSI or MSI-X if supported
392 *
393 * Attempt to configure interrupts using the best available
394 * capabilities of the hardware and kernel.
395 **/
396static void igb_set_interrupt_capability(struct igb_adapter *adapter)
397{
398 int err;
399 int numvecs, i;
400
401 numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
402 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
403 GFP_KERNEL);
404 if (!adapter->msix_entries)
405 goto msi_only;
406
407 for (i = 0; i < numvecs; i++)
408 adapter->msix_entries[i].entry = i;
409
410 err = pci_enable_msix(adapter->pdev,
411 adapter->msix_entries,
412 numvecs);
413 if (err == 0)
414 return;
415
416 igb_reset_interrupt_capability(adapter);
417
418 /* If we can't do MSI-X, try MSI */
419msi_only:
420 adapter->num_rx_queues = 1;
421 if (!pci_enable_msi(adapter->pdev))
422 adapter->msi_enabled = 1;
423 return;
424}
425
426/**
427 * igb_request_irq - initialize interrupts
428 *
429 * Attempts to configure interrupts using the best available
430 * capabilities of the hardware and kernel.
431 **/
432static int igb_request_irq(struct igb_adapter *adapter)
433{
434 struct net_device *netdev = adapter->netdev;
435 struct e1000_hw *hw = &adapter->hw;
436 int err = 0;
437
438 if (adapter->msix_entries) {
439 err = igb_request_msix(adapter);
440 if (!err) {
441 struct e1000_hw *hw = &adapter->hw;
442 /* enable IAM, auto-mask,
443 * DO NOT USE EIAME or IAME in legacy mode */
444 wr32(E1000_IAM, IMS_ENABLE_MASK);
445 goto request_done;
446 }
447 /* fall back to MSI */
448 igb_reset_interrupt_capability(adapter);
449 if (!pci_enable_msi(adapter->pdev))
450 adapter->msi_enabled = 1;
451 igb_free_all_tx_resources(adapter);
452 igb_free_all_rx_resources(adapter);
453 adapter->num_rx_queues = 1;
454 igb_alloc_queues(adapter);
455 }
456 if (adapter->msi_enabled) {
457 err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
458 netdev->name, netdev);
459 if (!err)
460 goto request_done;
461 /* fall back to legacy interrupts */
462 igb_reset_interrupt_capability(adapter);
463 adapter->msi_enabled = 0;
464 }
465
466 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
467 netdev->name, netdev);
468
469 if (err) {
470 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
471 err);
472 goto request_done;
473 }
474
475 /* enable IAM, auto-mask */
476 wr32(E1000_IAM, IMS_ENABLE_MASK);
477
478request_done:
479 return err;
480}
481
482static void igb_free_irq(struct igb_adapter *adapter)
483{
484 struct net_device *netdev = adapter->netdev;
485
486 if (adapter->msix_entries) {
487 int vector = 0, i;
488
489 for (i = 0; i < adapter->num_tx_queues; i++)
490 free_irq(adapter->msix_entries[vector++].vector,
491 &(adapter->tx_ring[i]));
492 for (i = 0; i < adapter->num_rx_queues; i++)
493 free_irq(adapter->msix_entries[vector++].vector,
494 &(adapter->rx_ring[i]));
495
496 free_irq(adapter->msix_entries[vector++].vector, netdev);
497 return;
498 }
499
500 free_irq(adapter->pdev->irq, netdev);
501}
502
503/**
504 * igb_irq_disable - Mask off interrupt generation on the NIC
505 * @adapter: board private structure
506 **/
507static void igb_irq_disable(struct igb_adapter *adapter)
508{
509 struct e1000_hw *hw = &adapter->hw;
510
511 if (adapter->msix_entries) {
512 wr32(E1000_EIMC, ~0);
513 wr32(E1000_EIAC, 0);
514 }
515 wr32(E1000_IMC, ~0);
516 wrfl();
517 synchronize_irq(adapter->pdev->irq);
518}
519
520/**
521 * igb_irq_enable - Enable default interrupt generation settings
522 * @adapter: board private structure
523 **/
524static void igb_irq_enable(struct igb_adapter *adapter)
525{
526 struct e1000_hw *hw = &adapter->hw;
527
528 if (adapter->msix_entries) {
529 wr32(E1000_EIMS,
530 adapter->eims_enable_mask);
531 wr32(E1000_EIAC,
532 adapter->eims_enable_mask);
533 wr32(E1000_IMS, E1000_IMS_LSC);
534 } else
535 wr32(E1000_IMS, IMS_ENABLE_MASK);
536}
537
538static void igb_update_mng_vlan(struct igb_adapter *adapter)
539{
540 struct net_device *netdev = adapter->netdev;
541 u16 vid = adapter->hw.mng_cookie.vlan_id;
542 u16 old_vid = adapter->mng_vlan_id;
543 if (adapter->vlgrp) {
544 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
545 if (adapter->hw.mng_cookie.status &
546 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
547 igb_vlan_rx_add_vid(netdev, vid);
548 adapter->mng_vlan_id = vid;
549 } else
550 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
551
552 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
553 (vid != old_vid) &&
554 !vlan_group_get_device(adapter->vlgrp, old_vid))
555 igb_vlan_rx_kill_vid(netdev, old_vid);
556 } else
557 adapter->mng_vlan_id = vid;
558 }
559}
560
561/**
562 * igb_release_hw_control - release control of the h/w to f/w
563 * @adapter: address of board private structure
564 *
565 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
566 * For ASF and Pass Through versions of f/w this means that the
567 * driver is no longer loaded.
568 *
569 **/
570static void igb_release_hw_control(struct igb_adapter *adapter)
571{
572 struct e1000_hw *hw = &adapter->hw;
573 u32 ctrl_ext;
574
575 /* Let firmware take over control of h/w */
576 ctrl_ext = rd32(E1000_CTRL_EXT);
577 wr32(E1000_CTRL_EXT,
578 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
579}
580
581
582/**
583 * igb_get_hw_control - get control of the h/w from f/w
584 * @adapter: address of board private structure
585 *
586 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
587 * For ASF and Pass Through versions of f/w this means that
588 * the driver is loaded.
589 *
590 **/
591static void igb_get_hw_control(struct igb_adapter *adapter)
592{
593 struct e1000_hw *hw = &adapter->hw;
594 u32 ctrl_ext;
595
596 /* Let firmware know the driver has taken over */
597 ctrl_ext = rd32(E1000_CTRL_EXT);
598 wr32(E1000_CTRL_EXT,
599 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
600}
601
602static void igb_init_manageability(struct igb_adapter *adapter)
603{
604 struct e1000_hw *hw = &adapter->hw;
605
606 if (adapter->en_mng_pt) {
607 u32 manc2h = rd32(E1000_MANC2H);
608 u32 manc = rd32(E1000_MANC);
609
610 /* disable hardware interception of ARP */
611 manc &= ~(E1000_MANC_ARP_EN);
612
613 /* enable receiving management packets to the host */
614 /* this will probably generate destination unreachable messages
615 * from the host OS, but the packets will be handled on SMBUS */
616 manc |= E1000_MANC_EN_MNG2HOST;
617#define E1000_MNG2HOST_PORT_623 (1 << 5)
618#define E1000_MNG2HOST_PORT_664 (1 << 6)
619 manc2h |= E1000_MNG2HOST_PORT_623;
620 manc2h |= E1000_MNG2HOST_PORT_664;
621 wr32(E1000_MANC2H, manc2h);
622
623 wr32(E1000_MANC, manc);
624 }
625}
626
627static void igb_release_manageability(struct igb_adapter *adapter)
628{
629 struct e1000_hw *hw = &adapter->hw;
630
631 if (adapter->en_mng_pt) {
632 u32 manc = rd32(E1000_MANC);
633
634 /* re-enable hardware interception of ARP */
635 manc |= E1000_MANC_ARP_EN;
636 manc &= ~E1000_MANC_EN_MNG2HOST;
637
 638		/* don't explicitly have to mess with MANC2H since
 639		 * MANC has an enable/disable bit that gates MANC2H */
640
641 /* XXX stop the hardware watchdog ? */
642 wr32(E1000_MANC, manc);
643 }
644}
645
646/**
647 * igb_configure - configure the hardware for RX and TX
648 * @adapter: private board structure
649 **/
650static void igb_configure(struct igb_adapter *adapter)
651{
652 struct net_device *netdev = adapter->netdev;
653 int i;
654
655 igb_get_hw_control(adapter);
656 igb_set_multi(netdev);
657
658 igb_restore_vlan(adapter);
659 igb_init_manageability(adapter);
660
661 igb_configure_tx(adapter);
662 igb_setup_rctl(adapter);
663 igb_configure_rx(adapter);
664 /* call IGB_DESC_UNUSED which always leaves
665 * at least 1 descriptor unused to make sure
666 * next_to_use != next_to_clean */
667 for (i = 0; i < adapter->num_rx_queues; i++) {
668 struct igb_ring *ring = &adapter->rx_ring[i];
669 igb_alloc_rx_buffers_adv(adapter, ring, IGB_DESC_UNUSED(ring));
670 }
671
672
673 adapter->tx_queue_len = netdev->tx_queue_len;
674}
675
676
677/**
678 * igb_up - Open the interface and prepare it to handle traffic
679 * @adapter: board private structure
680 **/
681
682int igb_up(struct igb_adapter *adapter)
683{
684 struct e1000_hw *hw = &adapter->hw;
685 int i;
686
687 /* hardware has been reset, we need to reload some things */
688 igb_configure(adapter);
689
690 clear_bit(__IGB_DOWN, &adapter->state);
691
692 napi_enable(&adapter->napi);
693
694 if (adapter->msix_entries) {
695 for (i = 0; i < adapter->num_rx_queues; i++)
696 napi_enable(&adapter->rx_ring[i].napi);
697 igb_configure_msix(adapter);
698 }
699
700 /* Clear any pending interrupts. */
701 rd32(E1000_ICR);
702 igb_irq_enable(adapter);
703
704 /* Fire a link change interrupt to start the watchdog. */
705 wr32(E1000_ICS, E1000_ICS_LSC);
706 return 0;
707}
708
709void igb_down(struct igb_adapter *adapter)
710{
711 struct e1000_hw *hw = &adapter->hw;
712 struct net_device *netdev = adapter->netdev;
713 u32 tctl, rctl;
714 int i;
715
716 /* signal that we're down so the interrupt handler does not
717 * reschedule our watchdog timer */
718 set_bit(__IGB_DOWN, &adapter->state);
719
720 /* disable receives in the hardware */
721 rctl = rd32(E1000_RCTL);
722 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
723 /* flush and sleep below */
724
725 netif_stop_queue(netdev);
726
727 /* disable transmits in the hardware */
728 tctl = rd32(E1000_TCTL);
729 tctl &= ~E1000_TCTL_EN;
730 wr32(E1000_TCTL, tctl);
731 /* flush both disables and wait for them to finish */
732 wrfl();
733 msleep(10);
734
735 napi_disable(&adapter->napi);
736
737 if (adapter->msix_entries)
738 for (i = 0; i < adapter->num_rx_queues; i++)
739 napi_disable(&adapter->rx_ring[i].napi);
740 igb_irq_disable(adapter);
741
742 del_timer_sync(&adapter->watchdog_timer);
743 del_timer_sync(&adapter->phy_info_timer);
744
745 netdev->tx_queue_len = adapter->tx_queue_len;
746 netif_carrier_off(netdev);
747 adapter->link_speed = 0;
748 adapter->link_duplex = 0;
749
750 igb_reset(adapter);
751 igb_clean_all_tx_rings(adapter);
752 igb_clean_all_rx_rings(adapter);
753}
754
755void igb_reinit_locked(struct igb_adapter *adapter)
756{
757 WARN_ON(in_interrupt());
758 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
759 msleep(1);
760 igb_down(adapter);
761 igb_up(adapter);
762 clear_bit(__IGB_RESETTING, &adapter->state);
763}
764
765void igb_reset(struct igb_adapter *adapter)
766{
767 struct e1000_hw *hw = &adapter->hw;
768 struct e1000_fc_info *fc = &adapter->hw.fc;
769 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
770 u16 hwm;
771
 772	/* Repartition PBA for MTUs greater than 9k.
 773	 * CTRL.RST is required for the change to take effect.
 774	 */
775 pba = E1000_PBA_34K;
776
777 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
778 /* adjust PBA for jumbo frames */
779 wr32(E1000_PBA, pba);
780
781 /* To maintain wire speed transmits, the Tx FIFO should be
782 * large enough to accommodate two full transmit packets,
783 * rounded up to the next 1KB and expressed in KB. Likewise,
784 * the Rx FIFO should be large enough to accommodate at least
785 * one full receive packet and is similarly rounded up and
786 * expressed in KB. */
787 pba = rd32(E1000_PBA);
788 /* upper 16 bits has Tx packet buffer allocation size in KB */
789 tx_space = pba >> 16;
790 /* lower 16 bits has Rx packet buffer allocation size in KB */
791 pba &= 0xffff;
 792		/* the tx fifo also stores 16 bytes of information about each packet,
 793		 * but don't include the ethernet FCS because hardware appends it */
794 min_tx_space = (adapter->max_frame_size +
795 sizeof(struct e1000_tx_desc) -
796 ETH_FCS_LEN) * 2;
797 min_tx_space = ALIGN(min_tx_space, 1024);
798 min_tx_space >>= 10;
799 /* software strips receive CRC, so leave room for it */
800 min_rx_space = adapter->max_frame_size;
801 min_rx_space = ALIGN(min_rx_space, 1024);
802 min_rx_space >>= 10;
803
804 /* If current Tx allocation is less than the min Tx FIFO size,
805 * and the min Tx FIFO size is less than the current Rx FIFO
806 * allocation, take space away from current Rx allocation */
807 if (tx_space < min_tx_space &&
808 ((min_tx_space - tx_space) < pba)) {
809 pba = pba - (min_tx_space - tx_space);
810
811 /* if short on rx space, rx wins and must trump tx
812 * adjustment */
813 if (pba < min_rx_space)
814 pba = min_rx_space;
815 }
816 }
817 wr32(E1000_PBA, pba);
818
819 /* flow control settings */
820 /* The high water mark must be low enough to fit one full frame
821 * (or the size used for early receive) above it in the Rx FIFO.
822 * Set it to the lower of:
823 * - 90% of the Rx FIFO size, or
824 * - the full Rx FIFO size minus one full frame */
825 hwm = min(((pba << 10) * 9 / 10),
826 ((pba << 10) - adapter->max_frame_size));
827
828 fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
829 fc->low_water = fc->high_water - 8;
830 fc->pause_time = 0xFFFF;
831 fc->send_xon = 1;
832 fc->type = fc->original_type;
833
834 /* Allow time for pending master requests to run */
835 adapter->hw.mac.ops.reset_hw(&adapter->hw);
836 wr32(E1000_WUC, 0);
837
838 if (adapter->hw.mac.ops.init_hw(&adapter->hw))
839 dev_err(&adapter->pdev->dev, "Hardware Error\n");
840
841 igb_update_mng_vlan(adapter);
842
843 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
844 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
845
846 igb_reset_adaptive(&adapter->hw);
847 adapter->hw.phy.ops.get_phy_info(&adapter->hw);
848 igb_release_manageability(adapter);
849}
850
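Putting numbers to the flow-control setup above: with the 34 KB packet buffer (assuming E1000_PBA_34K denotes 34 KB) and a 1518-byte maximum frame (MTU 1500 + Ethernet header + FCS, per igb_sw_init below), the 90% term is the smaller one, and masking to 8-byte granularity yields the watermarks:

	#include <stdio.h>

	int main(void)
	{
		unsigned int pba = 34;		/* assumed E1000_PBA_34K, in KB */
		unsigned int max_frame = 1518;	/* MTU 1500 + header + FCS */
		unsigned int rx_bytes = pba << 10;
		unsigned int ninety = rx_bytes * 9 / 10;
		unsigned int below = rx_bytes - max_frame;
		unsigned int hwm = ninety < below ? ninety : below;

		printf("high_water=%u low_water=%u\n",
		       hwm & 0xFFF8, (hwm & 0xFFF8) - 8);
		return 0;
	}
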
851/**
852 * igb_probe - Device Initialization Routine
853 * @pdev: PCI device information struct
854 * @ent: entry in igb_pci_tbl
855 *
856 * Returns 0 on success, negative on failure
857 *
858 * igb_probe initializes an adapter identified by a pci_dev structure.
859 * The OS initialization, configuring of the adapter private structure,
860 * and a hardware reset occur.
861 **/
862static int __devinit igb_probe(struct pci_dev *pdev,
863 const struct pci_device_id *ent)
864{
865 struct net_device *netdev;
866 struct igb_adapter *adapter;
867 struct e1000_hw *hw;
868 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
869 unsigned long mmio_start, mmio_len;
870 static int cards_found;
871 int i, err, pci_using_dac;
872 u16 eeprom_data = 0;
873 u16 eeprom_apme_mask = IGB_EEPROM_APME;
874 u32 part_num;
875
876 err = pci_enable_device(pdev);
877 if (err)
878 return err;
879
880 pci_using_dac = 0;
881 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
882 if (!err) {
883 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
884 if (!err)
885 pci_using_dac = 1;
886 } else {
887 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
888 if (err) {
889 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
890 if (err) {
891 dev_err(&pdev->dev, "No usable DMA "
892 "configuration, aborting\n");
893 goto err_dma;
894 }
895 }
896 }
897
898 err = pci_request_regions(pdev, igb_driver_name);
899 if (err)
900 goto err_pci_reg;
901
902 pci_set_master(pdev);
903
904 err = -ENOMEM;
905 netdev = alloc_etherdev(sizeof(struct igb_adapter));
906 if (!netdev)
907 goto err_alloc_etherdev;
908
909 SET_NETDEV_DEV(netdev, &pdev->dev);
910
911 pci_set_drvdata(pdev, netdev);
912 adapter = netdev_priv(netdev);
913 adapter->netdev = netdev;
914 adapter->pdev = pdev;
915 hw = &adapter->hw;
916 hw->back = adapter;
917 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
918
919 mmio_start = pci_resource_start(pdev, 0);
920 mmio_len = pci_resource_len(pdev, 0);
921
922 err = -EIO;
923 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
924 if (!adapter->hw.hw_addr)
925 goto err_ioremap;
926
927 netdev->open = &igb_open;
928 netdev->stop = &igb_close;
929 netdev->get_stats = &igb_get_stats;
930 netdev->set_multicast_list = &igb_set_multi;
931 netdev->set_mac_address = &igb_set_mac;
932 netdev->change_mtu = &igb_change_mtu;
933 netdev->do_ioctl = &igb_ioctl;
934 igb_set_ethtool_ops(netdev);
935 netdev->tx_timeout = &igb_tx_timeout;
936 netdev->watchdog_timeo = 5 * HZ;
937 netif_napi_add(netdev, &adapter->napi, igb_clean, 64);
938 netdev->vlan_rx_register = igb_vlan_rx_register;
939 netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
940 netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
941#ifdef CONFIG_NET_POLL_CONTROLLER
942 netdev->poll_controller = igb_netpoll;
943#endif
944 netdev->hard_start_xmit = &igb_xmit_frame_adv;
945
946 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
947
948 netdev->mem_start = mmio_start;
949 netdev->mem_end = mmio_start + mmio_len;
950
951 adapter->bd_number = cards_found;
952
953 /* PCI config space info */
954 hw->vendor_id = pdev->vendor;
955 hw->device_id = pdev->device;
956 hw->revision_id = pdev->revision;
957 hw->subsystem_vendor_id = pdev->subsystem_vendor;
958 hw->subsystem_device_id = pdev->subsystem_device;
959
960 /* setup the private structure */
961 hw->back = adapter;
962 /* Copy the default MAC, PHY and NVM function pointers */
963 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
964 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
965 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
966 /* Initialize skew-specific constants */
967 err = ei->get_invariants(hw);
968 if (err)
969 goto err_hw_init;
970
971 err = igb_sw_init(adapter);
972 if (err)
973 goto err_sw_init;
974
975 igb_get_bus_info_pcie(hw);
976
977 hw->phy.autoneg_wait_to_complete = false;
978 hw->mac.adaptive_ifs = true;
979
980 /* Copper options */
981 if (hw->phy.media_type == e1000_media_type_copper) {
982 hw->phy.mdix = AUTO_ALL_MODES;
983 hw->phy.disable_polarity_correction = false;
984 hw->phy.ms_type = e1000_ms_hw_default;
985 }
986
987 if (igb_check_reset_block(hw))
988 dev_info(&pdev->dev,
989 "PHY reset is blocked due to SOL/IDER session.\n");
990
991 netdev->features = NETIF_F_SG |
992 NETIF_F_HW_CSUM |
993 NETIF_F_HW_VLAN_TX |
994 NETIF_F_HW_VLAN_RX |
995 NETIF_F_HW_VLAN_FILTER;
996
997 netdev->features |= NETIF_F_TSO;
998
999 netdev->features |= NETIF_F_TSO6;
1000 if (pci_using_dac)
1001 netdev->features |= NETIF_F_HIGHDMA;
1002
1003 netdev->features |= NETIF_F_LLTX;
1004 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
1005
1006 /* before reading the NVM, reset the controller to put the device in a
1007 * known good starting state */
1008 hw->mac.ops.reset_hw(hw);
1009
1010 /* make sure the NVM is good */
1011 if (igb_validate_nvm_checksum(hw) < 0) {
1012 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1013 err = -EIO;
1014 goto err_eeprom;
1015 }
1016
1017 /* copy the MAC address out of the NVM */
1018 if (hw->mac.ops.read_mac_addr(hw))
1019 dev_err(&pdev->dev, "NVM Read Error\n");
1020
1021 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1022 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1023
1024 if (!is_valid_ether_addr(netdev->perm_addr)) {
1025 dev_err(&pdev->dev, "Invalid MAC Address\n");
1026 err = -EIO;
1027 goto err_eeprom;
1028 }
1029
1030 init_timer(&adapter->watchdog_timer);
1031 adapter->watchdog_timer.function = &igb_watchdog;
1032 adapter->watchdog_timer.data = (unsigned long) adapter;
1033
1034 init_timer(&adapter->phy_info_timer);
1035 adapter->phy_info_timer.function = &igb_update_phy_info;
1036 adapter->phy_info_timer.data = (unsigned long) adapter;
1037
1038 INIT_WORK(&adapter->reset_task, igb_reset_task);
1039 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
1040
1041 /* Initialize link & ring properties that are user-changeable */
1042 adapter->tx_ring->count = 256;
1043 for (i = 0; i < adapter->num_tx_queues; i++)
1044 adapter->tx_ring[i].count = adapter->tx_ring->count;
1045 adapter->rx_ring->count = 256;
1046 for (i = 0; i < adapter->num_rx_queues; i++)
1047 adapter->rx_ring[i].count = adapter->rx_ring->count;
1048
1049 adapter->fc_autoneg = true;
1050 hw->mac.autoneg = true;
1051 hw->phy.autoneg_advertised = 0x2f;
1052
1053 hw->fc.original_type = e1000_fc_default;
1054 hw->fc.type = e1000_fc_default;
1055
1056 adapter->itr_setting = 3;
1057 adapter->itr = IGB_START_ITR;
1058
1059 igb_validate_mdi_setting(hw);
1060
1061 adapter->rx_csum = 1;
1062
 1063	/* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
1064 * enable the ACPI Magic Packet filter
1065 */
1066
1067 if (hw->bus.func == 0 ||
1068 hw->device_id == E1000_DEV_ID_82575EB_COPPER)
1069 hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1,
1070 &eeprom_data);
1071
1072 if (eeprom_data & eeprom_apme_mask)
1073 adapter->eeprom_wol |= E1000_WUFC_MAG;
1074
1075 /* now that we have the eeprom settings, apply the special cases where
1076 * the eeprom may be wrong or the board simply won't support wake on
1077 * lan on a particular port */
1078 switch (pdev->device) {
1079 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1080 adapter->eeprom_wol = 0;
1081 break;
1082 case E1000_DEV_ID_82575EB_FIBER_SERDES:
1083 /* Wake events only supported on port A for dual fiber
1084 * regardless of eeprom setting */
1085 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
1086 adapter->eeprom_wol = 0;
1087 break;
1088 }
1089
1090 /* initialize the wol settings based on the eeprom settings */
1091 adapter->wol = adapter->eeprom_wol;
1092
1093 /* reset the hardware with the new settings */
1094 igb_reset(adapter);
1095
1096 /* let the f/w know that the h/w is now under the control of the
1097 * driver. */
1098 igb_get_hw_control(adapter);
1099
1100 /* tell the stack to leave us alone until igb_open() is called */
1101 netif_carrier_off(netdev);
1102 netif_stop_queue(netdev);
1103
1104 strcpy(netdev->name, "eth%d");
1105 err = register_netdev(netdev);
1106 if (err)
1107 goto err_register;
1108
1109 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1110 /* print bus type/speed/width info */
1111 dev_info(&pdev->dev,
1112 "%s: (PCIe:%s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
1113 netdev->name,
1114 ((hw->bus.speed == e1000_bus_speed_2500)
1115 ? "2.5Gb/s" : "unknown"),
1116 ((hw->bus.width == e1000_bus_width_pcie_x4)
1117 ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x1)
1118 ? "Width x1" : "unknown"),
1119 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
1120 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
1121
1122 igb_read_part_num(hw, &part_num);
1123 dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
1124 (part_num >> 8), (part_num & 0xff));
1125
1126 dev_info(&pdev->dev,
1127 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
1128 adapter->msix_entries ? "MSI-X" :
1129 adapter->msi_enabled ? "MSI" : "legacy",
1130 adapter->num_rx_queues, adapter->num_tx_queues);
1131
1132 cards_found++;
1133 return 0;
1134
1135err_register:
1136 igb_release_hw_control(adapter);
1137err_eeprom:
1138 if (!igb_check_reset_block(hw))
1139 hw->phy.ops.reset_phy(hw);
1140
1141 if (hw->flash_address)
1142 iounmap(hw->flash_address);
1143
1144 igb_remove_device(hw);
1145 kfree(adapter->tx_ring);
1146 kfree(adapter->rx_ring);
1147err_sw_init:
1148err_hw_init:
1149 iounmap(hw->hw_addr);
1150err_ioremap:
1151 free_netdev(netdev);
1152err_alloc_etherdev:
1153 pci_release_regions(pdev);
1154err_pci_reg:
1155err_dma:
1156 pci_disable_device(pdev);
1157 return err;
1158}
1159
1160/**
1161 * igb_remove - Device Removal Routine
1162 * @pdev: PCI device information struct
1163 *
1164 * igb_remove is called by the PCI subsystem to alert the driver
1165 * that it should release a PCI device. This could be caused by a
1166 * Hot-Plug event, or because the driver is going to be removed from
1167 * memory.
1168 **/
1169static void __devexit igb_remove(struct pci_dev *pdev)
1170{
1171 struct net_device *netdev = pci_get_drvdata(pdev);
1172 struct igb_adapter *adapter = netdev_priv(netdev);
1173
1174	/* flush_scheduled_work() may reschedule our watchdog task, so
1175	 * explicitly prevent the watchdog task from being rescheduled */
1176 set_bit(__IGB_DOWN, &adapter->state);
1177 del_timer_sync(&adapter->watchdog_timer);
1178 del_timer_sync(&adapter->phy_info_timer);
1179
1180 flush_scheduled_work();
1181
1182
1183 igb_release_manageability(adapter);
1184
1185 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1186 * would have already happened in close and is redundant. */
1187 igb_release_hw_control(adapter);
1188
1189 unregister_netdev(netdev);
1190
1191 if (!igb_check_reset_block(&adapter->hw))
1192 adapter->hw.phy.ops.reset_phy(&adapter->hw);
1193
1194 igb_remove_device(&adapter->hw);
1195 igb_reset_interrupt_capability(adapter);
1196
1197 kfree(adapter->tx_ring);
1198 kfree(adapter->rx_ring);
1199
1200 iounmap(adapter->hw.hw_addr);
1201 if (adapter->hw.flash_address)
1202 iounmap(adapter->hw.flash_address);
1203 pci_release_regions(pdev);
1204
1205 free_netdev(netdev);
1206
1207 pci_disable_device(pdev);
1208}
1209
1210/**
1211 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1212 * @adapter: board private structure to initialize
1213 *
1214 * igb_sw_init initializes the Adapter private data structure.
1215 * Fields are initialized based on PCI device information and
1216 * OS network device settings (MTU size).
1217 **/
1218static int __devinit igb_sw_init(struct igb_adapter *adapter)
1219{
1220 struct e1000_hw *hw = &adapter->hw;
1221 struct net_device *netdev = adapter->netdev;
1222 struct pci_dev *pdev = adapter->pdev;
1223
1224 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
1225
1226 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1227 adapter->rx_ps_hdr_size = 0; /* disable packet split */
1228 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1229 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1230
1231 /* Number of supported queues. */
1232 /* Having more queues than CPUs doesn't make sense. */
1233 adapter->num_tx_queues = 1;
1234 adapter->num_rx_queues = min(IGB_MAX_RX_QUEUES, num_online_cpus());
1235
1236 igb_set_interrupt_capability(adapter);
1237
1238 if (igb_alloc_queues(adapter)) {
1239 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1240 return -ENOMEM;
1241 }
1242
1243 /* Explicitly disable IRQ since the NIC can be in any state. */
1244 igb_irq_disable(adapter);
1245
1246 set_bit(__IGB_DOWN, &adapter->state);
1247 return 0;
1248}
1249
1250/**
1251 * igb_open - Called when a network interface is made active
1252 * @netdev: network interface device structure
1253 *
1254 * Returns 0 on success, negative value on failure
1255 *
1256 * The open entry point is called when a network interface is made
1257 * active by the system (IFF_UP). At this point all resources needed
1258 * for transmit and receive operations are allocated, the interrupt
1259 * handler is registered with the OS, the watchdog timer is started,
1260 * and the stack is notified that the interface is ready.
1261 **/
1262static int igb_open(struct net_device *netdev)
1263{
1264 struct igb_adapter *adapter = netdev_priv(netdev);
1265 struct e1000_hw *hw = &adapter->hw;
1266 int err;
1267 int i;
1268
1269 /* disallow open during test */
1270 if (test_bit(__IGB_TESTING, &adapter->state))
1271 return -EBUSY;
1272
1273 /* allocate transmit descriptors */
1274 err = igb_setup_all_tx_resources(adapter);
1275 if (err)
1276 goto err_setup_tx;
1277
1278 /* allocate receive descriptors */
1279 err = igb_setup_all_rx_resources(adapter);
1280 if (err)
1281 goto err_setup_rx;
1282
1283 /* e1000_power_up_phy(adapter); */
1284
1285 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1286 if ((adapter->hw.mng_cookie.status &
1287 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
1288 igb_update_mng_vlan(adapter);
1289
1290 /* before we allocate an interrupt, we must be ready to handle it.
1291 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1292 * as soon as we call pci_request_irq, so we have to setup our
1293 * clean_rx handler before we do so. */
1294 igb_configure(adapter);
1295
1296 err = igb_request_irq(adapter);
1297 if (err)
1298 goto err_req_irq;
1299
1300 /* From here on the code is the same as igb_up() */
1301 clear_bit(__IGB_DOWN, &adapter->state);
1302
1303 napi_enable(&adapter->napi);
1304 if (adapter->msix_entries)
1305 for (i = 0; i < adapter->num_rx_queues; i++)
1306 napi_enable(&adapter->rx_ring[i].napi);
1307
1308 igb_irq_enable(adapter);
1309
1310 /* Clear any pending interrupts. */
1311 rd32(E1000_ICR);
1312 /* Fire a link status change interrupt to start the watchdog. */
1313 wr32(E1000_ICS, E1000_ICS_LSC);
1314
1315 return 0;
1316
1317err_req_irq:
1318 igb_release_hw_control(adapter);
1319 /* e1000_power_down_phy(adapter); */
1320 igb_free_all_rx_resources(adapter);
1321err_setup_rx:
1322 igb_free_all_tx_resources(adapter);
1323err_setup_tx:
1324 igb_reset(adapter);
1325
1326 return err;
1327}
1328
1329/**
1330 * igb_close - Disables a network interface
1331 * @netdev: network interface device structure
1332 *
1333 * Returns 0, this is not allowed to fail
1334 *
1335 * The close entry point is called when an interface is de-activated
1336 * by the OS. The hardware is still under the driver's control, but
1337 * needs to be disabled. A global MAC reset is issued to stop the
1338 * hardware, and all transmit and receive resources are freed.
1339 **/
1340static int igb_close(struct net_device *netdev)
1341{
1342 struct igb_adapter *adapter = netdev_priv(netdev);
1343
1344 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
1345 igb_down(adapter);
1346
1347 igb_free_irq(adapter);
1348
1349 igb_free_all_tx_resources(adapter);
1350 igb_free_all_rx_resources(adapter);
1351
1352 /* kill manageability vlan ID if supported, but not if a vlan with
1353 * the same ID is registered on the host OS (let 8021q kill it) */
1354 if ((adapter->hw.mng_cookie.status &
1355 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
1356 !(adapter->vlgrp &&
1357 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
1358 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1359
1360 return 0;
1361}
1362
1363/**
1364 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
1365 * @adapter: board private structure
1366 * @tx_ring: tx descriptor ring (for a specific queue) to setup
1367 *
1368 * Return 0 on success, negative on failure
1369 **/
1370
1371int igb_setup_tx_resources(struct igb_adapter *adapter,
1372 struct igb_ring *tx_ring)
1373{
1374 struct pci_dev *pdev = adapter->pdev;
1375 int size;
1376
1377 size = sizeof(struct igb_buffer) * tx_ring->count;
1378 tx_ring->buffer_info = vmalloc(size);
1379 if (!tx_ring->buffer_info)
1380 goto err;
1381 memset(tx_ring->buffer_info, 0, size);
1382
1383 /* round up to nearest 4K */
1384 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc)
1385 + sizeof(u32);
1386 tx_ring->size = ALIGN(tx_ring->size, 4096);
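	/* Worked example (assuming the 16-byte legacy e1000_tx_desc): with
	 * the default count of 256, 256 * 16 + 4 = 4100 bytes, which
	 * ALIGN() rounds up to 8192. The extra u32 reserves room for the
	 * head write-back word that igb_configure_tx() points TDWBA at. */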
1387
1388 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
1389 &tx_ring->dma);
1390
1391 if (!tx_ring->desc)
1392 goto err;
1393
1394 tx_ring->adapter = adapter;
1395 tx_ring->next_to_use = 0;
1396 tx_ring->next_to_clean = 0;
1397 spin_lock_init(&tx_ring->tx_clean_lock);
1398 spin_lock_init(&tx_ring->tx_lock);
1399 return 0;
1400
1401err:
1402 vfree(tx_ring->buffer_info);
1403 dev_err(&adapter->pdev->dev,
1404 "Unable to allocate memory for the transmit descriptor ring\n");
1405 return -ENOMEM;
1406}
1407
1408/**
1409 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
1410 * (Descriptors) for all queues
1411 * @adapter: board private structure
1412 *
1413 * Return 0 on success, negative on failure
1414 **/
1415static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1416{
1417 int i, err = 0;
1418
1419 for (i = 0; i < adapter->num_tx_queues; i++) {
1420 err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1421 if (err) {
1422 dev_err(&adapter->pdev->dev,
1423 "Allocation for Tx Queue %u failed\n", i);
1424 for (i--; i >= 0; i--)
1425 igb_free_tx_resources(adapter,
1426 &adapter->tx_ring[i]);
1427 break;
1428 }
1429 }
1430
1431 return err;
1432}
1433
1434/**
1435 * igb_configure_tx - Configure transmit Unit after Reset
1436 * @adapter: board private structure
1437 *
1438 * Configure the Tx unit of the MAC after a reset.
1439 **/
1440static void igb_configure_tx(struct igb_adapter *adapter)
1441{
1442 u64 tdba, tdwba;
1443 struct e1000_hw *hw = &adapter->hw;
1444 u32 tctl;
1445 u32 txdctl, txctrl;
1446 int i;
1447
1448 for (i = 0; i < adapter->num_tx_queues; i++) {
1449 struct igb_ring *ring = &(adapter->tx_ring[i]);
1450
1451 wr32(E1000_TDLEN(i),
1452 ring->count * sizeof(struct e1000_tx_desc));
1453 tdba = ring->dma;
1454 wr32(E1000_TDBAL(i),
1455 tdba & 0x00000000ffffffffULL);
1456 wr32(E1000_TDBAH(i), tdba >> 32);
1457
1458 tdwba = ring->dma + ring->count * sizeof(struct e1000_tx_desc);
1459 tdwba |= 1; /* enable head wb */
1460 wr32(E1000_TDWBAL(i),
1461 tdwba & 0x00000000ffffffffULL);
1462 wr32(E1000_TDWBAH(i), tdwba >> 32);
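		/* with bit 0 of TDWBA set, the hardware DMA-writes the
		 * consumed head index into the u32 reserved just past the
		 * end of the ring, so Tx progress can be checked from
		 * memory instead of reading TDH across the bus */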
1463
1464 ring->head = E1000_TDH(i);
1465 ring->tail = E1000_TDT(i);
1466 writel(0, hw->hw_addr + ring->tail);
1467 writel(0, hw->hw_addr + ring->head);
1468 txdctl = rd32(E1000_TXDCTL(i));
1469 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1470 wr32(E1000_TXDCTL(i), txdctl);
1471
1472 /* Turn off Relaxed Ordering on head write-backs. The
1473 * writebacks MUST be delivered in order or it will
1474		 * completely screw up our bookkeeping.
1475 */
1476 txctrl = rd32(E1000_DCA_TXCTRL(i));
1477 txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1478 wr32(E1000_DCA_TXCTRL(i), txctrl);
1479 }
1480
1481
1482
1483 /* Use the default values for the Tx Inter Packet Gap (IPG) timer */
1484
1485 /* Program the Transmit Control Register */
1486
1487 tctl = rd32(E1000_TCTL);
1488 tctl &= ~E1000_TCTL_CT;
1489 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1490 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1491
1492 igb_config_collision_dist(hw);
1493
1494 /* Setup Transmit Descriptor Settings for eop descriptor */
1495 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
1496
1497 /* Enable transmits */
1498 tctl |= E1000_TCTL_EN;
1499
1500 wr32(E1000_TCTL, tctl);
1501}
1502
1503/**
1504 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
1505 * @adapter: board private structure
1506 * @rx_ring: rx descriptor ring (for a specific queue) to setup
1507 *
1508 * Returns 0 on success, negative on failure
1509 **/
1510
1511int igb_setup_rx_resources(struct igb_adapter *adapter,
1512 struct igb_ring *rx_ring)
1513{
1514 struct pci_dev *pdev = adapter->pdev;
1515 int size, desc_len;
1516
1517 size = sizeof(struct igb_buffer) * rx_ring->count;
1518 rx_ring->buffer_info = vmalloc(size);
1519 if (!rx_ring->buffer_info)
1520 goto err;
1521 memset(rx_ring->buffer_info, 0, size);
1522
1523 desc_len = sizeof(union e1000_adv_rx_desc);
1524
1525 /* Round up to nearest 4K */
1526 rx_ring->size = rx_ring->count * desc_len;
1527 rx_ring->size = ALIGN(rx_ring->size, 4096);
1528
1529 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
1530 &rx_ring->dma);
1531
1532 if (!rx_ring->desc)
1533 goto err;
1534
1535 rx_ring->next_to_clean = 0;
1536 rx_ring->next_to_use = 0;
1537 rx_ring->pending_skb = NULL;
1538
1539 rx_ring->adapter = adapter;
1540 /* FIXME: do we want to setup ring->napi->poll here? */
1541 rx_ring->napi.poll = adapter->napi.poll;
1542
1543 return 0;
1544
1545err:
1546 vfree(rx_ring->buffer_info);
1547 dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
1548 "the receive descriptor ring\n");
1549 return -ENOMEM;
1550}
1551
1552/**
1553 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
1554 * (Descriptors) for all queues
1555 * @adapter: board private structure
1556 *
1557 * Return 0 on success, negative on failure
1558 **/
1559static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
1560{
1561 int i, err = 0;
1562
1563 for (i = 0; i < adapter->num_rx_queues; i++) {
1564 err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1565 if (err) {
1566 dev_err(&adapter->pdev->dev,
1567 "Allocation for Rx Queue %u failed\n", i);
1568 for (i--; i >= 0; i--)
1569 igb_free_rx_resources(adapter,
1570 &adapter->rx_ring[i]);
1571 break;
1572 }
1573 }
1574
1575 return err;
1576}
1577
1578/**
1579 * igb_setup_rctl - configure the receive control registers
1580 * @adapter: Board private structure
1581 **/
1582static void igb_setup_rctl(struct igb_adapter *adapter)
1583{
1584 struct e1000_hw *hw = &adapter->hw;
1585 u32 rctl;
1586 u32 srrctl = 0;
1587 int i;
1588
1589 rctl = rd32(E1000_RCTL);
1590
1591 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1592
1593 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1594 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1595 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1596
1597 /* disable the stripping of CRC because it breaks
1598 * BMC firmware connected over SMBUS
1599 rctl |= E1000_RCTL_SECRC;
1600 */
1601
1602 rctl &= ~E1000_RCTL_SBP;
1603
1604 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1605 rctl &= ~E1000_RCTL_LPE;
1606 else
1607 rctl |= E1000_RCTL_LPE;
1608 if (adapter->rx_buffer_len <= IGB_RXBUFFER_2048) {
1609 /* Setup buffer sizes */
1610 rctl &= ~E1000_RCTL_SZ_4096;
1611 rctl |= E1000_RCTL_BSEX;
1612 switch (adapter->rx_buffer_len) {
1613 case IGB_RXBUFFER_256:
1614 rctl |= E1000_RCTL_SZ_256;
1615 rctl &= ~E1000_RCTL_BSEX;
1616 break;
1617 case IGB_RXBUFFER_512:
1618 rctl |= E1000_RCTL_SZ_512;
1619 rctl &= ~E1000_RCTL_BSEX;
1620 break;
1621 case IGB_RXBUFFER_1024:
1622 rctl |= E1000_RCTL_SZ_1024;
1623 rctl &= ~E1000_RCTL_BSEX;
1624 break;
1625 case IGB_RXBUFFER_2048:
1626 default:
1627 rctl |= E1000_RCTL_SZ_2048;
1628 rctl &= ~E1000_RCTL_BSEX;
1629 break;
1630 case IGB_RXBUFFER_4096:
1631 rctl |= E1000_RCTL_SZ_4096;
1632 break;
1633 case IGB_RXBUFFER_8192:
1634 rctl |= E1000_RCTL_SZ_8192;
1635 break;
1636 case IGB_RXBUFFER_16384:
1637 rctl |= E1000_RCTL_SZ_16384;
1638 break;
1639 }
1640 } else {
1641 rctl &= ~E1000_RCTL_BSEX;
1642 srrctl = adapter->rx_buffer_len >> E1000_SRRCTL_BSIZEPKT_SHIFT;
1643 }
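	/* SRRCTL sizes the packet buffer in 1 KB units, so the shift above
	 * converts rx_buffer_len from bytes; e.g. a 16384-byte buffer
	 * becomes 16 in the BSIZEPKT field. */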
1644
1645 /* 82575 and greater support packet-split where the protocol
1646 * header is placed in skb->data and the packet data is
1647 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
1648 * In the case of a non-split, skb->data is linearly filled,
1649 * followed by the page buffers. Therefore, skb->data is
1650 * sized to hold the largest protocol header.
1651 */
1652	/* allocations using alloc_page take too long for regular-MTU
1653	 * traffic, so only enable packet split for jumbo frames */
1654 if (rctl & E1000_RCTL_LPE) {
1655 adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
1656 srrctl = adapter->rx_ps_hdr_size <<
1657 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
1658 /* buffer size is ALWAYS one page */
1659 srrctl |= PAGE_SIZE >> E1000_SRRCTL_BSIZEPKT_SHIFT;
1660 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1661 } else {
1662 adapter->rx_ps_hdr_size = 0;
1663 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1664 }
1665
1666 for (i = 0; i < adapter->num_rx_queues; i++)
1667 wr32(E1000_SRRCTL(i), srrctl);
1668
1669 wr32(E1000_RCTL, rctl);
1670}
1671
1672/**
1673 * igb_configure_rx - Configure receive Unit after Reset
1674 * @adapter: board private structure
1675 *
1676 * Configure the Rx unit of the MAC after a reset.
1677 **/
1678static void igb_configure_rx(struct igb_adapter *adapter)
1679{
1680 u64 rdba;
1681 struct e1000_hw *hw = &adapter->hw;
1682 u32 rctl, rxcsum;
1683 u32 rxdctl;
1684 int i;
1685
1686 /* disable receives while setting up the descriptors */
1687 rctl = rd32(E1000_RCTL);
1688 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1689 wrfl();
1690 mdelay(10);
1691
1692 if (adapter->itr_setting > 3)
1693 wr32(E1000_ITR,
1694 1000000000 / (adapter->itr * 256));
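	/* adapter->itr is in interrupts/sec while the ITR register counts
	 * the interval in 256 ns units, hence 10^9 / (itr * 256); e.g.
	 * itr = 8000 ints/s programs an interval of 125 us, roughly 488
	 * register units. */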
1695
1696 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1697 * the Base and Length of the Rx Descriptor Ring */
1698 for (i = 0; i < adapter->num_rx_queues; i++) {
1699 struct igb_ring *ring = &(adapter->rx_ring[i]);
1700 rdba = ring->dma;
1701 wr32(E1000_RDBAL(i),
1702 rdba & 0x00000000ffffffffULL);
1703 wr32(E1000_RDBAH(i), rdba >> 32);
1704 wr32(E1000_RDLEN(i),
1705 ring->count * sizeof(union e1000_adv_rx_desc));
1706
1707 ring->head = E1000_RDH(i);
1708 ring->tail = E1000_RDT(i);
1709 writel(0, hw->hw_addr + ring->tail);
1710 writel(0, hw->hw_addr + ring->head);
1711
1712 rxdctl = rd32(E1000_RXDCTL(i));
1713 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1714 rxdctl &= 0xFFF00000;
1715 rxdctl |= IGB_RX_PTHRESH;
1716 rxdctl |= IGB_RX_HTHRESH << 8;
1717 rxdctl |= IGB_RX_WTHRESH << 16;
1718 wr32(E1000_RXDCTL(i), rxdctl);
1719 }
1720
1721 if (adapter->num_rx_queues > 1) {
1722 u32 random[10];
1723 u32 mrqc;
1724 u32 j, shift;
1725 union e1000_reta {
1726 u32 dword;
1727 u8 bytes[4];
1728 } reta;
1729
1730 get_random_bytes(&random[0], 40);
1731
1732 shift = 6;
1733 for (j = 0; j < (32 * 4); j++) {
1734 reta.bytes[j & 3] =
1735 (j % adapter->num_rx_queues) << shift;
1736 if ((j & 3) == 3)
1737 writel(reta.dword,
1738 hw->hw_addr + E1000_RETA(0) + (j & ~3));
1739 }
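		/* each RETA byte steers one of the 128 hash buckets to an
		 * rx queue, with the queue index in bits 7:6 (hence the
		 * shift of 6); four bucket bytes are packed per 32-bit
		 * register write. With two queues the buckets simply
		 * alternate 0,1,0,1,... */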
1740 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
1741
1742 /* Fill out hash function seeds */
1743 for (j = 0; j < 10; j++)
1744 array_wr32(E1000_RSSRK(0), j, random[j]);
1745
1746 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
1747 E1000_MRQC_RSS_FIELD_IPV4_TCP);
1748 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
1749 E1000_MRQC_RSS_FIELD_IPV6_TCP);
1750 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
1751 E1000_MRQC_RSS_FIELD_IPV6_UDP);
1752 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
1753 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
1754
1755
1756 wr32(E1000_MRQC, mrqc);
1757
1758 /* Multiqueue and raw packet checksumming are mutually
1759		 * exclusive. Note that this is not the same as TCP/IP
1760 * checksumming, which works fine. */
1761 rxcsum = rd32(E1000_RXCSUM);
1762 rxcsum |= E1000_RXCSUM_PCSD;
1763 wr32(E1000_RXCSUM, rxcsum);
1764 } else {
1765 /* Enable Receive Checksum Offload for TCP and UDP */
1766 rxcsum = rd32(E1000_RXCSUM);
1767 if (adapter->rx_csum) {
1768 rxcsum |= E1000_RXCSUM_TUOFL;
1769
1770 /* Enable IPv4 payload checksum for UDP fragments
1771 * Must be used in conjunction with packet-split. */
1772 if (adapter->rx_ps_hdr_size)
1773 rxcsum |= E1000_RXCSUM_IPPCSE;
1774 } else {
1775 rxcsum &= ~E1000_RXCSUM_TUOFL;
1776 /* don't need to clear IPPCSE as it defaults to 0 */
1777 }
1778 wr32(E1000_RXCSUM, rxcsum);
1779 }
1780
1781 if (adapter->vlgrp)
1782 wr32(E1000_RLPML,
1783 adapter->max_frame_size + VLAN_TAG_SIZE);
1784 else
1785 wr32(E1000_RLPML, adapter->max_frame_size);
1786
1787 /* Enable Receives */
1788 wr32(E1000_RCTL, rctl);
1789}
1790
1791/**
1792 * igb_free_tx_resources - Free Tx Resources per Queue
1793 * @adapter: board private structure
1794 * @tx_ring: Tx descriptor ring for a specific queue
1795 *
1796 * Free all transmit software resources
1797 **/
1798static void igb_free_tx_resources(struct igb_adapter *adapter,
1799 struct igb_ring *tx_ring)
1800{
1801 struct pci_dev *pdev = adapter->pdev;
1802
1803 igb_clean_tx_ring(adapter, tx_ring);
1804
1805 vfree(tx_ring->buffer_info);
1806 tx_ring->buffer_info = NULL;
1807
1808 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1809
1810 tx_ring->desc = NULL;
1811}
1812
1813/**
1814 * igb_free_all_tx_resources - Free Tx Resources for All Queues
1815 * @adapter: board private structure
1816 *
1817 * Free all transmit software resources
1818 **/
1819static void igb_free_all_tx_resources(struct igb_adapter *adapter)
1820{
1821 int i;
1822
1823 for (i = 0; i < adapter->num_tx_queues; i++)
1824 igb_free_tx_resources(adapter, &adapter->tx_ring[i]);
1825}
1826
1827static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
1828 struct igb_buffer *buffer_info)
1829{
1830 if (buffer_info->dma) {
1831 pci_unmap_page(adapter->pdev,
1832 buffer_info->dma,
1833 buffer_info->length,
1834 PCI_DMA_TODEVICE);
1835 buffer_info->dma = 0;
1836 }
1837 if (buffer_info->skb) {
1838 dev_kfree_skb_any(buffer_info->skb);
1839 buffer_info->skb = NULL;
1840 }
1841 buffer_info->time_stamp = 0;
1842 /* buffer_info must be completely set up in the transmit path */
1843}
1844
1845/**
1846 * igb_clean_tx_ring - Free Tx Buffers
1847 * @adapter: board private structure
1848 * @tx_ring: ring to be cleaned
1849 **/
1850static void igb_clean_tx_ring(struct igb_adapter *adapter,
1851 struct igb_ring *tx_ring)
1852{
1853 struct igb_buffer *buffer_info;
1854 unsigned long size;
1855 unsigned int i;
1856
1857 if (!tx_ring->buffer_info)
1858 return;
1859 /* Free all the Tx ring sk_buffs */
1860
1861 for (i = 0; i < tx_ring->count; i++) {
1862 buffer_info = &tx_ring->buffer_info[i];
1863 igb_unmap_and_free_tx_resource(adapter, buffer_info);
1864 }
1865
1866 size = sizeof(struct igb_buffer) * tx_ring->count;
1867 memset(tx_ring->buffer_info, 0, size);
1868
1869 /* Zero out the descriptor ring */
1870
1871 memset(tx_ring->desc, 0, tx_ring->size);
1872
1873 tx_ring->next_to_use = 0;
1874 tx_ring->next_to_clean = 0;
1875
1876 writel(0, adapter->hw.hw_addr + tx_ring->head);
1877 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1878}
1879
1880/**
1881 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
1882 * @adapter: board private structure
1883 **/
1884static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
1885{
1886 int i;
1887
1888 for (i = 0; i < adapter->num_tx_queues; i++)
1889 igb_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1890}
1891
1892/**
1893 * igb_free_rx_resources - Free Rx Resources
1894 * @adapter: board private structure
1895 * @rx_ring: ring to clean the resources from
1896 *
1897 * Free all receive software resources
1898 **/
1899static void igb_free_rx_resources(struct igb_adapter *adapter,
1900 struct igb_ring *rx_ring)
1901{
1902 struct pci_dev *pdev = adapter->pdev;
1903
1904 igb_clean_rx_ring(adapter, rx_ring);
1905
1906 vfree(rx_ring->buffer_info);
1907 rx_ring->buffer_info = NULL;
1908
1909 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1910
1911 rx_ring->desc = NULL;
1912}
1913
1914/**
1915 * igb_free_all_rx_resources - Free Rx Resources for All Queues
1916 * @adapter: board private structure
1917 *
1918 * Free all receive software resources
1919 **/
1920static void igb_free_all_rx_resources(struct igb_adapter *adapter)
1921{
1922 int i;
1923
1924 for (i = 0; i < adapter->num_rx_queues; i++)
1925 igb_free_rx_resources(adapter, &adapter->rx_ring[i]);
1926}
1927
1928/**
1929 * igb_clean_rx_ring - Free Rx Buffers per Queue
1930 * @adapter: board private structure
1931 * @rx_ring: ring to free buffers from
1932 **/
1933static void igb_clean_rx_ring(struct igb_adapter *adapter,
1934 struct igb_ring *rx_ring)
1935{
1936 struct igb_buffer *buffer_info;
1937 struct pci_dev *pdev = adapter->pdev;
1938 unsigned long size;
1939 unsigned int i;
1940
1941 if (!rx_ring->buffer_info)
1942 return;
1943 /* Free all the Rx ring sk_buffs */
1944 for (i = 0; i < rx_ring->count; i++) {
1945 buffer_info = &rx_ring->buffer_info[i];
1946 if (buffer_info->dma) {
1947 if (adapter->rx_ps_hdr_size)
1948 pci_unmap_single(pdev, buffer_info->dma,
1949 adapter->rx_ps_hdr_size,
1950 PCI_DMA_FROMDEVICE);
1951 else
1952 pci_unmap_single(pdev, buffer_info->dma,
1953 adapter->rx_buffer_len,
1954 PCI_DMA_FROMDEVICE);
1955 buffer_info->dma = 0;
1956 }
1957
1958 if (buffer_info->skb) {
1959 dev_kfree_skb(buffer_info->skb);
1960 buffer_info->skb = NULL;
1961 }
1962 if (buffer_info->page) {
1963 pci_unmap_page(pdev, buffer_info->page_dma,
1964 PAGE_SIZE, PCI_DMA_FROMDEVICE);
1965 put_page(buffer_info->page);
1966 buffer_info->page = NULL;
1967 buffer_info->page_dma = 0;
1968 }
1969 }
1970
1971	/* there may also be some cached data from a chained receive */
1972 if (rx_ring->pending_skb) {
1973 dev_kfree_skb(rx_ring->pending_skb);
1974 rx_ring->pending_skb = NULL;
1975 }
1976
1977 size = sizeof(struct igb_buffer) * rx_ring->count;
1978 memset(rx_ring->buffer_info, 0, size);
1979
1980 /* Zero out the descriptor ring */
1981 memset(rx_ring->desc, 0, rx_ring->size);
1982
1983 rx_ring->next_to_clean = 0;
1984 rx_ring->next_to_use = 0;
1985
1986 writel(0, adapter->hw.hw_addr + rx_ring->head);
1987 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1988}
1989
1990/**
1991 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
1992 * @adapter: board private structure
1993 **/
1994static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
1995{
1996 int i;
1997
1998 for (i = 0; i < adapter->num_rx_queues; i++)
1999 igb_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2000}
2001
2002/**
2003 * igb_set_mac - Change the Ethernet Address of the NIC
2004 * @netdev: network interface device structure
2005 * @p: pointer to an address structure
2006 *
2007 * Returns 0 on success, negative on failure
2008 **/
2009static int igb_set_mac(struct net_device *netdev, void *p)
2010{
2011 struct igb_adapter *adapter = netdev_priv(netdev);
2012 struct sockaddr *addr = p;
2013
2014 if (!is_valid_ether_addr(addr->sa_data))
2015 return -EADDRNOTAVAIL;
2016
2017 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2018 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
2019
2020 adapter->hw.mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2021
2022 return 0;
2023}
2024
2025/**
2026 * igb_set_multi - Multicast and Promiscuous mode set
2027 * @netdev: network interface device structure
2028 *
2029 * The set_multi entry point is called whenever the multicast address
2030 * list or the network interface flags are updated. This routine is
2031 * responsible for configuring the hardware for proper multicast,
2032 * promiscuous mode, and all-multi behavior.
2033 **/
2034static void igb_set_multi(struct net_device *netdev)
2035{
2036 struct igb_adapter *adapter = netdev_priv(netdev);
2037 struct e1000_hw *hw = &adapter->hw;
2038 struct e1000_mac_info *mac = &hw->mac;
2039 struct dev_mc_list *mc_ptr;
2040 u8 *mta_list;
2041 u32 rctl;
2042 int i;
2043
2044 /* Check for Promiscuous and All Multicast modes */
2045
2046 rctl = rd32(E1000_RCTL);
2047
2048 if (netdev->flags & IFF_PROMISC)
2049 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2050 else if (netdev->flags & IFF_ALLMULTI) {
2051 rctl |= E1000_RCTL_MPE;
2052 rctl &= ~E1000_RCTL_UPE;
2053 } else
2054 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2055
2056 wr32(E1000_RCTL, rctl);
2057
2058 if (!netdev->mc_count) {
2059 /* nothing to program, so clear mc list */
2060 igb_update_mc_addr_list(hw, NULL, 0, 1,
2061 mac->rar_entry_count);
2062 return;
2063 }
2064
2065	mta_list = kzalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC);
2066 if (!mta_list)
2067 return;
2068
2069 /* The shared function expects a packed array of only addresses. */
2070 mc_ptr = netdev->mc_list;
2071
2072 for (i = 0; i < netdev->mc_count; i++) {
2073 if (!mc_ptr)
2074 break;
2075 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2076 mc_ptr = mc_ptr->next;
2077 }
2078 igb_update_mc_addr_list(hw, mta_list, i, 1, mac->rar_entry_count);
2079 kfree(mta_list);
2080}
2081
2082/* Need to wait a few seconds after link up to get diagnostic information from
2083 * the phy */
2084static void igb_update_phy_info(unsigned long data)
2085{
2086 struct igb_adapter *adapter = (struct igb_adapter *) data;
2087 adapter->hw.phy.ops.get_phy_info(&adapter->hw);
2088}
2089
2090/**
2091 * igb_watchdog - Timer Call-back
2092 * @data: pointer to adapter cast into an unsigned long
2093 **/
2094static void igb_watchdog(unsigned long data)
2095{
2096 struct igb_adapter *adapter = (struct igb_adapter *)data;
2097 /* Do the rest outside of interrupt context */
2098 schedule_work(&adapter->watchdog_task);
2099}
2100
2101static void igb_watchdog_task(struct work_struct *work)
2102{
2103 struct igb_adapter *adapter = container_of(work,
2104 struct igb_adapter, watchdog_task);
2105 struct e1000_hw *hw = &adapter->hw;
2106
2107 struct net_device *netdev = adapter->netdev;
2108 struct igb_ring *tx_ring = adapter->tx_ring;
2109 struct e1000_mac_info *mac = &adapter->hw.mac;
2110 u32 link;
2111 s32 ret_val;
2112
2113 if ((netif_carrier_ok(netdev)) &&
2114 (rd32(E1000_STATUS) & E1000_STATUS_LU))
2115 goto link_up;
2116
2117 ret_val = hw->mac.ops.check_for_link(&adapter->hw);
2118 if ((ret_val == E1000_ERR_PHY) &&
2119 (hw->phy.type == e1000_phy_igp_3) &&
2120 (rd32(E1000_CTRL) &
2121 E1000_PHY_CTRL_GBE_DISABLE))
2122 dev_info(&adapter->pdev->dev,
2123 "Gigabit has been disabled, downgrading speed\n");
2124
2125 if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
2126 !(rd32(E1000_TXCW) & E1000_TXCW_ANE))
2127 link = mac->serdes_has_link;
2128 else
2129 link = rd32(E1000_STATUS) &
2130 E1000_STATUS_LU;
2131
2132 if (link) {
2133 if (!netif_carrier_ok(netdev)) {
2134 u32 ctrl;
2135 hw->mac.ops.get_speed_and_duplex(&adapter->hw,
2136 &adapter->link_speed,
2137 &adapter->link_duplex);
2138
2139 ctrl = rd32(E1000_CTRL);
2140 dev_info(&adapter->pdev->dev,
2141 "NIC Link is Up %d Mbps %s, "
2142 "Flow Control: %s\n",
2143 adapter->link_speed,
2144 adapter->link_duplex == FULL_DUPLEX ?
2145 "Full Duplex" : "Half Duplex",
2146 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2147 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2148 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2149 E1000_CTRL_TFCE) ? "TX" : "None")));
2150
2151 /* tweak tx_queue_len according to speed/duplex and
2152 * adjust the timeout factor */
2153 netdev->tx_queue_len = adapter->tx_queue_len;
2154 adapter->tx_timeout_factor = 1;
2155 switch (adapter->link_speed) {
2156 case SPEED_10:
2157 netdev->tx_queue_len = 10;
2158 adapter->tx_timeout_factor = 14;
2159 break;
2160 case SPEED_100:
2161 netdev->tx_queue_len = 100;
2162 /* maybe add some timeout factor ? */
2163 break;
2164 }
2165
2166 netif_carrier_on(netdev);
2167 netif_wake_queue(netdev);
2168
2169 if (!test_bit(__IGB_DOWN, &adapter->state))
2170 mod_timer(&adapter->phy_info_timer,
2171 round_jiffies(jiffies + 2 * HZ));
2172 }
2173 } else {
2174 if (netif_carrier_ok(netdev)) {
2175 adapter->link_speed = 0;
2176 adapter->link_duplex = 0;
2177 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2178 netif_carrier_off(netdev);
2179 netif_stop_queue(netdev);
2180 if (!test_bit(__IGB_DOWN, &adapter->state))
2181 mod_timer(&adapter->phy_info_timer,
2182 round_jiffies(jiffies + 2 * HZ));
2183 }
2184 }
2185
2186link_up:
2187 igb_update_stats(adapter);
2188
2189 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2190 adapter->tpt_old = adapter->stats.tpt;
2191 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
2192 adapter->colc_old = adapter->stats.colc;
2193
2194 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2195 adapter->gorc_old = adapter->stats.gorc;
2196 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2197 adapter->gotc_old = adapter->stats.gotc;
2198
2199 igb_update_adaptive(&adapter->hw);
2200
2201 if (!netif_carrier_ok(netdev)) {
2202 if (IGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
2203 /* We've lost link, so the controller stops DMA,
2204 * but we've got queued Tx work that's never going
2205 * to get done, so reset controller to flush Tx.
2206 * (Do the reset outside of interrupt context). */
2207 adapter->tx_timeout_count++;
2208 schedule_work(&adapter->reset_task);
2209 }
2210 }
2211
2212 /* Cause software interrupt to ensure rx ring is cleaned */
2213 wr32(E1000_ICS, E1000_ICS_RXDMT0);
2214
2215 /* Force detection of hung controller every watchdog period */
2216 tx_ring->detect_tx_hung = true;
2217
2218 /* Reset the timer */
2219 if (!test_bit(__IGB_DOWN, &adapter->state))
2220 mod_timer(&adapter->watchdog_timer,
2221 round_jiffies(jiffies + 2 * HZ));
2222}
2223
2224enum latency_range {
2225 lowest_latency = 0,
2226 low_latency = 1,
2227 bulk_latency = 2,
2228 latency_invalid = 255
2229};
2230
2231
2232static void igb_lower_rx_eitr(struct igb_adapter *adapter,
2233 struct igb_ring *rx_ring)
2234{
2235 struct e1000_hw *hw = &adapter->hw;
2236 int new_val;
2237
2238 new_val = rx_ring->itr_val / 2;
2239 if (new_val < IGB_MIN_DYN_ITR)
2240 new_val = IGB_MIN_DYN_ITR;
2241
2242 if (new_val != rx_ring->itr_val) {
2243 rx_ring->itr_val = new_val;
2244 wr32(rx_ring->itr_register,
2245 1000000000 / (new_val * 256));
2246 }
2247}
2248
2249static void igb_raise_rx_eitr(struct igb_adapter *adapter,
2250 struct igb_ring *rx_ring)
2251{
2252 struct e1000_hw *hw = &adapter->hw;
2253 int new_val;
2254
2255 new_val = rx_ring->itr_val * 2;
2256 if (new_val > IGB_MAX_DYN_ITR)
2257 new_val = IGB_MAX_DYN_ITR;
2258
2259 if (new_val != rx_ring->itr_val) {
2260 rx_ring->itr_val = new_val;
2261 wr32(rx_ring->itr_register,
2262 1000000000 / (new_val * 256));
2263 }
2264}
2265
2266/**
2267 * igb_update_itr - update the dynamic ITR value based on statistics
2268 * @adapter: pointer to adapter
2269 * @itr_setting: current adapter->itr
2270 * @packets: the number of packets during this measurement interval
2271 * @bytes: the number of bytes during this measurement interval
2272 *
2273 * Stores a new ITR value based on packet and byte counts during the
2274 * last interrupt. The advantage of per-interrupt computation is
2275 * faster updates and a more accurate ITR for the current traffic
2276 * pattern. Constants in this function were computed based on the
2277 * theoretical maximum wire speed; thresholds were set based on
2278 * testing data and on minimizing response time while increasing
2279 * bulk throughput. This functionality is controlled by the
2280 * InterruptThrottleRate module parameter (see igb_param.c).
2281 * NOTE: These calculations are only valid when operating in a
2282 * single-queue environment.
2283 **/
2284static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
2285 int packets, int bytes)
2286{
2287 unsigned int retval = itr_setting;
2288
2289 if (packets == 0)
2290 goto update_itr_done;
2291
2292 switch (itr_setting) {
2293 case lowest_latency:
2294 /* handle TSO and jumbo frames */
2295 if (bytes/packets > 8000)
2296 retval = bulk_latency;
2297 else if ((packets < 5) && (bytes > 512))
2298 retval = low_latency;
2299 break;
2300 case low_latency: /* 50 usec aka 20000 ints/s */
2301 if (bytes > 10000) {
2302 /* this if handles the TSO accounting */
2303 if (bytes/packets > 8000) {
2304 retval = bulk_latency;
2305 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
2306 retval = bulk_latency;
2307			} else if (packets > 35) {
2308 retval = lowest_latency;
2309 }
2310 } else if (bytes/packets > 2000) {
2311 retval = bulk_latency;
2312 } else if (packets <= 2 && bytes < 512) {
2313 retval = lowest_latency;
2314 }
2315 break;
2316 case bulk_latency: /* 250 usec aka 4000 ints/s */
2317 if (bytes > 25000) {
2318 if (packets > 35)
2319 retval = low_latency;
2320 } else if (bytes < 6000) {
2321 retval = low_latency;
2322 }
2323 break;
2324 }
2325
2326update_itr_done:
2327 return retval;
2328}
2329
2330static void igb_set_itr(struct igb_adapter *adapter, u16 itr_register,
2331 int rx_only)
2332{
2333 u16 current_itr;
2334 u32 new_itr = adapter->itr;
2335
2336 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2337 if (adapter->link_speed != SPEED_1000) {
2338 current_itr = 0;
2339 new_itr = 4000;
2340 goto set_itr_now;
2341 }
2342
2343 adapter->rx_itr = igb_update_itr(adapter,
2344 adapter->rx_itr,
2345 adapter->rx_ring->total_packets,
2346 adapter->rx_ring->total_bytes);
2347 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2348 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2349 adapter->rx_itr = low_latency;
2350
2351 if (!rx_only) {
2352 adapter->tx_itr = igb_update_itr(adapter,
2353 adapter->tx_itr,
2354 adapter->tx_ring->total_packets,
2355 adapter->tx_ring->total_bytes);
2356 /* conservative mode (itr 3) eliminates the
2357 * lowest_latency setting */
2358 if (adapter->itr_setting == 3 &&
2359 adapter->tx_itr == lowest_latency)
2360 adapter->tx_itr = low_latency;
2361
2362 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2363 } else {
2364 current_itr = adapter->rx_itr;
2365 }
2366
2367 switch (current_itr) {
2368 /* counts and packets in update_itr are dependent on these numbers */
2369 case lowest_latency:
2370 new_itr = 70000;
2371 break;
2372 case low_latency:
2373 new_itr = 20000; /* aka hwitr = ~200 */
2374 break;
2375 case bulk_latency:
2376 new_itr = 4000;
2377 break;
2378 default:
2379 break;
2380 }
2381
2382set_itr_now:
2383 if (new_itr != adapter->itr) {
2384 /* this attempts to bias the interrupt rate towards Bulk
2385 * by adding intermediate steps when interrupt rate is
2386 * increasing */
2387 new_itr = new_itr > adapter->itr ?
2388 min(adapter->itr + (new_itr >> 2), new_itr) :
2389 new_itr;
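		/* e.g. stepping from 4000 up toward 20000 first lands on
		 * 4000 + (20000 >> 2) = 9000, reaching 20000 over several
		 * adjustments rather than in one jump */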
2390 /* Don't write the value here; it resets the adapter's
2391 * internal timer, and causes us to delay far longer than
2392 * we should between interrupts. Instead, we write the ITR
2393 * value at the beginning of the next interrupt so the timing
2394 * ends up being correct.
2395 */
2396 adapter->itr = new_itr;
2397 adapter->set_itr = 1;
2398 }
2399
2400 return;
2401}
2402
2403
2404#define IGB_TX_FLAGS_CSUM 0x00000001
2405#define IGB_TX_FLAGS_VLAN 0x00000002
2406#define IGB_TX_FLAGS_TSO 0x00000004
2407#define IGB_TX_FLAGS_IPV4 0x00000008
2408#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
2409#define IGB_TX_FLAGS_VLAN_SHIFT 16
2410
2411static inline int igb_tso_adv(struct igb_adapter *adapter,
2412 struct igb_ring *tx_ring,
2413 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2414{
2415 struct e1000_adv_tx_context_desc *context_desc;
2416 unsigned int i;
2417 int err;
2418 struct igb_buffer *buffer_info;
2419 u32 info = 0, tu_cmd = 0;
2420 u32 mss_l4len_idx, l4len;
2421 *hdr_len = 0;
2422
2423 if (skb_header_cloned(skb)) {
2424 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2425 if (err)
2426 return err;
2427 }
2428
2429 l4len = tcp_hdrlen(skb);
2430 *hdr_len += l4len;
2431
2432 if (skb->protocol == htons(ETH_P_IP)) {
2433 struct iphdr *iph = ip_hdr(skb);
2434 iph->tot_len = 0;
2435 iph->check = 0;
2436 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2437 iph->daddr, 0,
2438 IPPROTO_TCP,
2439 0);
2440 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
2441 ipv6_hdr(skb)->payload_len = 0;
2442 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2443 &ipv6_hdr(skb)->daddr,
2444 0, IPPROTO_TCP, 0);
2445 }
2446
2447 i = tx_ring->next_to_use;
2448
2449 buffer_info = &tx_ring->buffer_info[i];
2450 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
2451 /* VLAN MACLEN IPLEN */
2452 if (tx_flags & IGB_TX_FLAGS_VLAN)
2453 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
2454 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2455 *hdr_len += skb_network_offset(skb);
2456 info |= skb_network_header_len(skb);
2457 *hdr_len += skb_network_header_len(skb);
2458 context_desc->vlan_macip_lens = cpu_to_le32(info);
2459
2460 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2461 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2462
2463 if (skb->protocol == htons(ETH_P_IP))
2464 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2465 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2466
2467 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2468
2469 /* MSS L4LEN IDX */
2470 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
2471 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
2472
2473 /* Context index must be unique per ring. Luckily, so is the interrupt
2474 * mask value. */
2475 mss_l4len_idx |= tx_ring->eims_value >> 4;
2476
2477 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
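	/* e.g. for a standard-MTU TCP flow this packs gso_size = 1448
	 * into the MSS field (shift 16) and l4len = 20, a bare TCP
	 * header, into the L4LEN field (shift 8) */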
2478 context_desc->seqnum_seed = 0;
2479
2480 buffer_info->time_stamp = jiffies;
2481 buffer_info->dma = 0;
2482 i++;
2483 if (i == tx_ring->count)
2484 i = 0;
2485
2486 tx_ring->next_to_use = i;
2487
2488 return true;
2489}
2490
2491static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
2492 struct igb_ring *tx_ring,
2493 struct sk_buff *skb, u32 tx_flags)
2494{
2495 struct e1000_adv_tx_context_desc *context_desc;
2496 unsigned int i;
2497 struct igb_buffer *buffer_info;
2498 u32 info = 0, tu_cmd = 0;
2499
2500 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
2501 (tx_flags & IGB_TX_FLAGS_VLAN)) {
2502 i = tx_ring->next_to_use;
2503 buffer_info = &tx_ring->buffer_info[i];
2504 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
2505
2506 if (tx_flags & IGB_TX_FLAGS_VLAN)
2507 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
2508 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2509 if (skb->ip_summed == CHECKSUM_PARTIAL)
2510 info |= skb_network_header_len(skb);
2511
2512 context_desc->vlan_macip_lens = cpu_to_le32(info);
2513
2514 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2515
2516 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2517 if (skb->protocol == htons(ETH_P_IP))
2518 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2519 if (skb->sk && (skb->sk->sk_protocol == IPPROTO_TCP))
2520 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2521 }
2522
2523 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2524 context_desc->seqnum_seed = 0;
2525 context_desc->mss_l4len_idx =
2526 cpu_to_le32(tx_ring->eims_value >> 4);
2527
2528 buffer_info->time_stamp = jiffies;
2529 buffer_info->dma = 0;
2530
2531 i++;
2532 if (i == tx_ring->count)
2533 i = 0;
2534 tx_ring->next_to_use = i;
2535
2536 return true;
2537 }
2538
2539
2540 return false;
2541}
2542
2543#define IGB_MAX_TXD_PWR 16
2544#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
2545
2546static inline int igb_tx_map_adv(struct igb_adapter *adapter,
2547 struct igb_ring *tx_ring,
2548 struct sk_buff *skb)
2549{
2550 struct igb_buffer *buffer_info;
2551 unsigned int len = skb_headlen(skb);
2552 unsigned int count = 0, i;
2553 unsigned int f;
2554
2555 i = tx_ring->next_to_use;
2556
2557 buffer_info = &tx_ring->buffer_info[i];
2558 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
2559 buffer_info->length = len;
2560 /* set time_stamp *before* dma to help avoid a possible race */
2561 buffer_info->time_stamp = jiffies;
2562 buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
2563 PCI_DMA_TODEVICE);
2564 count++;
2565 i++;
2566 if (i == tx_ring->count)
2567 i = 0;
2568
2569 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2570 struct skb_frag_struct *frag;
2571
2572 frag = &skb_shinfo(skb)->frags[f];
2573 len = frag->size;
2574
2575 buffer_info = &tx_ring->buffer_info[i];
2576 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
2577 buffer_info->length = len;
2578 buffer_info->time_stamp = jiffies;
2579 buffer_info->dma = pci_map_page(adapter->pdev,
2580 frag->page,
2581 frag->page_offset,
2582 len,
2583 PCI_DMA_TODEVICE);
2584
2585 count++;
2586 i++;
2587 if (i == tx_ring->count)
2588 i = 0;
2589 }
2590
2591 i = (i == 0) ? tx_ring->count - 1 : i - 1;
2592 tx_ring->buffer_info[i].skb = skb;
2593
2594 return count;
2595}
2596
2597static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
2598 struct igb_ring *tx_ring,
2599 int tx_flags, int count, u32 paylen,
2600 u8 hdr_len)
2601{
2602 union e1000_adv_tx_desc *tx_desc = NULL;
2603 struct igb_buffer *buffer_info;
2604 u32 olinfo_status = 0, cmd_type_len;
2605 unsigned int i;
2606
2607 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
2608 E1000_ADVTXD_DCMD_DEXT);
2609
2610 if (tx_flags & IGB_TX_FLAGS_VLAN)
2611 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2612
2613 if (tx_flags & IGB_TX_FLAGS_TSO) {
2614 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
2615
2616 /* insert tcp checksum */
2617 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2618
2619 /* insert ip checksum */
2620 if (tx_flags & IGB_TX_FLAGS_IPV4)
2621 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
2622
2623 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
2624 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2625 }
2626
2627 if (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
2628 IGB_TX_FLAGS_VLAN))
2629 olinfo_status |= tx_ring->eims_value >> 4;
2630
2631 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
2632
2633 i = tx_ring->next_to_use;
2634 while (count--) {
2635 buffer_info = &tx_ring->buffer_info[i];
2636 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
2637 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
2638 tx_desc->read.cmd_type_len =
2639 cpu_to_le32(cmd_type_len | buffer_info->length);
2640 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2641 i++;
2642 if (i == tx_ring->count)
2643 i = 0;
2644 }
2645
2646 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
2647 /* Force memory writes to complete before letting h/w
2648 * know there are new descriptors to fetch. (Only
2649 * applicable for weak-ordered memory model archs,
2650 * such as IA-64). */
2651 wmb();
2652
2653 tx_ring->next_to_use = i;
2654 writel(i, adapter->hw.hw_addr + tx_ring->tail);
2655	/* we need this if more than one processor can write to our tail
2656	 * at a time; it synchronizes IO on IA64/Altix systems */
2657 mmiowb();
2658}
2659
2660static int __igb_maybe_stop_tx(struct net_device *netdev,
2661 struct igb_ring *tx_ring, int size)
2662{
2663 struct igb_adapter *adapter = netdev_priv(netdev);
2664
2665 netif_stop_queue(netdev);
2666 /* Herbert's original patch had:
2667 * smp_mb__after_netif_stop_queue();
2668 * but since that doesn't exist yet, just open code it. */
2669 smp_mb();
2670
2671	/* We need to check again in case another CPU has just
2672 * made room available. */
2673 if (IGB_DESC_UNUSED(tx_ring) < size)
2674 return -EBUSY;
2675
2676 /* A reprieve! */
2677 netif_start_queue(netdev);
2678 ++adapter->restart_queue;
2679 return 0;
2680}
2681
2682static int igb_maybe_stop_tx(struct net_device *netdev,
2683 struct igb_ring *tx_ring, int size)
2684{
2685 if (IGB_DESC_UNUSED(tx_ring) >= size)
2686 return 0;
2687 return __igb_maybe_stop_tx(netdev, tx_ring, size);
2688}
2689
2690#define TXD_USE_COUNT(S) (((S) >> (IGB_MAX_TXD_PWR)) + 1)
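/* e.g. a 70000-byte fragment needs (70000 >> 16) + 1 = 2 descriptors,
 * since one data descriptor carries less than IGB_MAX_DATA_PER_TXD bytes */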
2691
2692static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2693 struct net_device *netdev,
2694 struct igb_ring *tx_ring)
2695{
2696 struct igb_adapter *adapter = netdev_priv(netdev);
2697 unsigned int tx_flags = 0;
2698 unsigned int len;
2699 unsigned long irq_flags;
2700 u8 hdr_len = 0;
2701 int tso = 0;
2702
2703 len = skb_headlen(skb);
2704
2705 if (test_bit(__IGB_DOWN, &adapter->state)) {
2706 dev_kfree_skb_any(skb);
2707 return NETDEV_TX_OK;
2708 }
2709
2710 if (skb->len <= 0) {
2711 dev_kfree_skb_any(skb);
2712 return NETDEV_TX_OK;
2713 }
2714
2715 if (!spin_trylock_irqsave(&tx_ring->tx_lock, irq_flags))
2716 /* Collision - tell upper layer to requeue */
2717 return NETDEV_TX_LOCKED;
2718
2719 /* need: 1 descriptor per page,
2720 * + 2 desc gap to keep tail from touching head,
2721 * + 1 desc for skb->data,
2722 * + 1 desc for context descriptor,
2723 * otherwise try next time */
2724 if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
2725 /* this is a hard error */
2726 spin_unlock_irqrestore(&tx_ring->tx_lock, irq_flags);
2727 return NETDEV_TX_BUSY;
2728 }
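	/* e.g. an skb with three page fragments reserves 3 + 4 = 7 free
	 * descriptors: one per fragment, one for skb->data, one for the
	 * context descriptor, plus the two-slot gap noted above */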
2729
2730 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
2731 tx_flags |= IGB_TX_FLAGS_VLAN;
2732 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
2733 }
2734
2735 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
2736 &hdr_len) : 0;
2737
2738 if (tso < 0) {
2739 dev_kfree_skb_any(skb);
2740 spin_unlock_irqrestore(&tx_ring->tx_lock, irq_flags);
2741 return NETDEV_TX_OK;
2742 }
2743
2744 if (tso)
2745 tx_flags |= IGB_TX_FLAGS_TSO;
2746 else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags))
2747 if (skb->ip_summed == CHECKSUM_PARTIAL)
2748 tx_flags |= IGB_TX_FLAGS_CSUM;
2749
2750 if (skb->protocol == htons(ETH_P_IP))
2751 tx_flags |= IGB_TX_FLAGS_IPV4;
2752
2753 igb_tx_queue_adv(adapter, tx_ring, tx_flags,
2754 igb_tx_map_adv(adapter, tx_ring, skb),
2755 skb->len, hdr_len);
2756
2757 netdev->trans_start = jiffies;
2758
2759 /* Make sure there is space in the ring for the next send. */
2760 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
2761
2762 spin_unlock_irqrestore(&tx_ring->tx_lock, irq_flags);
2763 return NETDEV_TX_OK;
2764}
2765
2766static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
2767{
2768 struct igb_adapter *adapter = netdev_priv(netdev);
2769 struct igb_ring *tx_ring = &adapter->tx_ring[0];
2770
2771 /* This goes back to the question of how to logically map a tx queue
2772	 * to a flow. Right now, performance suffers slightly when
2773	 * using multiple tx queues. If the stack breaks away from a
2774 * single qdisc implementation, we can look at this again. */
2775 return (igb_xmit_frame_ring_adv(skb, netdev, tx_ring));
2776}
2777
2778/**
2779 * igb_tx_timeout - Respond to a Tx Hang
2780 * @netdev: network interface device structure
2781 **/
2782static void igb_tx_timeout(struct net_device *netdev)
2783{
2784 struct igb_adapter *adapter = netdev_priv(netdev);
2785 struct e1000_hw *hw = &adapter->hw;
2786
2787 /* Do the reset outside of interrupt context */
2788 adapter->tx_timeout_count++;
2789 schedule_work(&adapter->reset_task);
2790 wr32(E1000_EICS, adapter->eims_enable_mask &
2791 ~(E1000_EIMS_TCP_TIMER | E1000_EIMS_OTHER));
2792}
2793
2794static void igb_reset_task(struct work_struct *work)
2795{
2796 struct igb_adapter *adapter;
2797 adapter = container_of(work, struct igb_adapter, reset_task);
2798
2799 igb_reinit_locked(adapter);
2800}
2801
2802/**
2803 * igb_get_stats - Get System Network Statistics
2804 * @netdev: network interface device structure
2805 *
2806 * Returns the address of the device statistics structure.
2807 * The statistics are actually updated from the timer callback.
2808 **/
2809static struct net_device_stats *
2810igb_get_stats(struct net_device *netdev)
2811{
2812 struct igb_adapter *adapter = netdev_priv(netdev);
2813
2814 /* only return the current stats */
2815 return &adapter->net_stats;
2816}
2817
2818/**
2819 * igb_change_mtu - Change the Maximum Transfer Unit
2820 * @netdev: network interface device structure
2821 * @new_mtu: new value for maximum frame size
2822 *
2823 * Returns 0 on success, negative on failure
2824 **/
2825static int igb_change_mtu(struct net_device *netdev, int new_mtu)
2826{
2827 struct igb_adapter *adapter = netdev_priv(netdev);
2828 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2829
2830 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
2831 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2832 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
2833 return -EINVAL;
2834 }
2835
2836#define MAX_STD_JUMBO_FRAME_SIZE 9234
2837 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2838 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
2839 return -EINVAL;
2840 }
2841
2842 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
2843 msleep(1);
2844 /* igb_down has a dependency on max_frame_size */
2845 adapter->max_frame_size = max_frame;
2846 if (netif_running(netdev))
2847 igb_down(adapter);
2848
2849 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
2850	 * means we reserve 2 more; this pushes us to allocate from the next
2851 * larger slab size.
2852 * i.e. RXBUFFER_2048 --> size-4096 slab
2853 */
2854
2855 if (max_frame <= IGB_RXBUFFER_256)
2856 adapter->rx_buffer_len = IGB_RXBUFFER_256;
2857 else if (max_frame <= IGB_RXBUFFER_512)
2858 adapter->rx_buffer_len = IGB_RXBUFFER_512;
2859 else if (max_frame <= IGB_RXBUFFER_1024)
2860 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
2861 else if (max_frame <= IGB_RXBUFFER_2048)
2862 adapter->rx_buffer_len = IGB_RXBUFFER_2048;
2863 else
2864 adapter->rx_buffer_len = IGB_RXBUFFER_4096;
2865 /* adjust allocation if LPE protects us, and we aren't using SBP */
2866 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
2867 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
2868 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
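	/* e.g. a 1500-byte MTU gives max_frame = 1518; it lands in the
	 * 2048 bucket above but is caught here and trimmed back to
	 * MAXIMUM_ETHERNET_VLAN_SIZE so the skb fits a smaller slab */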
2869
2870 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
2871 netdev->mtu, new_mtu);
2872 netdev->mtu = new_mtu;
2873
2874 if (netif_running(netdev))
2875 igb_up(adapter);
2876 else
2877 igb_reset(adapter);
2878
2879 clear_bit(__IGB_RESETTING, &adapter->state);
2880
2881 return 0;
2882}
2883
2884/**
2885 * igb_update_stats - Update the board statistics counters
2886 * @adapter: board private structure
2887 **/
2888
2889void igb_update_stats(struct igb_adapter *adapter)
2890{
2891 struct e1000_hw *hw = &adapter->hw;
2892 struct pci_dev *pdev = adapter->pdev;
2893 u16 phy_tmp;
2894
2895#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
2896
2897 /*
2898 * Prevent stats update while adapter is being reset, or if the pci
2899 * connection is down.
2900 */
2901 if (adapter->link_speed == 0)
2902 return;
2903 if (pci_channel_offline(pdev))
2904 return;
2905
2906 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
2907 adapter->stats.gprc += rd32(E1000_GPRC);
2908 adapter->stats.gorc += rd32(E1000_GORCL);
2909 rd32(E1000_GORCH); /* clear GORCL */
2910 adapter->stats.bprc += rd32(E1000_BPRC);
2911 adapter->stats.mprc += rd32(E1000_MPRC);
2912 adapter->stats.roc += rd32(E1000_ROC);
2913
2914 adapter->stats.prc64 += rd32(E1000_PRC64);
2915 adapter->stats.prc127 += rd32(E1000_PRC127);
2916 adapter->stats.prc255 += rd32(E1000_PRC255);
2917 adapter->stats.prc511 += rd32(E1000_PRC511);
2918 adapter->stats.prc1023 += rd32(E1000_PRC1023);
2919 adapter->stats.prc1522 += rd32(E1000_PRC1522);
2920 adapter->stats.symerrs += rd32(E1000_SYMERRS);
2921 adapter->stats.sec += rd32(E1000_SEC);
2922
2923 adapter->stats.mpc += rd32(E1000_MPC);
2924 adapter->stats.scc += rd32(E1000_SCC);
2925 adapter->stats.ecol += rd32(E1000_ECOL);
2926 adapter->stats.mcc += rd32(E1000_MCC);
2927 adapter->stats.latecol += rd32(E1000_LATECOL);
2928 adapter->stats.dc += rd32(E1000_DC);
2929 adapter->stats.rlec += rd32(E1000_RLEC);
2930 adapter->stats.xonrxc += rd32(E1000_XONRXC);
2931 adapter->stats.xontxc += rd32(E1000_XONTXC);
2932 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
2933 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
2934 adapter->stats.fcruc += rd32(E1000_FCRUC);
2935 adapter->stats.gptc += rd32(E1000_GPTC);
2936 adapter->stats.gotc += rd32(E1000_GOTCL);
2937 rd32(E1000_GOTCH); /* clear GOTCL */
2938 adapter->stats.rnbc += rd32(E1000_RNBC);
2939 adapter->stats.ruc += rd32(E1000_RUC);
2940 adapter->stats.rfc += rd32(E1000_RFC);
2941 adapter->stats.rjc += rd32(E1000_RJC);
2942 adapter->stats.tor += rd32(E1000_TORH);
2943 adapter->stats.tot += rd32(E1000_TOTH);
2944 adapter->stats.tpr += rd32(E1000_TPR);
2945
2946 adapter->stats.ptc64 += rd32(E1000_PTC64);
2947 adapter->stats.ptc127 += rd32(E1000_PTC127);
2948 adapter->stats.ptc255 += rd32(E1000_PTC255);
2949 adapter->stats.ptc511 += rd32(E1000_PTC511);
2950 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
2951 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
2952
2953 adapter->stats.mptc += rd32(E1000_MPTC);
2954 adapter->stats.bptc += rd32(E1000_BPTC);
2955
2956 /* used for adaptive IFS */
2957
2958 hw->mac.tx_packet_delta = rd32(E1000_TPT);
2959 adapter->stats.tpt += hw->mac.tx_packet_delta;
2960 hw->mac.collision_delta = rd32(E1000_COLC);
2961 adapter->stats.colc += hw->mac.collision_delta;
2962
2963 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
2964 adapter->stats.rxerrc += rd32(E1000_RXERRC);
2965 adapter->stats.tncrs += rd32(E1000_TNCRS);
2966 adapter->stats.tsctc += rd32(E1000_TSCTC);
2967 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
2968
2969 adapter->stats.iac += rd32(E1000_IAC);
2970 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
2971 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
2972 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
2973 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
2974 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
2975 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
2976 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
2977 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
2978
2979 /* Fill out the OS statistics structure */
2980 adapter->net_stats.multicast = adapter->stats.mprc;
2981 adapter->net_stats.collisions = adapter->stats.colc;
2982
2983 /* Rx Errors */
2984
2985 /* RLEC on some newer hardware can be incorrect so build
2986 * our own version based on RUC and ROC */
2987 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
2988 adapter->stats.crcerrs + adapter->stats.algnerrc +
2989 adapter->stats.ruc + adapter->stats.roc +
2990 adapter->stats.cexterr;
2991 adapter->net_stats.rx_length_errors = adapter->stats.ruc +
2992 adapter->stats.roc;
2993 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2994 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
2995 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
2996
2997 /* Tx Errors */
2998 adapter->net_stats.tx_errors = adapter->stats.ecol +
2999 adapter->stats.latecol;
3000 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
3001 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
3002 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
3003
3004 /* Tx Dropped needs to be maintained elsewhere */
3005
3006 /* Phy Stats */
3007 if (hw->phy.media_type == e1000_media_type_copper) {
3008 if ((adapter->link_speed == SPEED_1000) &&
3009 (!hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS,
3010 &phy_tmp))) {
3011 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3012 adapter->phy_stats.idle_errors += phy_tmp;
3013 }
3014 }
3015
3016 /* Management Stats */
3017 adapter->stats.mgptc += rd32(E1000_MGTPTC);
3018 adapter->stats.mgprc += rd32(E1000_MGTPRC);
3019 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
3020}
3021
3023static irqreturn_t igb_msix_other(int irq, void *data)
3024{
3025 struct net_device *netdev = data;
3026 struct igb_adapter *adapter = netdev_priv(netdev);
3027 struct e1000_hw *hw = &adapter->hw;
3028 u32 eicr;
3029 /* disable interrupts from the "other" bit, avoid re-entry */
3030 wr32(E1000_EIMC, E1000_EIMS_OTHER);
3031
3032 eicr = rd32(E1000_EICR);
3033
3034 if (eicr & E1000_EIMS_OTHER) {
3035 u32 icr = rd32(E1000_ICR);
3036 /* reading ICR causes bit 31 of EICR to be cleared */
3037 if (!(icr & E1000_ICR_LSC))
3038 goto no_link_interrupt;
3039 hw->mac.get_link_status = 1;
3040 /* guard against interrupt when we're going down */
3041 if (!test_bit(__IGB_DOWN, &adapter->state))
3042 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3043 }
3044
3045no_link_interrupt:
3046 wr32(E1000_IMS, E1000_IMS_LSC);
3047 wr32(E1000_EIMS, E1000_EIMS_OTHER);
3048
3049 return IRQ_HANDLED;
3050}
3051
3052static irqreturn_t igb_msix_tx(int irq, void *data)
3053{
3054 struct igb_ring *tx_ring = data;
3055 struct igb_adapter *adapter = tx_ring->adapter;
3056 struct e1000_hw *hw = &adapter->hw;
3057
3058 if (!tx_ring->itr_val)
3059 wr32(E1000_EIMC, tx_ring->eims_value);
3060
3061 tx_ring->total_bytes = 0;
3062 tx_ring->total_packets = 0;
3063 if (!igb_clean_tx_irq(adapter, tx_ring))
3064 /* Ring was not completely cleaned, so fire another interrupt */
3065 wr32(E1000_EICS, tx_ring->eims_value);
3066
3067 if (!tx_ring->itr_val)
3068 wr32(E1000_EIMS, tx_ring->eims_value);
3069 return IRQ_HANDLED;
3070}
3071
3072static irqreturn_t igb_msix_rx(int irq, void *data)
3073{
3074 struct igb_ring *rx_ring = data;
3075 struct igb_adapter *adapter = rx_ring->adapter;
3076 struct e1000_hw *hw = &adapter->hw;
3077
3078 if (!rx_ring->itr_val)
3079 wr32(E1000_EIMC, rx_ring->eims_value);
3080
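	/* If a poll is already scheduled, this interrupt arrived while the
	 * ring was still being serviced; instead of re-scheduling, drop the
	 * ring's interrupt rate once so the redundant interrupts stop. */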
3081 if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi)) {
3082 rx_ring->total_bytes = 0;
3083 rx_ring->total_packets = 0;
3084 rx_ring->no_itr_adjust = 0;
3085 __netif_rx_schedule(adapter->netdev, &rx_ring->napi);
3086 } else {
3087 if (!rx_ring->no_itr_adjust) {
3088 igb_lower_rx_eitr(adapter, rx_ring);
3089 rx_ring->no_itr_adjust = 1;
3090 }
3091 }
3092
3093 return IRQ_HANDLED;
3094}
3095
3096
3097/**
3098 * igb_intr_msi - Interrupt Handler
3099 * @irq: interrupt number
3100 * @data: pointer to a network interface device structure
3101 **/
3102static irqreturn_t igb_intr_msi(int irq, void *data)
3103{
3104 struct net_device *netdev = data;
3105 struct igb_adapter *adapter = netdev_priv(netdev);
3106 struct napi_struct *napi = &adapter->napi;
3107 struct e1000_hw *hw = &adapter->hw;
3108 /* read ICR disables interrupts using IAM */
3109 u32 icr = rd32(E1000_ICR);
3110
3111 /* Write the ITR value calculated at the end of the
3112 * previous interrupt.
3113 */
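	/* adapter->itr is the requested rate in interrupts/sec; the ITR
	 * register counts in 256 ns units (the e1000-family convention),
	 * so the interval programmed is 10^9 ns / (rate * 256). */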
3114 if (adapter->set_itr) {
3115 wr32(E1000_ITR,
3116 1000000000 / (adapter->itr * 256));
3117 adapter->set_itr = 0;
3118 }
3119
3121 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3122 hw->mac.get_link_status = 1;
3123 if (!test_bit(__IGB_DOWN, &adapter->state))
3124 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3125 }
3126
3127 if (netif_rx_schedule_prep(netdev, napi)) {
3128 adapter->tx_ring->total_bytes = 0;
3129 adapter->tx_ring->total_packets = 0;
3130 adapter->rx_ring->total_bytes = 0;
3131 adapter->rx_ring->total_packets = 0;
3132 __netif_rx_schedule(netdev, napi);
3133 }
3134
3135 return IRQ_HANDLED;
3136}
3137
3138/**
3139 * igb_intr - Interrupt Handler
3140 * @irq: interrupt number
3141 * @data: pointer to a network interface device structure
3142 **/
3143static irqreturn_t igb_intr(int irq, void *data)
3144{
3145 struct net_device *netdev = data;
3146 struct igb_adapter *adapter = netdev_priv(netdev);
3147 struct napi_struct *napi = &adapter->napi;
3148 struct e1000_hw *hw = &adapter->hw;
3149 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
3150 * need for the IMC write */
3151 u32 icr = rd32(E1000_ICR);
3152 u32 eicr = 0;
3153 if (!icr)
3154 return IRQ_NONE; /* Not our interrupt */
3155
3156 /* Write the ITR value calculated at the end of the
3157 * previous interrupt.
3158 */
3159 if (adapter->set_itr) {
3160 wr32(E1000_ITR,
3161 1000000000 / (adapter->itr * 256));
3162 adapter->set_itr = 0;
3163 }
3164
3165 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
3166 * not set, then the adapter didn't send an interrupt */
3167 if (!(icr & E1000_ICR_INT_ASSERTED))
3168 return IRQ_NONE;
3169
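	/* reading EICR acknowledges and clears the extended cause bits;
	 * the value itself is not needed in legacy interrupt mode */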
3170 eicr = rd32(E1000_EICR);
3171
3172 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3173 hw->mac.get_link_status = 1;
3174 /* guard against interrupt when we're going down */
3175 if (!test_bit(__IGB_DOWN, &adapter->state))
3176 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3177 }
3178
3179 if (netif_rx_schedule_prep(netdev, napi)) {
3180 adapter->tx_ring->total_bytes = 0;
3181 adapter->rx_ring->total_bytes = 0;
3182 adapter->tx_ring->total_packets = 0;
3183 adapter->rx_ring->total_packets = 0;
3184 __netif_rx_schedule(netdev, napi);
3185 }
3186
3187 return IRQ_HANDLED;
3188}
3189
3190/**
3191 * igb_clean - NAPI Rx polling callback
3192 * @napi: napi structure containing the rings to be cleaned
 * @budget: amount of work allowed to be done in one poll
3193 **/
3194static int igb_clean(struct napi_struct *napi, int budget)
3195{
3196 struct igb_adapter *adapter = container_of(napi, struct igb_adapter,
3197 napi);
3198 struct net_device *netdev = adapter->netdev;
3199 int tx_clean_complete = 1, work_done = 0;
3200 int i;
3201
3202 /* Must NOT use netdev_priv macro here. */
3203 adapter = netdev->priv;
3204
3205 /* Keep link state information with original netdev */
3206 if (!netif_carrier_ok(netdev))
3207 goto quit_polling;
3208
3209 /* igb_clean is called per-cpu. This lock protects tx_ring[i] from
3210 * being cleaned by multiple cpus simultaneously. A failure obtaining
3211 * the lock means tx_ring[i] is currently being cleaned anyway. */
3212 for (i = 0; i < adapter->num_tx_queues; i++) {
3213 if (spin_trylock(&adapter->tx_ring[i].tx_clean_lock)) {
3214 tx_clean_complete &= igb_clean_tx_irq(adapter,
3215 &adapter->tx_ring[i]);
3216 spin_unlock(&adapter->tx_ring[i].tx_clean_lock);
3217 }
3218 }
3219
3220 for (i = 0; i < adapter->num_rx_queues; i++)
3221 igb_clean_rx_irq_adv(adapter, &adapter->rx_ring[i], &work_done,
3222 adapter->rx_ring[i].napi.weight);
3223
3224 /* If no Tx and not enough Rx work done, exit the polling mode */
3225 if ((tx_clean_complete && (work_done < budget)) ||
3226 !netif_running(netdev)) {
3227quit_polling:
3228 if (adapter->itr_setting & 3)
3229 igb_set_itr(adapter, E1000_ITR, false);
3230 netif_rx_complete(netdev, napi);
3231 if (!test_bit(__IGB_DOWN, &adapter->state))
3232 igb_irq_enable(adapter);
3233 return 0;
3234 }
3235
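	/* work remains: a non-zero return keeps this NAPI instance on the
	 * poll list so igb_clean will be called again */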
3236 return 1;
3237}
3238
3239static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3240{
3241 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
3242 struct igb_adapter *adapter = rx_ring->adapter;
3243 struct e1000_hw *hw = &adapter->hw;
3244 struct net_device *netdev = adapter->netdev;
3245 int work_done = 0;
3246
3247 /* Keep link state information with original netdev */
3248 if (!netif_carrier_ok(netdev))
3249 goto quit_polling;
3250
3251 igb_clean_rx_irq_adv(adapter, rx_ring, &work_done, budget);
3252
3254 /* If not enough Rx work done, exit the polling mode */
3255 if ((work_done == 0) || !netif_running(netdev)) {
3256quit_polling:
3257 netif_rx_complete(netdev, napi);
3258
3259 wr32(E1000_EIMS, rx_ring->eims_value);
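		/* Dynamic ITR: use the mean frame size of this poll to bias
		 * the ring's interrupt rate up for small, latency-sensitive
		 * frames and down for large, throughput-oriented ones. */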
3260 if ((adapter->itr_setting & 3) && !rx_ring->no_itr_adjust &&
3261 (rx_ring->total_packets > IGB_DYN_ITR_PACKET_THRESHOLD)) {
3262 int mean_size = rx_ring->total_bytes /
3263 rx_ring->total_packets;
3264 if (mean_size < IGB_DYN_ITR_LENGTH_LOW)
3265 igb_raise_rx_eitr(adapter, rx_ring);
3266 else if (mean_size > IGB_DYN_ITR_LENGTH_HIGH)
3267 igb_lower_rx_eitr(adapter, rx_ring);
3268 }
3269 return 0;
3270 }
3271
3272 return 1;
3273}

3274/**
3275 * igb_clean_tx_irq - Reclaim resources after transmit completes
3276 * @adapter: board private structure
 * @tx_ring: TX ring to reclaim descriptors from
3277 * returns true if ring is completely cleaned
3278 **/
3279static bool igb_clean_tx_irq(struct igb_adapter *adapter,
3280 struct igb_ring *tx_ring)
3281{
3282 struct net_device *netdev = adapter->netdev;
3283 struct e1000_hw *hw = &adapter->hw;
3284 struct e1000_tx_desc *tx_desc;
3285 struct igb_buffer *buffer_info;
3286 struct sk_buff *skb;
3287 unsigned int i;
3288 u32 head, oldhead;
3289 unsigned int count = 0;
3290 bool cleaned = false;
3291 bool retval = true;
3292 unsigned int total_bytes = 0, total_packets = 0;
3293
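	/* The 82575 writes the TX head pointer back to host memory just
	 * past the last descriptor in the ring, so completions can be
	 * polled from cacheable memory rather than via an MMIO register
	 * read; the rmb() orders that load against the descriptor reads
	 * below. */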
3294 rmb();
3295 head = *(volatile u32 *)((struct e1000_tx_desc *)tx_ring->desc
3296 + tx_ring->count);
3297 head = le32_to_cpu(head);
3298 i = tx_ring->next_to_clean;
3299 while (1) {
3300 while (i != head) {
3301 cleaned = true;
3302 tx_desc = E1000_TX_DESC(*tx_ring, i);
3303 buffer_info = &tx_ring->buffer_info[i];
3304 skb = buffer_info->skb;
3305
3306 if (skb) {
3307 unsigned int segs, bytecount;
3308 /* gso_segs is currently only valid for tcp */
3309 segs = skb_shinfo(skb)->gso_segs ?: 1;
3310 /* multiply data chunks by size of headers */
3311 bytecount = ((segs - 1) * skb_headlen(skb)) +
3312 skb->len;
3313 total_packets += segs;
3314 total_bytes += bytecount;
3315 }
3316
3317 igb_unmap_and_free_tx_resource(adapter, buffer_info);
3318 tx_desc->upper.data = 0;
3319
3320 i++;
3321 if (i == tx_ring->count)
3322 i = 0;
3323
3324 count++;
3325 if (count == IGB_MAX_TX_CLEAN) {
3326 retval = false;
3327 goto done_cleaning;
3328 }
3329 }
3330 oldhead = head;
3331 rmb();
3332 head = *(volatile u32 *)((struct e1000_tx_desc *)tx_ring->desc
3333 + tx_ring->count);
3334 head = le32_to_cpu(head);
3335 if (head == oldhead)
3336 goto done_cleaning;
3337 } /* while (1) */
3338
3339done_cleaning:
3340 tx_ring->next_to_clean = i;
3341
3342 if (unlikely(cleaned &&
3343 netif_carrier_ok(netdev) &&
3344 IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
3345 /* Make sure that anybody stopping the queue after this
3346 * sees the new next_to_clean.
3347 */
3348 smp_mb();
3349 if (netif_queue_stopped(netdev) &&
3350 !(test_bit(__IGB_DOWN, &adapter->state))) {
3351 netif_wake_queue(netdev);
3352 ++adapter->restart_queue;
3353 }
3354 }
3355
3356 if (tx_ring->detect_tx_hung) {
3357 /* Detect a transmit hang in hardware, this serializes the
3358 * check with the clearing of time_stamp and movement of i */
3359 tx_ring->detect_tx_hung = false;
3360 if (tx_ring->buffer_info[i].time_stamp &&
3361 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
3362 (adapter->tx_timeout_factor * HZ))
3363 && !(rd32(E1000_STATUS) &
3364 E1000_STATUS_TXOFF)) {
3365
3366 tx_desc = E1000_TX_DESC(*tx_ring, i);
3367 /* detected Tx unit hang */
3368 dev_err(&adapter->pdev->dev,
3369 "Detected Tx Unit Hang\n"
3370 " Tx Queue <%lu>\n"
3371 " TDH <%x>\n"
3372 " TDT <%x>\n"
3373 " next_to_use <%x>\n"
3374 " next_to_clean <%x>\n"
3375 " head (WB) <%x>\n"
3376 "buffer_info[next_to_clean]\n"
3377 " time_stamp <%lx>\n"
3378 " jiffies <%lx>\n"
3379 " desc.status <%x>\n",
3380				(unsigned long)(tx_ring - adapter->tx_ring),
3382 readl(adapter->hw.hw_addr + tx_ring->head),
3383 readl(adapter->hw.hw_addr + tx_ring->tail),
3384 tx_ring->next_to_use,
3385 tx_ring->next_to_clean,
3386 head,
3387 tx_ring->buffer_info[i].time_stamp,
3388 jiffies,
3389 tx_desc->upper.fields.status);
3390 netif_stop_queue(netdev);
3391 }
3392 }
3393 tx_ring->total_bytes += total_bytes;
3394 tx_ring->total_packets += total_packets;
3395 adapter->net_stats.tx_bytes += total_bytes;
3396 adapter->net_stats.tx_packets += total_packets;
3397 return retval;
3398}
3399
3401/**
3402 * igb_receive_skb - helper function to handle rx indications
3403 * @adapter: board private structure
3404 * @status: descriptor status field as written by hardware
3405 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3406 * @skb: pointer to sk_buff to be indicated to stack
3407 **/
3408static void igb_receive_skb(struct igb_adapter *adapter, u8 status, u16 vlan,
3409 struct sk_buff *skb)
3410{
3411 if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
3412 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3413 le16_to_cpu(vlan) &
3414 E1000_RXD_SPC_VLAN_MASK);
3415 else
3416 netif_receive_skb(skb);
3417}
3418
3420static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
3421 u32 status_err, struct sk_buff *skb)
3422{
3423 skb->ip_summed = CHECKSUM_NONE;
3424
3425	/* the Ignore Checksum bit is set, or checksum was disabled via ethtool */
3426 if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
3427 return;
3428 /* TCP/UDP checksum error bit is set */
3429 if (status_err &
3430 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
3431 /* let the stack verify checksum errors */
3432 adapter->hw_csum_err++;
3433 return;
3434 }
3435 /* It must be a TCP or UDP packet with a valid checksum */
3436 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
3437 skb->ip_summed = CHECKSUM_UNNECESSARY;
3438
3439 adapter->hw_csum_good++;
3440}
3441
3442static bool igb_clean_rx_irq_adv(struct igb_adapter *adapter,
3443 struct igb_ring *rx_ring,
3444 int *work_done, int budget)
3445{
3446 struct net_device *netdev = adapter->netdev;
3447 struct pci_dev *pdev = adapter->pdev;
3448	union e1000_adv_rx_desc *rx_desc, *next_rxd;
3449	struct igb_buffer *buffer_info, *next_buffer;
3450 struct sk_buff *skb;
3451 unsigned int i, j;
3452 u32 length, hlen, staterr;
3453 bool cleaned = false;
3454 int cleaned_count = 0;
3455 unsigned int total_bytes = 0, total_packets = 0;
3456
3457 i = rx_ring->next_to_clean;
3458 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
3459 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3460
3461 while (staterr & E1000_RXD_STAT_DD) {
3462 if (*work_done >= budget)
3463 break;
3464 (*work_done)++;
3465 buffer_info = &rx_ring->buffer_info[i];
3466
3467 /* HW will not DMA in data larger than the given buffer, even
3468 * if it parses the (NFS, of course) header to be larger. In
3469 * that case, it fills the header buffer and spills the rest
3470 * into the page.
3471 */
3472		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
3473			E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
3474 if (hlen > adapter->rx_ps_hdr_size)
3475 hlen = adapter->rx_ps_hdr_size;
3476
3477 length = le16_to_cpu(rx_desc->wb.upper.length);
3478 cleaned = true;
3479 cleaned_count++;
3480
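		/* A multi-buffer packet may have been left half-assembled if
		 * its remaining descriptors had not been written back yet on
		 * the previous pass; resume filling the pending skb rather
		 * than starting a new one. */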
3481 if (rx_ring->pending_skb != NULL) {
3482 skb = rx_ring->pending_skb;
3483 rx_ring->pending_skb = NULL;
3484 j = rx_ring->pending_skb_page;
3485 } else {
3486 skb = buffer_info->skb;
3487 prefetch(skb->data - NET_IP_ALIGN);
3488 buffer_info->skb = NULL;
3489 if (hlen) {
3490 pci_unmap_single(pdev, buffer_info->dma,
3491 adapter->rx_ps_hdr_size +
3492 NET_IP_ALIGN,
3493 PCI_DMA_FROMDEVICE);
3494 skb_put(skb, hlen);
3495 } else {
3496 pci_unmap_single(pdev, buffer_info->dma,
3497 adapter->rx_buffer_len +
3498 NET_IP_ALIGN,
3499 PCI_DMA_FROMDEVICE);
3500 skb_put(skb, length);
3501 goto send_up;
3502 }
3503 j = 0;
3504 }
3505
3506 while (length) {
3507 pci_unmap_page(pdev, buffer_info->page_dma,
3508 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3509 buffer_info->page_dma = 0;
3510 skb_fill_page_desc(skb, j, buffer_info->page,
3511 0, length);
3512 buffer_info->page = NULL;
3513
3514 skb->len += length;
3515 skb->data_len += length;
3516 skb->truesize += length;
3517 rx_desc->wb.upper.status_error = 0;
3518 if (staterr & E1000_RXD_STAT_EOP)
3519 break;
3520
3521 j++;
3522 cleaned_count++;
3523 i++;
3524 if (i == rx_ring->count)
3525 i = 0;
3526
3527 buffer_info = &rx_ring->buffer_info[i];
3528 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
3529 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3530 length = le16_to_cpu(rx_desc->wb.upper.length);
3531 if (!(staterr & E1000_RXD_STAT_DD)) {
3532 rx_ring->pending_skb = skb;
3533 rx_ring->pending_skb_page = j;
3534 goto out;
3535 }
3536 }
3537send_up:
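		/* strip the 4-byte Ethernet CRC that the hardware leaves at
		 * the end of the frame */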
3538 pskb_trim(skb, skb->len - 4);
3539 i++;
3540 if (i == rx_ring->count)
3541 i = 0;
3542 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
3543 prefetch(next_rxd);
3544 next_buffer = &rx_ring->buffer_info[i];
3545
3546 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
3547 dev_kfree_skb_irq(skb);
3548 goto next_desc;
3549 }
3550 rx_ring->no_itr_adjust |= (staterr & E1000_RXD_STAT_DYNINT);
3551
3552 total_bytes += skb->len;
3553 total_packets++;
3554
3555 igb_rx_checksum_adv(adapter, staterr, skb);
3556
3557 skb->protocol = eth_type_trans(skb, netdev);
3558
3559 igb_receive_skb(adapter, staterr, rx_desc->wb.upper.vlan, skb);
3560
3561 netdev->last_rx = jiffies;
3562
3563next_desc:
3564 rx_desc->wb.upper.status_error = 0;
3565
3566 /* return some buffers to hardware, one at a time is too slow */
3567 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
3568 igb_alloc_rx_buffers_adv(adapter, rx_ring,
3569 cleaned_count);
3570 cleaned_count = 0;
3571 }
3572
3573 /* use prefetched values */
3574 rx_desc = next_rxd;
3575 buffer_info = next_buffer;
3576
3577 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3578 }
3579out:
3580 rx_ring->next_to_clean = i;
3581 cleaned_count = IGB_DESC_UNUSED(rx_ring);
3582
3583 if (cleaned_count)
3584 igb_alloc_rx_buffers_adv(adapter, rx_ring, cleaned_count);
3585
3586 rx_ring->total_packets += total_packets;
3587 rx_ring->total_bytes += total_bytes;
3588 rx_ring->rx_stats.packets += total_packets;
3589 rx_ring->rx_stats.bytes += total_bytes;
3590 adapter->net_stats.rx_bytes += total_bytes;
3591 adapter->net_stats.rx_packets += total_packets;
3592 return cleaned;
3593}
3594
3596/**
3597 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
3598 * @adapter: address of board private structure
 * @rx_ring: ring to refill
 * @cleaned_count: number of descriptors to refill
3599 **/
3600static void igb_alloc_rx_buffers_adv(struct igb_adapter *adapter,
3601 struct igb_ring *rx_ring,
3602 int cleaned_count)
3603{
3604 struct net_device *netdev = adapter->netdev;
3605 struct pci_dev *pdev = adapter->pdev;
3606 union e1000_adv_rx_desc *rx_desc;
3607 struct igb_buffer *buffer_info;
3608 struct sk_buff *skb;
3609 unsigned int i;
3610
3611 i = rx_ring->next_to_use;
3612 buffer_info = &rx_ring->buffer_info[i];
3613
3614 while (cleaned_count--) {
3615 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
3616
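		/* In packet-split mode each descriptor references two buffers:
		 * a small header buffer (buffer_info->dma) and a full page for
		 * the payload (buffer_info->page_dma); replenish whichever
		 * pieces the hardware consumed. */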
3617 if (adapter->rx_ps_hdr_size && !buffer_info->page) {
3618 buffer_info->page = alloc_page(GFP_ATOMIC);
3619 if (!buffer_info->page) {
3620 adapter->alloc_rx_buff_failed++;
3621 goto no_buffers;
3622 }
3623 buffer_info->page_dma =
3624 pci_map_page(pdev,
3625 buffer_info->page,
3626 0, PAGE_SIZE,
3627 PCI_DMA_FROMDEVICE);
3628 }
3629
3630 if (!buffer_info->skb) {
3631 int bufsz;
3632
3633 if (adapter->rx_ps_hdr_size)
3634 bufsz = adapter->rx_ps_hdr_size;
3635 else
3636 bufsz = adapter->rx_buffer_len;
3637 bufsz += NET_IP_ALIGN;
3638 skb = netdev_alloc_skb(netdev, bufsz);
3639
3640 if (!skb) {
3641 adapter->alloc_rx_buff_failed++;
3642 goto no_buffers;
3643 }
3644
3645			/* Make buffer alignment 2 beyond a 16 byte boundary;
3646			 * this results in a 16 byte aligned IP header after
3647			 * the 14 byte MAC header is removed.
3648			 */
3649 skb_reserve(skb, NET_IP_ALIGN);
3650
3651 buffer_info->skb = skb;
3652 buffer_info->dma = pci_map_single(pdev, skb->data,
3653 bufsz,
3654 PCI_DMA_FROMDEVICE);
3656		}
3657 /* Refresh the desc even if buffer_addrs didn't change because
3658 * each write-back erases this info. */
3659 if (adapter->rx_ps_hdr_size) {
3660 rx_desc->read.pkt_addr =
3661 cpu_to_le64(buffer_info->page_dma);
3662 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
3663 } else {
3664 rx_desc->read.pkt_addr =
3665 cpu_to_le64(buffer_info->dma);
3666 rx_desc->read.hdr_addr = 0;
3667 }
3668
3669 i++;
3670 if (i == rx_ring->count)
3671 i = 0;
3672 buffer_info = &rx_ring->buffer_info[i];
3673 }
3674
3675no_buffers:
3676 if (rx_ring->next_to_use != i) {
3677 rx_ring->next_to_use = i;
3678 if (i == 0)
3679 i = (rx_ring->count - 1);
3680 else
3681 i--;
3682
3683 /* Force memory writes to complete before letting h/w
3684 * know there are new descriptors to fetch. (Only
3685 * applicable for weak-ordered memory model archs,
3686 * such as IA-64). */
3687 wmb();
3688 writel(i, adapter->hw.hw_addr + rx_ring->tail);
3689 }
3690}
3691
3692/**
3693 * igb_mii_ioctl - handle MII ioctls for the copper PHY
3694 * @netdev: network interface device structure
3695 * @ifr: interface request structure containing the MII data
3696 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
3697 **/
3698static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3699{
3700 struct igb_adapter *adapter = netdev_priv(netdev);
3701 struct mii_ioctl_data *data = if_mii(ifr);
3702
3703 if (adapter->hw.phy.media_type != e1000_media_type_copper)
3704 return -EOPNOTSUPP;
3705
3706 switch (cmd) {
3707 case SIOCGMIIPHY:
3708 data->phy_id = adapter->hw.phy.addr;
3709 break;
3710 case SIOCGMIIREG:
3711 if (!capable(CAP_NET_ADMIN))
3712 return -EPERM;
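		/* MII register addresses are 5 bits wide, hence the 0x1F
		 * mask below */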
3713		if (adapter->hw.phy.ops.read_phy_reg(&adapter->hw,
3714						     data->reg_num & 0x1F,
3715						     &data->val_out))
3716 return -EIO;
3717 break;
3718 case SIOCSMIIREG:
3719 default:
3720 return -EOPNOTSUPP;
3721 }
3722 return 0;
3723}
3724
3725/**
3726 * igb_ioctl - dispatch device-specific ioctls
3727 * @netdev: network interface device structure
3728 * @ifr: interface request structure
3729 * @cmd: ioctl command
3730 **/
3731static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3732{
3733 switch (cmd) {
3734 case SIOCGMIIPHY:
3735 case SIOCGMIIREG:
3736 case SIOCSMIIREG:
3737 return igb_mii_ioctl(netdev, ifr, cmd);
3738 default:
3739 return -EOPNOTSUPP;
3740 }
3741}
3742
3743static void igb_vlan_rx_register(struct net_device *netdev,
3744 struct vlan_group *grp)
3745{
3746 struct igb_adapter *adapter = netdev_priv(netdev);
3747 struct e1000_hw *hw = &adapter->hw;
3748 u32 ctrl, rctl;
3749
3750 igb_irq_disable(adapter);
3751 adapter->vlgrp = grp;
3752
3753 if (grp) {
3754 /* enable VLAN tag insert/strip */
3755 ctrl = rd32(E1000_CTRL);
3756 ctrl |= E1000_CTRL_VME;
3757 wr32(E1000_CTRL, ctrl);
3758
3759 /* enable VLAN receive filtering */
3760 rctl = rd32(E1000_RCTL);
3761 rctl |= E1000_RCTL_VFE;
3762 rctl &= ~E1000_RCTL_CFIEN;
3763 wr32(E1000_RCTL, rctl);
3764 igb_update_mng_vlan(adapter);
3765 wr32(E1000_RLPML,
3766 adapter->max_frame_size + VLAN_TAG_SIZE);
3767 } else {
3768 /* disable VLAN tag insert/strip */
3769 ctrl = rd32(E1000_CTRL);
3770 ctrl &= ~E1000_CTRL_VME;
3771 wr32(E1000_CTRL, ctrl);
3772
3773 /* disable VLAN filtering */
3774 rctl = rd32(E1000_RCTL);
3775 rctl &= ~E1000_RCTL_VFE;
3776 wr32(E1000_RCTL, rctl);
3777 if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
3778 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3779 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
3780 }
3781 wr32(E1000_RLPML,
3782 adapter->max_frame_size);
3783 }
3784
3785 if (!test_bit(__IGB_DOWN, &adapter->state))
3786 igb_irq_enable(adapter);
3787}
3788
3789static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
3790{
3791 struct igb_adapter *adapter = netdev_priv(netdev);
3792 struct e1000_hw *hw = &adapter->hw;
3793 u32 vfta, index;
3794
3795 if ((adapter->hw.mng_cookie.status &
3796 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
3797 (vid == adapter->mng_vlan_id))
3798 return;
3799 /* add VID to filter table */
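	/* The VLAN filter table is a 4096-bit bitmap spread across 128
	 * 32-bit registers: VID bits 11:5 select the register, bits 4:0
	 * the bit within it. */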
3800 index = (vid >> 5) & 0x7F;
3801 vfta = array_rd32(E1000_VFTA, index);
3802 vfta |= (1 << (vid & 0x1F));
3803 igb_write_vfta(&adapter->hw, index, vfta);
3804}
3805
3806static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
3807{
3808 struct igb_adapter *adapter = netdev_priv(netdev);
3809 struct e1000_hw *hw = &adapter->hw;
3810 u32 vfta, index;
3811
3812 igb_irq_disable(adapter);
3813 vlan_group_set_device(adapter->vlgrp, vid, NULL);
3814
3815 if (!test_bit(__IGB_DOWN, &adapter->state))
3816 igb_irq_enable(adapter);
3817
3818 if ((adapter->hw.mng_cookie.status &
3819 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
3820 (vid == adapter->mng_vlan_id)) {
3821 /* release control to f/w */
3822 igb_release_hw_control(adapter);
3823 return;
3824 }
3825
3826 /* remove VID from filter table */
3827 index = (vid >> 5) & 0x7F;
3828 vfta = array_rd32(E1000_VFTA, index);
3829 vfta &= ~(1 << (vid & 0x1F));
3830 igb_write_vfta(&adapter->hw, index, vfta);
3831}
3832
3833static void igb_restore_vlan(struct igb_adapter *adapter)
3834{
3835 igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
3836
3837 if (adapter->vlgrp) {
3838 u16 vid;
3839 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
3840 if (!vlan_group_get_device(adapter->vlgrp, vid))
3841 continue;
3842 igb_vlan_rx_add_vid(adapter->netdev, vid);
3843 }
3844 }
3845}
3846
3847int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
3848{
3849 struct e1000_mac_info *mac = &adapter->hw.mac;
3850
3851 mac->autoneg = 0;
3852
3853	/* Fiber NICs only allow 1000 Mbps Full duplex */
3854 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
3855 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
3856 dev_err(&adapter->pdev->dev,
3857 "Unsupported Speed/Duplex configuration\n");
3858 return -EINVAL;
3859 }
3860
3861 switch (spddplx) {
3862 case SPEED_10 + DUPLEX_HALF:
3863 mac->forced_speed_duplex = ADVERTISE_10_HALF;
3864 break;
3865 case SPEED_10 + DUPLEX_FULL:
3866 mac->forced_speed_duplex = ADVERTISE_10_FULL;
3867 break;
3868 case SPEED_100 + DUPLEX_HALF:
3869 mac->forced_speed_duplex = ADVERTISE_100_HALF;
3870 break;
3871 case SPEED_100 + DUPLEX_FULL:
3872 mac->forced_speed_duplex = ADVERTISE_100_FULL;
3873 break;
3874 case SPEED_1000 + DUPLEX_FULL:
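		/* 1000BASE-T cannot be forced; it requires autonegotiation
		 * for master/slave resolution, so advertise 1000/Full and
		 * let autoneg run */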
3875 mac->autoneg = 1;
3876 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
3877 break;
3878 case SPEED_1000 + DUPLEX_HALF: /* not supported */
3879 default:
3880 dev_err(&adapter->pdev->dev,
3881 "Unsupported Speed/Duplex configuration\n");
3882 return -EINVAL;
3883 }
3884 return 0;
3885}
3886
3888static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
3889{
3890 struct net_device *netdev = pci_get_drvdata(pdev);
3891 struct igb_adapter *adapter = netdev_priv(netdev);
3892 struct e1000_hw *hw = &adapter->hw;
3893 u32 ctrl, ctrl_ext, rctl, status;
3894 u32 wufc = adapter->wol;
3895#ifdef CONFIG_PM
3896 int retval = 0;
3897#endif
3898
3899 netif_device_detach(netdev);
3900
3901 if (netif_running(netdev)) {
3902 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
3903 igb_down(adapter);
3904 igb_free_irq(adapter);
3905 }
3906
3907#ifdef CONFIG_PM
3908 retval = pci_save_state(pdev);
3909 if (retval)
3910 return retval;
3911#endif
3912
3913 status = rd32(E1000_STATUS);
3914 if (status & E1000_STATUS_LU)
3915 wufc &= ~E1000_WUFC_LNKC;
3916
3917 if (wufc) {
3918 igb_setup_rctl(adapter);
3919 igb_set_multi(netdev);
3920
3921 /* turn on all-multi mode if wake on multicast is enabled */
3922 if (wufc & E1000_WUFC_MC) {
3923 rctl = rd32(E1000_RCTL);
3924 rctl |= E1000_RCTL_MPE;
3925 wr32(E1000_RCTL, rctl);
3926 }
3927
3928 ctrl = rd32(E1000_CTRL);
3929 /* advertise wake from D3Cold */
3930 #define E1000_CTRL_ADVD3WUC 0x00100000
3931 /* phy power management enable */
3932 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
3933 ctrl |= E1000_CTRL_ADVD3WUC;
3934 wr32(E1000_CTRL, ctrl);
3935
3936 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
3937 adapter->hw.phy.media_type ==
3938 e1000_media_type_internal_serdes) {
3939 /* keep the laser running in D3 */
3940 ctrl_ext = rd32(E1000_CTRL_EXT);
3941 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
3942 wr32(E1000_CTRL_EXT, ctrl_ext);
3943 }
3944
3945 /* Allow time for pending master requests to run */
3946 igb_disable_pcie_master(&adapter->hw);
3947
3948 wr32(E1000_WUC, E1000_WUC_PME_EN);
3949 wr32(E1000_WUFC, wufc);
3950 pci_enable_wake(pdev, PCI_D3hot, 1);
3951 pci_enable_wake(pdev, PCI_D3cold, 1);
3952 } else {
3953 wr32(E1000_WUC, 0);
3954 wr32(E1000_WUFC, 0);
3955 pci_enable_wake(pdev, PCI_D3hot, 0);
3956 pci_enable_wake(pdev, PCI_D3cold, 0);
3957 }
3958
3959 igb_release_manageability(adapter);
3960
3961 /* make sure adapter isn't asleep if manageability is enabled */
3962 if (adapter->en_mng_pt) {
3963 pci_enable_wake(pdev, PCI_D3hot, 1);
3964 pci_enable_wake(pdev, PCI_D3cold, 1);
3965 }
3966
3967 /* Release control of h/w to f/w. If f/w is AMT enabled, this
3968 * would have already happened in close and is redundant. */
3969 igb_release_hw_control(adapter);
3970
3971 pci_disable_device(pdev);
3972
3973 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3974
3975 return 0;
3976}
3977
3978#ifdef CONFIG_PM
3979static int igb_resume(struct pci_dev *pdev)
3980{
3981 struct net_device *netdev = pci_get_drvdata(pdev);
3982 struct igb_adapter *adapter = netdev_priv(netdev);
3983 struct e1000_hw *hw = &adapter->hw;
3984	int err;
3985
3986 pci_set_power_state(pdev, PCI_D0);
3987 pci_restore_state(pdev);
3988 err = pci_enable_device(pdev);
3989 if (err) {
3990 dev_err(&pdev->dev,
3991 "igb: Cannot enable PCI device from suspend\n");
3992 return err;
3993 }
3994 pci_set_master(pdev);
3995
3996 pci_enable_wake(pdev, PCI_D3hot, 0);
3997 pci_enable_wake(pdev, PCI_D3cold, 0);
3998
3999 if (netif_running(netdev)) {
4000 err = igb_request_irq(adapter);
4001 if (err)
4002 return err;
4003 }
4004
4005 /* e1000_power_up_phy(adapter); */
4006
4007 igb_reset(adapter);
4008 wr32(E1000_WUS, ~0);
4009
4010 igb_init_manageability(adapter);
4011
4012 if (netif_running(netdev))
4013 igb_up(adapter);
4014
4015 netif_device_attach(netdev);
4016
4017 /* let the f/w know that the h/w is now under the control of the
4018 * driver. */
4019 igb_get_hw_control(adapter);
4020
4021 return 0;
4022}
4023#endif
4024
4025static void igb_shutdown(struct pci_dev *pdev)
4026{
4027 igb_suspend(pdev, PMSG_SUSPEND);
4028}
4029
4030#ifdef CONFIG_NET_POLL_CONTROLLER
4031/*
4032 * Polling 'interrupt' - used by things like netconsole to send skbs
4033 * without having to re-enable interrupts. It's not called while
4034 * the interrupt routine is executing.
4035 */
4036static void igb_netpoll(struct net_device *netdev)
4037{
4038 struct igb_adapter *adapter = netdev_priv(netdev);
4039 int i;
4040 int work_done = 0;
4041
4042 igb_irq_disable(adapter);
4043 for (i = 0; i < adapter->num_tx_queues; i++)
4044 igb_clean_tx_irq(adapter, &adapter->tx_ring[i]);
4045
4046 for (i = 0; i < adapter->num_rx_queues; i++)
4047 igb_clean_rx_irq_adv(adapter, &adapter->rx_ring[i],
4048 &work_done,
4049 adapter->rx_ring[i].napi.weight);
4050
4051 igb_irq_enable(adapter);
4052}
4053#endif /* CONFIG_NET_POLL_CONTROLLER */
4054
4055/**
4056 * igb_io_error_detected - called when PCI error is detected
4057 * @pdev: Pointer to PCI device
4058 * @state: The current pci connection state
4059 *
4060 * This function is called after a PCI bus error affecting
4061 * this device has been detected.
4062 */
4063static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
4064 pci_channel_state_t state)
4065{
4066 struct net_device *netdev = pci_get_drvdata(pdev);
4067 struct igb_adapter *adapter = netdev_priv(netdev);
4068
4069 netif_device_detach(netdev);
4070
4071 if (netif_running(netdev))
4072 igb_down(adapter);
4073 pci_disable_device(pdev);
4074
4075	/* Request a slot reset. */
4076 return PCI_ERS_RESULT_NEED_RESET;
4077}
4078
4079/**
4080 * igb_io_slot_reset - called after the pci bus has been reset.
4081 * @pdev: Pointer to PCI device
4082 *
4083 * Restart the card from scratch, as if from a cold-boot. Implementation
4084 * resembles the first-half of the igb_resume routine.
4085 */
4086static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
4087{
4088 struct net_device *netdev = pci_get_drvdata(pdev);
4089 struct igb_adapter *adapter = netdev_priv(netdev);
4090 struct e1000_hw *hw = &adapter->hw;
4091
4092 if (pci_enable_device(pdev)) {
4093 dev_err(&pdev->dev,
4094 "Cannot re-enable PCI device after reset.\n");
4095 return PCI_ERS_RESULT_DISCONNECT;
4096 }
4097 pci_set_master(pdev);
4098
4099 pci_enable_wake(pdev, PCI_D3hot, 0);
4100 pci_enable_wake(pdev, PCI_D3cold, 0);
4101
4102 igb_reset(adapter);
4103 wr32(E1000_WUS, ~0);
4104
4105 return PCI_ERS_RESULT_RECOVERED;
4106}
4107
4108/**
4109 * igb_io_resume - called when traffic can start flowing again.
4110 * @pdev: Pointer to PCI device
4111 *
4112 * This callback is called when the error recovery driver tells us that
4113 * it's OK to resume normal operation. Implementation resembles the
4114 * second-half of the igb_resume routine.
4115 */
4116static void igb_io_resume(struct pci_dev *pdev)
4117{
4118 struct net_device *netdev = pci_get_drvdata(pdev);
4119 struct igb_adapter *adapter = netdev_priv(netdev);
4120
4121 igb_init_manageability(adapter);
4122
4123 if (netif_running(netdev)) {
4124 if (igb_up(adapter)) {
4125 dev_err(&pdev->dev, "igb_up failed after reset\n");
4126 return;
4127 }
4128 }
4129
4130 netif_device_attach(netdev);
4131
4132 /* let the f/w know that the h/w is now under the control of the
4133 * driver. */
4134 igb_get_hw_control(adapter);
4136}
4137
4138/* igb_main.c */