author | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2011-04-07 10:42:33 -0400
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2011-08-10 23:03:27 -0400
commit | dee1ad47f2ee75f5146d83ca757c1b7861c34c3b (patch)
tree | 47cbdefe3d0f9b729724e378ad6a96eaddfd5fbc /drivers/net/ethernet/intel/e1000e
parent | f7917c009c28c941ba151ee66f04dc7f6a2e1e0b (diff)
intel: Move the Intel wired LAN drivers
Moves the Intel wired LAN drivers into drivers/net/ethernet/intel/ and
makes the necessary Kconfig and Makefile changes.
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/e1000e')
-rw-r--r-- | drivers/net/ethernet/intel/e1000e/80003es2lan.c | 1516
-rw-r--r-- | drivers/net/ethernet/intel/e1000e/82571.c | 2115
-rw-r--r-- | drivers/net/ethernet/intel/e1000e/Makefile | 37
-rw-r--r-- | drivers/net/ethernet/intel/e1000e/defines.h | 844
-rw-r--r-- | drivers/net/ethernet/intel/e1000e/e1000.h | 736
-rw-r--r-- | drivers/net/ethernet/intel/e1000e/ethtool.c | 2081
-rw-r--r-- | drivers/net/ethernet/intel/e1000e/hw.h | 984
-rw-r--r-- | drivers/net/ethernet/intel/e1000e/ich8lan.c | 4111
-rw-r--r-- | drivers/net/ethernet/intel/e1000e/lib.c | 2692
-rw-r--r-- | drivers/net/ethernet/intel/e1000e/netdev.c | 6312
-rw-r--r-- | drivers/net/ethernet/intel/e1000e/param.c | 478
-rw-r--r-- | drivers/net/ethernet/intel/e1000e/phy.c | 3377
12 files changed, 25283 insertions, 0 deletions
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
new file mode 100644
index 000000000000..e4f42257c24c
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -0,0 +1,1516 @@
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/1000 Linux driver | ||
4 | Copyright(c) 1999 - 2011 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | /* | ||
30 | * 80003ES2LAN Gigabit Ethernet Controller (Copper) | ||
31 | * 80003ES2LAN Gigabit Ethernet Controller (Serdes) | ||
32 | */ | ||
33 | |||
34 | #include "e1000.h" | ||
35 | |||
36 | #define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 | ||
37 | #define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02 | ||
38 | #define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10 | ||
39 | #define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F | ||
40 | |||
41 | #define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008 | ||
42 | #define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800 | ||
43 | #define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010 | ||
44 | |||
45 | #define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004 | ||
46 | #define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 | ||
47 | #define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000 | ||
48 | |||
49 | #define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C | ||
50 | #define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004 | ||
51 | |||
52 | #define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ | ||
53 | #define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 | ||
54 | |||
55 | #define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8 | ||
56 | #define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9 | ||
57 | |||
58 | /* GG82563 PHY Specific Status Register (Page 0, Register 16) */ | ||
59 | #define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */ | ||
60 | #define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 | ||
61 | #define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */ | ||
62 | #define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */ | ||
63 | #define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */ | ||
64 | |||
65 | /* PHY Specific Control Register 2 (Page 0, Register 26) */ | ||
66 | #define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 | ||
67 | /* 1=Reverse Auto-Negotiation */ | ||
68 | |||
69 | /* MAC Specific Control Register (Page 2, Register 21) */ | ||
70 | /* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ | ||
71 | #define GG82563_MSCR_TX_CLK_MASK 0x0007 | ||
72 | #define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004 | ||
73 | #define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005 | ||
74 | #define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007 | ||
75 | |||
76 | #define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ | ||
77 | |||
78 | /* DSP Distance Register (Page 5, Register 26) */ | ||
79 | #define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M | ||
80 | 1 = 50-80M | ||
81 | 2 = 80-110M | ||
82 | 3 = 110-140M | ||
83 | 4 = >140M */ | ||
84 | |||
85 | /* Kumeran Mode Control Register (Page 193, Register 16) */ | ||
86 | #define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 | ||
87 | |||
88 | /* Max number of times Kumeran read/write should be validated */ | ||
89 | #define GG82563_MAX_KMRN_RETRY 0x5 | ||
90 | |||
91 | /* Power Management Control Register (Page 193, Register 20) */ | ||
92 | #define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 | ||
93 | /* 1=Enable SERDES Electrical Idle */ | ||
94 | |||
95 | /* In-Band Control Register (Page 194, Register 18) */ | ||
96 | #define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ | ||
97 | |||
98 | /* | ||
99 | * A table for the GG82563 cable length where the range is defined | ||
100 | * with a lower bound at "index" and the upper bound at | ||
101 | * "index + 5". | ||
102 | */ | ||
103 | static const u16 e1000_gg82563_cable_length_table[] = { | ||
104 | 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; | ||
105 | #define GG82563_CABLE_LENGTH_TABLE_SIZE \ | ||
106 | ARRAY_SIZE(e1000_gg82563_cable_length_table) | ||
107 | |||
108 | static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw); | ||
109 | static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); | ||
110 | static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); | ||
111 | static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw); | ||
112 | static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); | ||
113 | static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); | ||
114 | static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); | ||
115 | static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw); | ||
116 | static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | ||
117 | u16 *data); | ||
118 | static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | ||
119 | u16 data); | ||
120 | static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw); | ||
121 | |||
122 | /** | ||
123 | * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. | ||
124 | * @hw: pointer to the HW structure | ||
125 | **/ | ||
126 | static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) | ||
127 | { | ||
128 | struct e1000_phy_info *phy = &hw->phy; | ||
129 | s32 ret_val; | ||
130 | |||
131 | if (hw->phy.media_type != e1000_media_type_copper) { | ||
132 | phy->type = e1000_phy_none; | ||
133 | return 0; | ||
134 | } else { | ||
135 | phy->ops.power_up = e1000_power_up_phy_copper; | ||
136 | phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; | ||
137 | } | ||
138 | |||
139 | phy->addr = 1; | ||
140 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | ||
141 | phy->reset_delay_us = 100; | ||
142 | phy->type = e1000_phy_gg82563; | ||
143 | |||
144 | /* This can only be done after all function pointers are setup. */ | ||
145 | ret_val = e1000e_get_phy_id(hw); | ||
146 | |||
147 | /* Verify phy id */ | ||
148 | if (phy->id != GG82563_E_PHY_ID) | ||
149 | return -E1000_ERR_PHY; | ||
150 | |||
151 | return ret_val; | ||
152 | } | ||
153 | |||
154 | /** | ||
155 | * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. | ||
156 | * @hw: pointer to the HW structure | ||
157 | **/ | ||
158 | static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) | ||
159 | { | ||
160 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
161 | u32 eecd = er32(EECD); | ||
162 | u16 size; | ||
163 | |||
164 | nvm->opcode_bits = 8; | ||
165 | nvm->delay_usec = 1; | ||
166 | switch (nvm->override) { | ||
167 | case e1000_nvm_override_spi_large: | ||
168 | nvm->page_size = 32; | ||
169 | nvm->address_bits = 16; | ||
170 | break; | ||
171 | case e1000_nvm_override_spi_small: | ||
172 | nvm->page_size = 8; | ||
173 | nvm->address_bits = 8; | ||
174 | break; | ||
175 | default: | ||
176 | nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; | ||
177 | nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; | ||
178 | break; | ||
179 | } | ||
180 | |||
181 | nvm->type = e1000_nvm_eeprom_spi; | ||
182 | |||
183 | size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> | ||
184 | E1000_EECD_SIZE_EX_SHIFT); | ||
185 | |||
186 | /* | ||
187 | * Added to a constant, "size" becomes the left-shift value | ||
188 | * for setting word_size. | ||
189 | */ | ||
190 | size += NVM_WORD_SIZE_BASE_SHIFT; | ||
191 | |||
192 | /* EEPROM access above 16k is unsupported */ | ||
193 | if (size > 14) | ||
194 | size = 14; | ||
195 | nvm->word_size = 1 << size; | ||
196 | |||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | /** | ||
201 | * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. | ||
202 | * @adapter: pointer to the adapter structure | ||
203 | **/ | ||
204 | static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) | ||
205 | { | ||
206 | struct e1000_hw *hw = &adapter->hw; | ||
207 | struct e1000_mac_info *mac = &hw->mac; | ||
208 | struct e1000_mac_operations *func = &mac->ops; | ||
209 | |||
210 | /* Set media type */ | ||
211 | switch (adapter->pdev->device) { | ||
212 | case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: | ||
213 | hw->phy.media_type = e1000_media_type_internal_serdes; | ||
214 | break; | ||
215 | default: | ||
216 | hw->phy.media_type = e1000_media_type_copper; | ||
217 | break; | ||
218 | } | ||
219 | |||
220 | /* Set mta register count */ | ||
221 | mac->mta_reg_count = 128; | ||
222 | /* Set rar entry count */ | ||
223 | mac->rar_entry_count = E1000_RAR_ENTRIES; | ||
224 | /* FWSM register */ | ||
225 | mac->has_fwsm = true; | ||
226 | /* ARC supported; valid only if manageability features are enabled. */ | ||
227 | mac->arc_subsystem_valid = | ||
228 | (er32(FWSM) & E1000_FWSM_MODE_MASK) | ||
229 | ? true : false; | ||
230 | /* Adaptive IFS not supported */ | ||
231 | mac->adaptive_ifs = false; | ||
232 | |||
233 | /* check for link */ | ||
234 | switch (hw->phy.media_type) { | ||
235 | case e1000_media_type_copper: | ||
236 | func->setup_physical_interface = e1000_setup_copper_link_80003es2lan; | ||
237 | func->check_for_link = e1000e_check_for_copper_link; | ||
238 | break; | ||
239 | case e1000_media_type_fiber: | ||
240 | func->setup_physical_interface = e1000e_setup_fiber_serdes_link; | ||
241 | func->check_for_link = e1000e_check_for_fiber_link; | ||
242 | break; | ||
243 | case e1000_media_type_internal_serdes: | ||
244 | func->setup_physical_interface = e1000e_setup_fiber_serdes_link; | ||
245 | func->check_for_link = e1000e_check_for_serdes_link; | ||
246 | break; | ||
247 | default: | ||
248 | return -E1000_ERR_CONFIG; | ||
249 | break; | ||
250 | } | ||
251 | |||
252 | /* set lan id for port to determine which phy lock to use */ | ||
253 | hw->mac.ops.set_lan_id(hw); | ||
254 | |||
255 | return 0; | ||
256 | } | ||
257 | |||
258 | static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter) | ||
259 | { | ||
260 | struct e1000_hw *hw = &adapter->hw; | ||
261 | s32 rc; | ||
262 | |||
263 | rc = e1000_init_mac_params_80003es2lan(adapter); | ||
264 | if (rc) | ||
265 | return rc; | ||
266 | |||
267 | rc = e1000_init_nvm_params_80003es2lan(hw); | ||
268 | if (rc) | ||
269 | return rc; | ||
270 | |||
271 | rc = e1000_init_phy_params_80003es2lan(hw); | ||
272 | if (rc) | ||
273 | return rc; | ||
274 | |||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | /** | ||
279 | * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY | ||
280 | * @hw: pointer to the HW structure | ||
281 | * | ||
282 | * A wrapper to acquire access rights to the correct PHY. | ||
283 | **/ | ||
284 | static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) | ||
285 | { | ||
286 | u16 mask; | ||
287 | |||
288 | mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; | ||
289 | return e1000_acquire_swfw_sync_80003es2lan(hw, mask); | ||
290 | } | ||
291 | |||
292 | /** | ||
293 | * e1000_release_phy_80003es2lan - Release rights to access PHY | ||
294 | * @hw: pointer to the HW structure | ||
295 | * | ||
296 | * A wrapper to release access rights to the correct PHY. | ||
297 | **/ | ||
298 | static void e1000_release_phy_80003es2lan(struct e1000_hw *hw) | ||
299 | { | ||
300 | u16 mask; | ||
301 | |||
302 | mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; | ||
303 | e1000_release_swfw_sync_80003es2lan(hw, mask); | ||
304 | } | ||
305 | |||
306 | /** | ||
307 | * e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register | ||
308 | * @hw: pointer to the HW structure | ||
309 | * | ||
310 | * Acquire the semaphore to access the Kumeran interface. | ||
311 | * | ||
312 | **/ | ||
313 | static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw) | ||
314 | { | ||
315 | u16 mask; | ||
316 | |||
317 | mask = E1000_SWFW_CSR_SM; | ||
318 | |||
319 | return e1000_acquire_swfw_sync_80003es2lan(hw, mask); | ||
320 | } | ||
321 | |||
322 | /** | ||
323 | * e1000_release_mac_csr_80003es2lan - Release rights to access Kumeran Register | ||
324 | * @hw: pointer to the HW structure | ||
325 | * | ||
326 | * Release the semaphore used to access the Kumeran interface | ||
327 | **/ | ||
328 | static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw) | ||
329 | { | ||
330 | u16 mask; | ||
331 | |||
332 | mask = E1000_SWFW_CSR_SM; | ||
333 | |||
334 | e1000_release_swfw_sync_80003es2lan(hw, mask); | ||
335 | } | ||
336 | |||
337 | /** | ||
338 | * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM | ||
339 | * @hw: pointer to the HW structure | ||
340 | * | ||
341 | * Acquire the semaphore to access the EEPROM. | ||
342 | **/ | ||
343 | static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) | ||
344 | { | ||
345 | s32 ret_val; | ||
346 | |||
347 | ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); | ||
348 | if (ret_val) | ||
349 | return ret_val; | ||
350 | |||
351 | ret_val = e1000e_acquire_nvm(hw); | ||
352 | |||
353 | if (ret_val) | ||
354 | e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); | ||
355 | |||
356 | return ret_val; | ||
357 | } | ||
358 | |||
359 | /** | ||
360 | * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM | ||
361 | * @hw: pointer to the HW structure | ||
362 | * | ||
363 | * Release the semaphore used to access the EEPROM. | ||
364 | **/ | ||
365 | static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) | ||
366 | { | ||
367 | e1000e_release_nvm(hw); | ||
368 | e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); | ||
369 | } | ||
370 | |||
371 | /** | ||
372 | * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore | ||
373 | * @hw: pointer to the HW structure | ||
374 | * @mask: specifies which semaphore to acquire | ||
375 | * | ||
376 | * Acquire the SW/FW semaphore to access the PHY or NVM. The mask | ||
377 | * will also specify which port we're acquiring the lock for. | ||
378 | **/ | ||
379 | static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) | ||
380 | { | ||
381 | u32 swfw_sync; | ||
382 | u32 swmask = mask; | ||
383 | u32 fwmask = mask << 16; | ||
384 | s32 i = 0; | ||
385 | s32 timeout = 50; | ||
386 | |||
387 | while (i < timeout) { | ||
388 | if (e1000e_get_hw_semaphore(hw)) | ||
389 | return -E1000_ERR_SWFW_SYNC; | ||
390 | |||
391 | swfw_sync = er32(SW_FW_SYNC); | ||
392 | if (!(swfw_sync & (fwmask | swmask))) | ||
393 | break; | ||
394 | |||
395 | /* | ||
396 | * Firmware currently using resource (fwmask) | ||
397 | * or other software thread using resource (swmask) | ||
398 | */ | ||
399 | e1000e_put_hw_semaphore(hw); | ||
400 | mdelay(5); | ||
401 | i++; | ||
402 | } | ||
403 | |||
404 | if (i == timeout) { | ||
405 | e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); | ||
406 | return -E1000_ERR_SWFW_SYNC; | ||
407 | } | ||
408 | |||
409 | swfw_sync |= swmask; | ||
410 | ew32(SW_FW_SYNC, swfw_sync); | ||
411 | |||
412 | e1000e_put_hw_semaphore(hw); | ||
413 | |||
414 | return 0; | ||
415 | } | ||
416 | |||
417 | /** | ||
418 | * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore | ||
419 | * @hw: pointer to the HW structure | ||
420 | * @mask: specifies which semaphore to acquire | ||
421 | * | ||
422 | * Release the SW/FW semaphore used to access the PHY or NVM. The mask | ||
423 | * will also specify which port we're releasing the lock for. | ||
424 | **/ | ||
425 | static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) | ||
426 | { | ||
427 | u32 swfw_sync; | ||
428 | |||
429 | while (e1000e_get_hw_semaphore(hw) != 0) | ||
430 | ; /* Empty */ | ||
431 | |||
432 | swfw_sync = er32(SW_FW_SYNC); | ||
433 | swfw_sync &= ~mask; | ||
434 | ew32(SW_FW_SYNC, swfw_sync); | ||
435 | |||
436 | e1000e_put_hw_semaphore(hw); | ||
437 | } | ||
438 | |||
439 | /** | ||
440 | * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register | ||
441 | * @hw: pointer to the HW structure | ||
442 | * @offset: offset of the register to read | ||
443 | * @data: pointer to the data returned from the operation | ||
444 | * | ||
445 | * Read the GG82563 PHY register. | ||
446 | **/ | ||
447 | static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, | ||
448 | u32 offset, u16 *data) | ||
449 | { | ||
450 | s32 ret_val; | ||
451 | u32 page_select; | ||
452 | u16 temp; | ||
453 | |||
454 | ret_val = e1000_acquire_phy_80003es2lan(hw); | ||
455 | if (ret_val) | ||
456 | return ret_val; | ||
457 | |||
458 | /* Select Configuration Page */ | ||
459 | if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { | ||
460 | page_select = GG82563_PHY_PAGE_SELECT; | ||
461 | } else { | ||
462 | /* | ||
463 | * Use Alternative Page Select register to access | ||
464 | * registers 30 and 31 | ||
465 | */ | ||
466 | page_select = GG82563_PHY_PAGE_SELECT_ALT; | ||
467 | } | ||
468 | |||
469 | temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); | ||
470 | ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); | ||
471 | if (ret_val) { | ||
472 | e1000_release_phy_80003es2lan(hw); | ||
473 | return ret_val; | ||
474 | } | ||
475 | |||
476 | if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) { | ||
477 | /* | ||
478 | * The "ready" bit in the MDIC register may be incorrectly set | ||
479 | * before the device has completed the "Page Select" MDI | ||
480 | * transaction. So we wait 200us after each MDI command... | ||
481 | */ | ||
482 | udelay(200); | ||
483 | |||
484 | /* ...and verify the command was successful. */ | ||
485 | ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); | ||
486 | |||
487 | if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { | ||
488 | ret_val = -E1000_ERR_PHY; | ||
489 | e1000_release_phy_80003es2lan(hw); | ||
490 | return ret_val; | ||
491 | } | ||
492 | |||
493 | udelay(200); | ||
494 | |||
495 | ret_val = e1000e_read_phy_reg_mdic(hw, | ||
496 | MAX_PHY_REG_ADDRESS & offset, | ||
497 | data); | ||
498 | |||
499 | udelay(200); | ||
500 | } else { | ||
501 | ret_val = e1000e_read_phy_reg_mdic(hw, | ||
502 | MAX_PHY_REG_ADDRESS & offset, | ||
503 | data); | ||
504 | } | ||
505 | |||
506 | e1000_release_phy_80003es2lan(hw); | ||
507 | |||
508 | return ret_val; | ||
509 | } | ||
510 | |||
511 | /** | ||
512 | * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register | ||
513 | * @hw: pointer to the HW structure | ||
514 | * @offset: offset of the register to read | ||
515 | * @data: value to write to the register | ||
516 | * | ||
517 | * Write to the GG82563 PHY register. | ||
518 | **/ | ||
519 | static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, | ||
520 | u32 offset, u16 data) | ||
521 | { | ||
522 | s32 ret_val; | ||
523 | u32 page_select; | ||
524 | u16 temp; | ||
525 | |||
526 | ret_val = e1000_acquire_phy_80003es2lan(hw); | ||
527 | if (ret_val) | ||
528 | return ret_val; | ||
529 | |||
530 | /* Select Configuration Page */ | ||
531 | if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { | ||
532 | page_select = GG82563_PHY_PAGE_SELECT; | ||
533 | } else { | ||
534 | /* | ||
535 | * Use Alternative Page Select register to access | ||
536 | * registers 30 and 31 | ||
537 | */ | ||
538 | page_select = GG82563_PHY_PAGE_SELECT_ALT; | ||
539 | } | ||
540 | |||
541 | temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); | ||
542 | ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); | ||
543 | if (ret_val) { | ||
544 | e1000_release_phy_80003es2lan(hw); | ||
545 | return ret_val; | ||
546 | } | ||
547 | |||
548 | if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) { | ||
549 | /* | ||
550 | * The "ready" bit in the MDIC register may be incorrectly set | ||
551 | * before the device has completed the "Page Select" MDI | ||
552 | * transaction. So we wait 200us after each MDI command... | ||
553 | */ | ||
554 | udelay(200); | ||
555 | |||
556 | /* ...and verify the command was successful. */ | ||
557 | ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); | ||
558 | |||
559 | if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { | ||
560 | e1000_release_phy_80003es2lan(hw); | ||
561 | return -E1000_ERR_PHY; | ||
562 | } | ||
563 | |||
564 | udelay(200); | ||
565 | |||
566 | ret_val = e1000e_write_phy_reg_mdic(hw, | ||
567 | MAX_PHY_REG_ADDRESS & offset, | ||
568 | data); | ||
569 | |||
570 | udelay(200); | ||
571 | } else { | ||
572 | ret_val = e1000e_write_phy_reg_mdic(hw, | ||
573 | MAX_PHY_REG_ADDRESS & offset, | ||
574 | data); | ||
575 | } | ||
576 | |||
577 | e1000_release_phy_80003es2lan(hw); | ||
578 | |||
579 | return ret_val; | ||
580 | } | ||
581 | |||
582 | /** | ||
583 | * e1000_write_nvm_80003es2lan - Write to ESB2 NVM | ||
584 | * @hw: pointer to the HW structure | ||
585 | * @offset: offset of the register to read | ||
586 | * @words: number of words to write | ||
587 | * @data: buffer of data to write to the NVM | ||
588 | * | ||
589 | * Write "words" of data to the ESB2 NVM. | ||
590 | **/ | ||
591 | static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, | ||
592 | u16 words, u16 *data) | ||
593 | { | ||
594 | return e1000e_write_nvm_spi(hw, offset, words, data); | ||
595 | } | ||
596 | |||
597 | /** | ||
598 | * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete | ||
599 | * @hw: pointer to the HW structure | ||
600 | * | ||
601 | * Wait a specific amount of time for manageability processes to complete. | ||
602 | * This is a function pointer entry point called by the phy module. | ||
603 | **/ | ||
604 | static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) | ||
605 | { | ||
606 | s32 timeout = PHY_CFG_TIMEOUT; | ||
607 | u32 mask = E1000_NVM_CFG_DONE_PORT_0; | ||
608 | |||
609 | if (hw->bus.func == 1) | ||
610 | mask = E1000_NVM_CFG_DONE_PORT_1; | ||
611 | |||
612 | while (timeout) { | ||
613 | if (er32(EEMNGCTL) & mask) | ||
614 | break; | ||
615 | usleep_range(1000, 2000); | ||
616 | timeout--; | ||
617 | } | ||
618 | if (!timeout) { | ||
619 | e_dbg("MNG configuration cycle has not completed.\n"); | ||
620 | return -E1000_ERR_RESET; | ||
621 | } | ||
622 | |||
623 | return 0; | ||
624 | } | ||
625 | |||
626 | /** | ||
627 | * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex | ||
628 | * @hw: pointer to the HW structure | ||
629 | * | ||
630 | * Force the speed and duplex settings onto the PHY. This is a | ||
631 | * function pointer entry point called by the phy module. | ||
632 | **/ | ||
633 | static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) | ||
634 | { | ||
635 | s32 ret_val; | ||
636 | u16 phy_data; | ||
637 | bool link; | ||
638 | |||
639 | /* | ||
640 | * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI | ||
641 | * forced whenever speed and duplex are forced. | ||
642 | */ | ||
643 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | ||
644 | if (ret_val) | ||
645 | return ret_val; | ||
646 | |||
647 | phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO; | ||
648 | ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data); | ||
649 | if (ret_val) | ||
650 | return ret_val; | ||
651 | |||
652 | e_dbg("GG82563 PSCR: %X\n", phy_data); | ||
653 | |||
654 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); | ||
655 | if (ret_val) | ||
656 | return ret_val; | ||
657 | |||
658 | e1000e_phy_force_speed_duplex_setup(hw, &phy_data); | ||
659 | |||
660 | /* Reset the phy to commit changes. */ | ||
661 | phy_data |= MII_CR_RESET; | ||
662 | |||
663 | ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); | ||
664 | if (ret_val) | ||
665 | return ret_val; | ||
666 | |||
667 | udelay(1); | ||
668 | |||
669 | if (hw->phy.autoneg_wait_to_complete) { | ||
670 | e_dbg("Waiting for forced speed/duplex link " | ||
671 | "on GG82563 phy.\n"); | ||
672 | |||
673 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | ||
674 | 100000, &link); | ||
675 | if (ret_val) | ||
676 | return ret_val; | ||
677 | |||
678 | if (!link) { | ||
679 | /* | ||
680 | * We didn't get link. | ||
681 | * Reset the DSP and cross our fingers. | ||
682 | */ | ||
683 | ret_val = e1000e_phy_reset_dsp(hw); | ||
684 | if (ret_val) | ||
685 | return ret_val; | ||
686 | } | ||
687 | |||
688 | /* Try once more */ | ||
689 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | ||
690 | 100000, &link); | ||
691 | if (ret_val) | ||
692 | return ret_val; | ||
693 | } | ||
694 | |||
695 | ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); | ||
696 | if (ret_val) | ||
697 | return ret_val; | ||
698 | |||
699 | /* | ||
700 | * Resetting the phy means we need to verify the TX_CLK corresponds | ||
701 | * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. | ||
702 | */ | ||
703 | phy_data &= ~GG82563_MSCR_TX_CLK_MASK; | ||
704 | if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED) | ||
705 | phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5; | ||
706 | else | ||
707 | phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; | ||
708 | |||
709 | /* | ||
710 | * In addition, we must re-enable CRS on Tx for both half and full | ||
711 | * duplex. | ||
712 | */ | ||
713 | phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; | ||
714 | ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data); | ||
715 | |||
716 | return ret_val; | ||
717 | } | ||
718 | |||
719 | /** | ||
720 | * e1000_get_cable_length_80003es2lan - Set approximate cable length | ||
721 | * @hw: pointer to the HW structure | ||
722 | * | ||
723 | * Find the approximate cable length as measured by the GG82563 PHY. | ||
724 | * This is a function pointer entry point called by the phy module. | ||
725 | **/ | ||
726 | static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) | ||
727 | { | ||
728 | struct e1000_phy_info *phy = &hw->phy; | ||
729 | s32 ret_val = 0; | ||
730 | u16 phy_data, index; | ||
731 | |||
732 | ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); | ||
733 | if (ret_val) | ||
734 | goto out; | ||
735 | |||
736 | index = phy_data & GG82563_DSPD_CABLE_LENGTH; | ||
737 | |||
738 | if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) { | ||
739 | ret_val = -E1000_ERR_PHY; | ||
740 | goto out; | ||
741 | } | ||
742 | |||
743 | phy->min_cable_length = e1000_gg82563_cable_length_table[index]; | ||
744 | phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; | ||
745 | |||
746 | phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; | ||
747 | |||
748 | out: | ||
749 | return ret_val; | ||
750 | } | ||
751 | |||
752 | /** | ||
753 | * e1000_get_link_up_info_80003es2lan - Report speed and duplex | ||
754 | * @hw: pointer to the HW structure | ||
755 | * @speed: pointer to speed buffer | ||
756 | * @duplex: pointer to duplex buffer | ||
757 | * | ||
758 | * Retrieve the current speed and duplex configuration. | ||
759 | **/ | ||
760 | static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, | ||
761 | u16 *duplex) | ||
762 | { | ||
763 | s32 ret_val; | ||
764 | |||
765 | if (hw->phy.media_type == e1000_media_type_copper) { | ||
766 | ret_val = e1000e_get_speed_and_duplex_copper(hw, | ||
767 | speed, | ||
768 | duplex); | ||
769 | hw->phy.ops.cfg_on_link_up(hw); | ||
770 | } else { | ||
771 | ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, | ||
772 | speed, | ||
773 | duplex); | ||
774 | } | ||
775 | |||
776 | return ret_val; | ||
777 | } | ||
778 | |||
779 | /** | ||
780 | * e1000_reset_hw_80003es2lan - Reset the ESB2 controller | ||
781 | * @hw: pointer to the HW structure | ||
782 | * | ||
783 | * Perform a global reset to the ESB2 controller. | ||
784 | **/ | ||
785 | static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | ||
786 | { | ||
787 | u32 ctrl; | ||
788 | s32 ret_val; | ||
789 | |||
790 | /* | ||
791 | * Prevent the PCI-E bus from sticking if there is no TLP connection | ||
792 | * on the last TLP read/write transaction when MAC is reset. | ||
793 | */ | ||
794 | ret_val = e1000e_disable_pcie_master(hw); | ||
795 | if (ret_val) | ||
796 | e_dbg("PCI-E Master disable polling has failed.\n"); | ||
797 | |||
798 | e_dbg("Masking off all interrupts\n"); | ||
799 | ew32(IMC, 0xffffffff); | ||
800 | |||
801 | ew32(RCTL, 0); | ||
802 | ew32(TCTL, E1000_TCTL_PSP); | ||
803 | e1e_flush(); | ||
804 | |||
805 | usleep_range(10000, 20000); | ||
806 | |||
807 | ctrl = er32(CTRL); | ||
808 | |||
809 | ret_val = e1000_acquire_phy_80003es2lan(hw); | ||
810 | e_dbg("Issuing a global reset to MAC\n"); | ||
811 | ew32(CTRL, ctrl | E1000_CTRL_RST); | ||
812 | e1000_release_phy_80003es2lan(hw); | ||
813 | |||
814 | ret_val = e1000e_get_auto_rd_done(hw); | ||
815 | if (ret_val) | ||
816 | /* We don't want to continue accessing MAC registers. */ | ||
817 | return ret_val; | ||
818 | |||
819 | /* Clear any pending interrupt events. */ | ||
820 | ew32(IMC, 0xffffffff); | ||
821 | er32(ICR); | ||
822 | |||
823 | ret_val = e1000_check_alt_mac_addr_generic(hw); | ||
824 | |||
825 | return ret_val; | ||
826 | } | ||
827 | |||
828 | /** | ||
829 | * e1000_init_hw_80003es2lan - Initialize the ESB2 controller | ||
830 | * @hw: pointer to the HW structure | ||
831 | * | ||
832 | * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. | ||
833 | **/ | ||
834 | static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) | ||
835 | { | ||
836 | struct e1000_mac_info *mac = &hw->mac; | ||
837 | u32 reg_data; | ||
838 | s32 ret_val; | ||
839 | u16 kum_reg_data; | ||
840 | u16 i; | ||
841 | |||
842 | e1000_initialize_hw_bits_80003es2lan(hw); | ||
843 | |||
844 | /* Initialize identification LED */ | ||
845 | ret_val = e1000e_id_led_init(hw); | ||
846 | if (ret_val) | ||
847 | e_dbg("Error initializing identification LED\n"); | ||
848 | /* This is not fatal and we should not stop init due to this */ | ||
849 | |||
850 | /* Disabling VLAN filtering */ | ||
851 | e_dbg("Initializing the IEEE VLAN\n"); | ||
852 | mac->ops.clear_vfta(hw); | ||
853 | |||
854 | /* Setup the receive address. */ | ||
855 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); | ||
856 | |||
857 | /* Zero out the Multicast HASH table */ | ||
858 | e_dbg("Zeroing the MTA\n"); | ||
859 | for (i = 0; i < mac->mta_reg_count; i++) | ||
860 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); | ||
861 | |||
862 | /* Setup link and flow control */ | ||
863 | ret_val = e1000e_setup_link(hw); | ||
864 | |||
865 | /* Disable IBIST slave mode (far-end loopback) */ | ||
866 | e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, | ||
867 | &kum_reg_data); | ||
868 | kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; | ||
869 | e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, | ||
870 | kum_reg_data); | ||
871 | |||
872 | /* Set the transmit descriptor write-back policy */ | ||
873 | reg_data = er32(TXDCTL(0)); | ||
874 | reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | | ||
875 | E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; | ||
876 | ew32(TXDCTL(0), reg_data); | ||
877 | |||
878 | /* ...for both queues. */ | ||
879 | reg_data = er32(TXDCTL(1)); | ||
880 | reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | | ||
881 | E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; | ||
882 | ew32(TXDCTL(1), reg_data); | ||
883 | |||
884 | /* Enable retransmit on late collisions */ | ||
885 | reg_data = er32(TCTL); | ||
886 | reg_data |= E1000_TCTL_RTLC; | ||
887 | ew32(TCTL, reg_data); | ||
888 | |||
889 | /* Configure Gigabit Carry Extend Padding */ | ||
890 | reg_data = er32(TCTL_EXT); | ||
891 | reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; | ||
892 | reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN; | ||
893 | ew32(TCTL_EXT, reg_data); | ||
894 | |||
895 | /* Configure Transmit Inter-Packet Gap */ | ||
896 | reg_data = er32(TIPG); | ||
897 | reg_data &= ~E1000_TIPG_IPGT_MASK; | ||
898 | reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; | ||
899 | ew32(TIPG, reg_data); | ||
900 | |||
901 | reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001); | ||
902 | reg_data &= ~0x00100000; | ||
903 | E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); | ||
904 | |||
905 | /* default to true to enable the MDIC W/A */ | ||
906 | hw->dev_spec.e80003es2lan.mdic_wa_enable = true; | ||
907 | |||
908 | ret_val = e1000_read_kmrn_reg_80003es2lan(hw, | ||
909 | E1000_KMRNCTRLSTA_OFFSET >> | ||
910 | E1000_KMRNCTRLSTA_OFFSET_SHIFT, | ||
911 | &i); | ||
912 | if (!ret_val) { | ||
913 | if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == | ||
914 | E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) | ||
915 | hw->dev_spec.e80003es2lan.mdic_wa_enable = false; | ||
916 | } | ||
917 | |||
918 | /* | ||
919 | * Clear all of the statistics registers (clear on read). It is | ||
920 | * important that we do this after we have tried to establish link | ||
921 | * because the symbol error count will increment wildly if there | ||
922 | * is no link. | ||
923 | */ | ||
924 | e1000_clear_hw_cntrs_80003es2lan(hw); | ||
925 | |||
926 | return ret_val; | ||
927 | } | ||
928 | |||
929 | /** | ||
930 | * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2 | ||
931 | * @hw: pointer to the HW structure | ||
932 | * | ||
933 | * Initializes required hardware-dependent bits needed for normal operation. | ||
934 | **/ | ||
935 | static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) | ||
936 | { | ||
937 | u32 reg; | ||
938 | |||
939 | /* Transmit Descriptor Control 0 */ | ||
940 | reg = er32(TXDCTL(0)); | ||
941 | reg |= (1 << 22); | ||
942 | ew32(TXDCTL(0), reg); | ||
943 | |||
944 | /* Transmit Descriptor Control 1 */ | ||
945 | reg = er32(TXDCTL(1)); | ||
946 | reg |= (1 << 22); | ||
947 | ew32(TXDCTL(1), reg); | ||
948 | |||
949 | /* Transmit Arbitration Control 0 */ | ||
950 | reg = er32(TARC(0)); | ||
951 | reg &= ~(0xF << 27); /* 30:27 */ | ||
952 | if (hw->phy.media_type != e1000_media_type_copper) | ||
953 | reg &= ~(1 << 20); | ||
954 | ew32(TARC(0), reg); | ||
955 | |||
956 | /* Transmit Arbitration Control 1 */ | ||
957 | reg = er32(TARC(1)); | ||
958 | if (er32(TCTL) & E1000_TCTL_MULR) | ||
959 | reg &= ~(1 << 28); | ||
960 | else | ||
961 | reg |= (1 << 28); | ||
962 | ew32(TARC(1), reg); | ||
963 | } | ||
964 | |||
965 | /** | ||
966 | * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link | ||
967 | * @hw: pointer to the HW structure | ||
968 | * | ||
969 | * Setup some GG82563 PHY registers for obtaining link | ||
970 | **/ | ||
971 | static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) | ||
972 | { | ||
973 | struct e1000_phy_info *phy = &hw->phy; | ||
974 | s32 ret_val; | ||
975 | u32 ctrl_ext; | ||
976 | u16 data; | ||
977 | |||
978 | ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); | ||
979 | if (ret_val) | ||
980 | return ret_val; | ||
981 | |||
982 | data |= GG82563_MSCR_ASSERT_CRS_ON_TX; | ||
983 | /* Use 25MHz for both link down and 1000Base-T for Tx clock. */ | ||
984 | data |= GG82563_MSCR_TX_CLK_1000MBPS_25; | ||
985 | |||
986 | ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data); | ||
987 | if (ret_val) | ||
988 | return ret_val; | ||
989 | |||
990 | /* | ||
991 | * Options: | ||
992 | * MDI/MDI-X = 0 (default) | ||
993 | * 0 - Auto for all speeds | ||
994 | * 1 - MDI mode | ||
995 | * 2 - MDI-X mode | ||
996 | * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) | ||
997 | */ | ||
998 | ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data); | ||
999 | if (ret_val) | ||
1000 | return ret_val; | ||
1001 | |||
1002 | data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; | ||
1003 | |||
1004 | switch (phy->mdix) { | ||
1005 | case 1: | ||
1006 | data |= GG82563_PSCR_CROSSOVER_MODE_MDI; | ||
1007 | break; | ||
1008 | case 2: | ||
1009 | data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; | ||
1010 | break; | ||
1011 | case 0: | ||
1012 | default: | ||
1013 | data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; | ||
1014 | break; | ||
1015 | } | ||
1016 | |||
1017 | /* | ||
1018 | * Options: | ||
1019 | * disable_polarity_correction = 0 (default) | ||
1020 | * Automatic Correction for Reversed Cable Polarity | ||
1021 | * 0 - Disabled | ||
1022 | * 1 - Enabled | ||
1023 | */ | ||
1024 | data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; | ||
1025 | if (phy->disable_polarity_correction) | ||
1026 | data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; | ||
1027 | |||
1028 | ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data); | ||
1029 | if (ret_val) | ||
1030 | return ret_val; | ||
1031 | |||
1032 | /* SW Reset the PHY so all changes take effect */ | ||
1033 | ret_val = e1000e_commit_phy(hw); | ||
1034 | if (ret_val) { | ||
1035 | e_dbg("Error Resetting the PHY\n"); | ||
1036 | return ret_val; | ||
1037 | } | ||
1038 | |||
1039 | /* Bypass Rx and Tx FIFOs */ | ||
1040 | ret_val = e1000_write_kmrn_reg_80003es2lan(hw, | ||
1041 | E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, | ||
1042 | E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | | ||
1043 | E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); | ||
1044 | if (ret_val) | ||
1045 | return ret_val; | ||
1046 | |||
1047 | ret_val = e1000_read_kmrn_reg_80003es2lan(hw, | ||
1048 | E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, | ||
1049 | &data); | ||
1050 | if (ret_val) | ||
1051 | return ret_val; | ||
1052 | data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; | ||
1053 | ret_val = e1000_write_kmrn_reg_80003es2lan(hw, | ||
1054 | E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, | ||
1055 | data); | ||
1056 | if (ret_val) | ||
1057 | return ret_val; | ||
1058 | |||
1059 | ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data); | ||
1060 | if (ret_val) | ||
1061 | return ret_val; | ||
1062 | |||
1063 | data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; | ||
1064 | ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data); | ||
1065 | if (ret_val) | ||
1066 | return ret_val; | ||
1067 | |||
1068 | ctrl_ext = er32(CTRL_EXT); | ||
1069 | ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); | ||
1070 | ew32(CTRL_EXT, ctrl_ext); | ||
1071 | |||
1072 | ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); | ||
1073 | if (ret_val) | ||
1074 | return ret_val; | ||
1075 | |||
1076 | /* | ||
1077 | * Do not init these registers when the HW is in IAMT mode, since the | ||
1078 | * firmware will have already initialized them. We only initialize | ||
1079 | * them if the HW is not in IAMT mode. | ||
1080 | */ | ||
1081 | if (!e1000e_check_mng_mode(hw)) { | ||
1082 | /* Enable Electrical Idle on the PHY */ | ||
1083 | data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; | ||
1084 | ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data); | ||
1085 | if (ret_val) | ||
1086 | return ret_val; | ||
1087 | |||
1088 | ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data); | ||
1089 | if (ret_val) | ||
1090 | return ret_val; | ||
1091 | |||
1092 | data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; | ||
1093 | ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data); | ||
1094 | if (ret_val) | ||
1095 | return ret_val; | ||
1096 | } | ||
1097 | |||
1098 | /* | ||
1099 | * Workaround: Disable padding in Kumeran interface in the MAC | ||
1100 | * and in the PHY to avoid CRC errors. | ||
1101 | */ | ||
1102 | ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data); | ||
1103 | if (ret_val) | ||
1104 | return ret_val; | ||
1105 | |||
1106 | data |= GG82563_ICR_DIS_PADDING; | ||
1107 | ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data); | ||
1108 | if (ret_val) | ||
1109 | return ret_val; | ||
1110 | |||
1111 | return 0; | ||
1112 | } | ||
1113 | |||
1114 | /** | ||
1115 | * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2 | ||
1116 | * @hw: pointer to the HW structure | ||
1117 | * | ||
1118 | * Essentially a wrapper for setting up all things "copper" related. | ||
1119 | * This is a function pointer entry point called by the mac module. | ||
1120 | **/ | ||
1121 | static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) | ||
1122 | { | ||
1123 | u32 ctrl; | ||
1124 | s32 ret_val; | ||
1125 | u16 reg_data; | ||
1126 | |||
1127 | ctrl = er32(CTRL); | ||
1128 | ctrl |= E1000_CTRL_SLU; | ||
1129 | ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | ||
1130 | ew32(CTRL, ctrl); | ||
1131 | |||
1132 | /* | ||
1133 | * Set the mac to wait the maximum time between each | ||
1134 | * iteration and increase the max iterations when | ||
1135 | * polling the phy; this fixes erroneous timeouts at 10Mbps. | ||
1136 | */ | ||
1137 | ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4), | ||
1138 | 0xFFFF); | ||
1139 | if (ret_val) | ||
1140 | return ret_val; | ||
1141 | ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), | ||
1142 | &reg_data); | ||
1143 | if (ret_val) | ||
1144 | return ret_val; | ||
1145 | reg_data |= 0x3F; | ||
1146 | ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), | ||
1147 | reg_data); | ||
1148 | if (ret_val) | ||
1149 | return ret_val; | ||
1150 | ret_val = e1000_read_kmrn_reg_80003es2lan(hw, | ||
1151 | E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, | ||
1152 | &reg_data); | ||
1153 | if (ret_val) | ||
1154 | return ret_val; | ||
1155 | reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; | ||
1156 | ret_val = e1000_write_kmrn_reg_80003es2lan(hw, | ||
1157 | E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, | ||
1158 | reg_data); | ||
1159 | if (ret_val) | ||
1160 | return ret_val; | ||
1161 | |||
1162 | ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw); | ||
1163 | if (ret_val) | ||
1164 | return ret_val; | ||
1165 | |||
1166 | ret_val = e1000e_setup_copper_link(hw); | ||
1167 | |||
1168 | return 0; | ||
1169 | } | ||
1170 | |||
1171 | /** | ||
1172 | * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up | ||
1173 | * @hw: pointer to the HW structure | ||
1174 | * | ||
1175 | * Configure the KMRN interface after link-up by applying the | ||
1176 | * last-minute quirks for either gigabit or 10/100 operation based | ||
1177 | * on the negotiated speed. | ||
1178 | **/ | ||
1179 | static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw) | ||
1180 | { | ||
1181 | s32 ret_val = 0; | ||
1182 | u16 speed; | ||
1183 | u16 duplex; | ||
1184 | |||
1185 | if (hw->phy.media_type == e1000_media_type_copper) { | ||
1186 | ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed, | ||
1187 | &duplex); | ||
1188 | if (ret_val) | ||
1189 | return ret_val; | ||
1190 | |||
1191 | if (speed == SPEED_1000) | ||
1192 | ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw); | ||
1193 | else | ||
1194 | ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex); | ||
1195 | } | ||
1196 | |||
1197 | return ret_val; | ||
1198 | } | ||
1199 | |||
1200 | /** | ||
1201 | * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation | ||
1202 | * @hw: pointer to the HW structure | ||
1203 | * @duplex: current duplex setting | ||
1204 | * | ||
1205 | * Configure the KMRN interface by applying last minute quirks for | ||
1206 | * 10/100 operation. | ||
1207 | **/ | ||
1208 | static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) | ||
1209 | { | ||
1210 | s32 ret_val; | ||
1211 | u32 tipg; | ||
1212 | u32 i = 0; | ||
1213 | u16 reg_data, reg_data2; | ||
1214 | |||
1215 | reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; | ||
1216 | ret_val = e1000_write_kmrn_reg_80003es2lan(hw, | ||
1217 | E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, | ||
1218 | reg_data); | ||
1219 | if (ret_val) | ||
1220 | return ret_val; | ||
1221 | |||
1222 | /* Configure Transmit Inter-Packet Gap */ | ||
1223 | tipg = er32(TIPG); | ||
1224 | tipg &= ~E1000_TIPG_IPGT_MASK; | ||
1225 | tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN; | ||
1226 | ew32(TIPG, tipg); | ||
1227 | |||
1228 | do { | ||
1229 | ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); | ||
1230 | if (ret_val) | ||
1231 | return ret_val; | ||
1232 | |||
1233 | ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2); | ||
1234 | if (ret_val) | ||
1235 | return ret_val; | ||
1236 | i++; | ||
1237 | } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); | ||
1238 | |||
1239 | if (duplex == HALF_DUPLEX) | ||
1240 | reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER; | ||
1241 | else | ||
1242 | reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; | ||
1243 | |||
1244 | ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); | ||
1245 | |||
1246 | return 0; | ||
1247 | } | ||
1248 | |||
1249 | /** | ||
1250 | * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation | ||
1251 | * @hw: pointer to the HW structure | ||
1252 | * | ||
1253 | * Configure the KMRN interface by applying last minute quirks for | ||
1254 | * gigabit operation. | ||
1255 | **/ | ||
1256 | static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) | ||
1257 | { | ||
1258 | s32 ret_val; | ||
1259 | u16 reg_data, reg_data2; | ||
1260 | u32 tipg; | ||
1261 | u32 i = 0; | ||
1262 | |||
1263 | reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; | ||
1264 | ret_val = e1000_write_kmrn_reg_80003es2lan(hw, | ||
1265 | E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, | ||
1266 | reg_data); | ||
1267 | if (ret_val) | ||
1268 | return ret_val; | ||
1269 | |||
1270 | /* Configure Transmit Inter-Packet Gap */ | ||
1271 | tipg = er32(TIPG); | ||
1272 | tipg &= ~E1000_TIPG_IPGT_MASK; | ||
1273 | tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; | ||
1274 | ew32(TIPG, tipg); | ||
1275 | |||
1276 | do { | ||
1277 | ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); | ||
1278 | if (ret_val) | ||
1279 | return ret_val; | ||
1280 | |||
1281 | ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2); | ||
1282 | if (ret_val) | ||
1283 | return ret_val; | ||
1284 | i++; | ||
1285 | } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); | ||
1286 | |||
1287 | reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; | ||
1288 | ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); | ||
1289 | |||
1290 | return ret_val; | ||
1291 | } | ||
1292 | |||
1293 | /** | ||
1294 | * e1000_read_kmrn_reg_80003es2lan - Read kumeran register | ||
1295 | * @hw: pointer to the HW structure | ||
1296 | * @offset: register offset to be read | ||
1297 | * @data: pointer to the read data | ||
1298 | * | ||
1299 | * Acquire semaphore, then read the PHY register at offset | ||
1300 | * using the kumeran interface. The information retrieved is stored in data. | ||
1301 | * Release the semaphore before exiting. | ||
1302 | **/ | ||
1303 | static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | ||
1304 | u16 *data) | ||
1305 | { | ||
1306 | u32 kmrnctrlsta; | ||
1307 | s32 ret_val = 0; | ||
1308 | |||
1309 | ret_val = e1000_acquire_mac_csr_80003es2lan(hw); | ||
1310 | if (ret_val) | ||
1311 | return ret_val; | ||
1312 | |||
1313 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & | ||
1314 | E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; | ||
1315 | ew32(KMRNCTRLSTA, kmrnctrlsta); | ||
1316 | e1e_flush(); | ||
1317 | |||
1318 | udelay(2); | ||
1319 | |||
1320 | kmrnctrlsta = er32(KMRNCTRLSTA); | ||
1321 | *data = (u16)kmrnctrlsta; | ||
1322 | |||
1323 | e1000_release_mac_csr_80003es2lan(hw); | ||
1324 | |||
1325 | return ret_val; | ||
1326 | } | ||
1327 | |||
1328 | /** | ||
1329 | * e1000_write_kmrn_reg_80003es2lan - Write kumeran register | ||
1330 | * @hw: pointer to the HW structure | ||
1331 | * @offset: register offset to write to | ||
1332 | * @data: data to write at register offset | ||
1333 | * | ||
1334 | * Acquire semaphore, then write the data to PHY register | ||
1335 | * at the offset using the kumeran interface. Release semaphore | ||
1336 | * before exiting. | ||
1337 | **/ | ||
1338 | static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | ||
1339 | u16 data) | ||
1340 | { | ||
1341 | u32 kmrnctrlsta; | ||
1342 | s32 ret_val = 0; | ||
1343 | |||
1344 | ret_val = e1000_acquire_mac_csr_80003es2lan(hw); | ||
1345 | if (ret_val) | ||
1346 | return ret_val; | ||
1347 | |||
1348 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & | ||
1349 | E1000_KMRNCTRLSTA_OFFSET) | data; | ||
1350 | ew32(KMRNCTRLSTA, kmrnctrlsta); | ||
1351 | e1e_flush(); | ||
1352 | |||
1353 | udelay(2); | ||
1354 | |||
1355 | e1000_release_mac_csr_80003es2lan(hw); | ||
1356 | |||
1357 | return ret_val; | ||
1358 | } | ||
1359 | |||
1360 | /** | ||
1361 | * e1000_read_mac_addr_80003es2lan - Read device MAC address | ||
1362 | * @hw: pointer to the HW structure | ||
1363 | **/ | ||
1364 | static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) | ||
1365 | { | ||
1366 | s32 ret_val = 0; | ||
1367 | |||
1368 | /* | ||
1369 | * If there's an alternate MAC address place it in RAR0 | ||
1370 | * so that it will override the Si installed default perm | ||
1371 | * address. | ||
1372 | */ | ||
1373 | ret_val = e1000_check_alt_mac_addr_generic(hw); | ||
1374 | if (ret_val) | ||
1375 | goto out; | ||
1376 | |||
1377 | ret_val = e1000_read_mac_addr_generic(hw); | ||
1378 | |||
1379 | out: | ||
1380 | return ret_val; | ||
1381 | } | ||
1382 | |||
1383 | /** | ||
1384 | * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down | ||
1385 | * @hw: pointer to the HW structure | ||
1386 | * | ||
1387 | * In the case of a PHY power down to save power, to turn off link during a | ||
1388 | * driver unload, or when wake on LAN is not enabled, remove the link. | ||
1389 | **/ | ||
1390 | static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw) | ||
1391 | { | ||
1392 | /* If the management interface is not enabled, then power down */ | ||
1393 | if (!(hw->mac.ops.check_mng_mode(hw) || | ||
1394 | hw->phy.ops.check_reset_block(hw))) | ||
1395 | e1000_power_down_phy_copper(hw); | ||
1396 | } | ||
1397 | |||
1398 | /** | ||
1399 | * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters | ||
1400 | * @hw: pointer to the HW structure | ||
1401 | * | ||
1402 | * Clears the hardware counters by reading the counter registers. | ||
1403 | **/ | ||
1404 | static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) | ||
1405 | { | ||
1406 | e1000e_clear_hw_cntrs_base(hw); | ||
1407 | |||
1408 | er32(PRC64); | ||
1409 | er32(PRC127); | ||
1410 | er32(PRC255); | ||
1411 | er32(PRC511); | ||
1412 | er32(PRC1023); | ||
1413 | er32(PRC1522); | ||
1414 | er32(PTC64); | ||
1415 | er32(PTC127); | ||
1416 | er32(PTC255); | ||
1417 | er32(PTC511); | ||
1418 | er32(PTC1023); | ||
1419 | er32(PTC1522); | ||
1420 | |||
1421 | er32(ALGNERRC); | ||
1422 | er32(RXERRC); | ||
1423 | er32(TNCRS); | ||
1424 | er32(CEXTERR); | ||
1425 | er32(TSCTC); | ||
1426 | er32(TSCTFC); | ||
1427 | |||
1428 | er32(MGTPRC); | ||
1429 | er32(MGTPDC); | ||
1430 | er32(MGTPTC); | ||
1431 | |||
1432 | er32(IAC); | ||
1433 | er32(ICRXOC); | ||
1434 | |||
1435 | er32(ICRXPTC); | ||
1436 | er32(ICRXATC); | ||
1437 | er32(ICTXPTC); | ||
1438 | er32(ICTXATC); | ||
1439 | er32(ICTXQEC); | ||
1440 | er32(ICTXQMTC); | ||
1441 | er32(ICRXDMTC); | ||
1442 | } | ||
1443 | |||
1444 | static struct e1000_mac_operations es2_mac_ops = { | ||
1445 | .read_mac_addr = e1000_read_mac_addr_80003es2lan, | ||
1446 | .id_led_init = e1000e_id_led_init, | ||
1447 | .blink_led = e1000e_blink_led_generic, | ||
1448 | .check_mng_mode = e1000e_check_mng_mode_generic, | ||
1449 | /* check_for_link dependent on media type */ | ||
1450 | .cleanup_led = e1000e_cleanup_led_generic, | ||
1451 | .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, | ||
1452 | .get_bus_info = e1000e_get_bus_info_pcie, | ||
1453 | .set_lan_id = e1000_set_lan_id_multi_port_pcie, | ||
1454 | .get_link_up_info = e1000_get_link_up_info_80003es2lan, | ||
1455 | .led_on = e1000e_led_on_generic, | ||
1456 | .led_off = e1000e_led_off_generic, | ||
1457 | .update_mc_addr_list = e1000e_update_mc_addr_list_generic, | ||
1458 | .write_vfta = e1000_write_vfta_generic, | ||
1459 | .clear_vfta = e1000_clear_vfta_generic, | ||
1460 | .reset_hw = e1000_reset_hw_80003es2lan, | ||
1461 | .init_hw = e1000_init_hw_80003es2lan, | ||
1462 | .setup_link = e1000e_setup_link, | ||
1463 | /* setup_physical_interface dependent on media type */ | ||
1464 | .setup_led = e1000e_setup_led_generic, | ||
1465 | }; | ||
1466 | |||
1467 | static struct e1000_phy_operations es2_phy_ops = { | ||
1468 | .acquire = e1000_acquire_phy_80003es2lan, | ||
1469 | .check_polarity = e1000_check_polarity_m88, | ||
1470 | .check_reset_block = e1000e_check_reset_block_generic, | ||
1471 | .commit = e1000e_phy_sw_reset, | ||
1472 | .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, | ||
1473 | .get_cfg_done = e1000_get_cfg_done_80003es2lan, | ||
1474 | .get_cable_length = e1000_get_cable_length_80003es2lan, | ||
1475 | .get_info = e1000e_get_phy_info_m88, | ||
1476 | .read_reg = e1000_read_phy_reg_gg82563_80003es2lan, | ||
1477 | .release = e1000_release_phy_80003es2lan, | ||
1478 | .reset = e1000e_phy_hw_reset_generic, | ||
1479 | .set_d0_lplu_state = NULL, | ||
1480 | .set_d3_lplu_state = e1000e_set_d3_lplu_state, | ||
1481 | .write_reg = e1000_write_phy_reg_gg82563_80003es2lan, | ||
1482 | .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, | ||
1483 | }; | ||
1484 | |||
1485 | static struct e1000_nvm_operations es2_nvm_ops = { | ||
1486 | .acquire = e1000_acquire_nvm_80003es2lan, | ||
1487 | .read = e1000e_read_nvm_eerd, | ||
1488 | .release = e1000_release_nvm_80003es2lan, | ||
1489 | .update = e1000e_update_nvm_checksum_generic, | ||
1490 | .valid_led_default = e1000e_valid_led_default, | ||
1491 | .validate = e1000e_validate_nvm_checksum_generic, | ||
1492 | .write = e1000_write_nvm_80003es2lan, | ||
1493 | }; | ||
1494 | |||
1495 | struct e1000_info e1000_es2_info = { | ||
1496 | .mac = e1000_80003es2lan, | ||
1497 | .flags = FLAG_HAS_HW_VLAN_FILTER | ||
1498 | | FLAG_HAS_JUMBO_FRAMES | ||
1499 | | FLAG_HAS_WOL | ||
1500 | | FLAG_APME_IN_CTRL3 | ||
1501 | | FLAG_RX_CSUM_ENABLED | ||
1502 | | FLAG_HAS_CTRLEXT_ON_LOAD | ||
1503 | | FLAG_RX_NEEDS_RESTART /* errata */ | ||
1504 | | FLAG_TARC_SET_BIT_ZERO /* errata */ | ||
1505 | | FLAG_APME_CHECK_PORT_B | ||
1506 | | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ | ||
1507 | | FLAG_TIPG_MEDIUM_FOR_80003ESLAN, | ||
1508 | .flags2 = FLAG2_DMA_BURST, | ||
1509 | .pba = 38, | ||
1510 | .max_hw_frame_size = DEFAULT_JUMBO, | ||
1511 | .get_variants = e1000_get_variants_80003es2lan, | ||
1512 | .mac_ops = &es2_mac_ops, | ||
1513 | .phy_ops = &es2_phy_ops, | ||
1514 | .nvm_ops = &es2_nvm_ops, | ||
1515 | }; | ||
1516 | |||
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
new file mode 100644
index 000000000000..480f2592f8a5
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -0,0 +1,2115 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/1000 Linux driver | ||
4 | Copyright(c) 1999 - 2011 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | /* | ||
30 | * 82571EB Gigabit Ethernet Controller | ||
31 | * 82571EB Gigabit Ethernet Controller (Copper) | ||
32 | * 82571EB Gigabit Ethernet Controller (Fiber) | ||
33 | * 82571EB Dual Port Gigabit Mezzanine Adapter | ||
34 | * 82571EB Quad Port Gigabit Mezzanine Adapter | ||
35 | * 82571PT Gigabit PT Quad Port Server ExpressModule | ||
36 | * 82572EI Gigabit Ethernet Controller (Copper) | ||
37 | * 82572EI Gigabit Ethernet Controller (Fiber) | ||
38 | * 82572EI Gigabit Ethernet Controller | ||
39 | * 82573V Gigabit Ethernet Controller (Copper) | ||
40 | * 82573E Gigabit Ethernet Controller (Copper) | ||
41 | * 82573L Gigabit Ethernet Controller | ||
42 | * 82574L Gigabit Network Connection | ||
43 | * 82583V Gigabit Network Connection | ||
44 | */ | ||
45 | |||
46 | #include "e1000.h" | ||
47 | |||
48 | #define ID_LED_RESERVED_F746 0xF746 | ||
49 | #define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ | ||
50 | (ID_LED_OFF1_ON2 << 8) | \ | ||
51 | (ID_LED_DEF1_DEF2 << 4) | \ | ||
52 | (ID_LED_DEF1_DEF2)) | ||
53 | |||
54 | #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 | ||
55 | #define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ | ||
56 | #define E1000_BASE1000T_STATUS 10 | ||
57 | #define E1000_IDLE_ERROR_COUNT_MASK 0xFF | ||
58 | #define E1000_RECEIVE_ERROR_COUNTER 21 | ||
59 | #define E1000_RECEIVE_ERROR_MAX 0xFFFF | ||
60 | |||
61 | #define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ | ||
62 | |||
63 | static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); | ||
64 | static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); | ||
65 | static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); | ||
66 | static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw); | ||
67 | static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, | ||
68 | u16 words, u16 *data); | ||
69 | static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); | ||
70 | static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); | ||
71 | static s32 e1000_setup_link_82571(struct e1000_hw *hw); | ||
72 | static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); | ||
73 | static void e1000_clear_vfta_82571(struct e1000_hw *hw); | ||
74 | static bool e1000_check_mng_mode_82574(struct e1000_hw *hw); | ||
75 | static s32 e1000_led_on_82574(struct e1000_hw *hw); | ||
76 | static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); | ||
77 | static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); | ||
78 | static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); | ||
79 | static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); | ||
80 | static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); | ||
81 | static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active); | ||
82 | static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active); | ||
83 | |||
84 | /** | ||
85 | * e1000_init_phy_params_82571 - Init PHY func ptrs. | ||
86 | * @hw: pointer to the HW structure | ||
87 | **/ | ||
88 | static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) | ||
89 | { | ||
90 | struct e1000_phy_info *phy = &hw->phy; | ||
91 | s32 ret_val; | ||
92 | |||
93 | if (hw->phy.media_type != e1000_media_type_copper) { | ||
94 | phy->type = e1000_phy_none; | ||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | phy->addr = 1; | ||
99 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | ||
100 | phy->reset_delay_us = 100; | ||
101 | |||
102 | phy->ops.power_up = e1000_power_up_phy_copper; | ||
103 | phy->ops.power_down = e1000_power_down_phy_copper_82571; | ||
104 | |||
105 | switch (hw->mac.type) { | ||
106 | case e1000_82571: | ||
107 | case e1000_82572: | ||
108 | phy->type = e1000_phy_igp_2; | ||
109 | break; | ||
110 | case e1000_82573: | ||
111 | phy->type = e1000_phy_m88; | ||
112 | break; | ||
113 | case e1000_82574: | ||
114 | case e1000_82583: | ||
115 | phy->type = e1000_phy_bm; | ||
116 | phy->ops.acquire = e1000_get_hw_semaphore_82574; | ||
117 | phy->ops.release = e1000_put_hw_semaphore_82574; | ||
118 | phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; | ||
119 | phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574; | ||
120 | break; | ||
121 | default: | ||
122 | return -E1000_ERR_PHY; | ||
123 | break; | ||
124 | } | ||
125 | |||
126 | /* This can only be done after all function pointers are setup. */ | ||
127 | ret_val = e1000_get_phy_id_82571(hw); | ||
128 | if (ret_val) { | ||
129 | e_dbg("Error getting PHY ID\n"); | ||
130 | return ret_val; | ||
131 | } | ||
132 | |||
133 | /* Verify phy id */ | ||
134 | switch (hw->mac.type) { | ||
135 | case e1000_82571: | ||
136 | case e1000_82572: | ||
137 | if (phy->id != IGP01E1000_I_PHY_ID) | ||
138 | ret_val = -E1000_ERR_PHY; | ||
139 | break; | ||
140 | case e1000_82573: | ||
141 | if (phy->id != M88E1111_I_PHY_ID) | ||
142 | ret_val = -E1000_ERR_PHY; | ||
143 | break; | ||
144 | case e1000_82574: | ||
145 | case e1000_82583: | ||
146 | if (phy->id != BME1000_E_PHY_ID_R2) | ||
147 | ret_val = -E1000_ERR_PHY; | ||
148 | break; | ||
149 | default: | ||
150 | ret_val = -E1000_ERR_PHY; | ||
151 | break; | ||
152 | } | ||
153 | |||
154 | if (ret_val) | ||
155 | e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id); | ||
156 | |||
157 | return ret_val; | ||
158 | } | ||
159 | |||
160 | /** | ||
161 | * e1000_init_nvm_params_82571 - Init NVM func ptrs. | ||
162 | * @hw: pointer to the HW structure | ||
163 | **/ | ||
164 | static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) | ||
165 | { | ||
166 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
167 | u32 eecd = er32(EECD); | ||
168 | u16 size; | ||
169 | |||
170 | nvm->opcode_bits = 8; | ||
171 | nvm->delay_usec = 1; | ||
172 | switch (nvm->override) { | ||
173 | case e1000_nvm_override_spi_large: | ||
174 | nvm->page_size = 32; | ||
175 | nvm->address_bits = 16; | ||
176 | break; | ||
177 | case e1000_nvm_override_spi_small: | ||
178 | nvm->page_size = 8; | ||
179 | nvm->address_bits = 8; | ||
180 | break; | ||
181 | default: | ||
182 | nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; | ||
183 | nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; | ||
184 | break; | ||
185 | } | ||
186 | |||
187 | switch (hw->mac.type) { | ||
188 | case e1000_82573: | ||
189 | case e1000_82574: | ||
190 | case e1000_82583: | ||
191 | if (((eecd >> 15) & 0x3) == 0x3) { | ||
192 | nvm->type = e1000_nvm_flash_hw; | ||
193 | nvm->word_size = 2048; | ||
194 | /* | ||
195 | * Autonomous Flash update bit must be cleared due | ||
196 | * to Flash update issue. | ||
197 | */ | ||
198 | eecd &= ~E1000_EECD_AUPDEN; | ||
199 | ew32(EECD, eecd); | ||
200 | break; | ||
201 | } | ||
202 | /* Fall Through */ | ||
203 | default: | ||
204 | nvm->type = e1000_nvm_eeprom_spi; | ||
205 | size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> | ||
206 | E1000_EECD_SIZE_EX_SHIFT); | ||
207 | /* | ||
208 | * Added to a constant, "size" becomes the left-shift value | ||
209 | * for setting word_size. | ||
210 | */ | ||
211 | size += NVM_WORD_SIZE_BASE_SHIFT; | ||
212 | |||
213 | /* EEPROM access above 16k is unsupported */ | ||
214 | if (size > 14) | ||
215 | size = 14; | ||
216 | nvm->word_size = 1 << size; | ||
217 | break; | ||
218 | } | ||
219 | |||
220 | /* Function Pointers */ | ||
221 | switch (hw->mac.type) { | ||
222 | case e1000_82574: | ||
223 | case e1000_82583: | ||
224 | nvm->ops.acquire = e1000_get_hw_semaphore_82574; | ||
225 | nvm->ops.release = e1000_put_hw_semaphore_82574; | ||
226 | break; | ||
227 | default: | ||
228 | break; | ||
229 | } | ||
230 | |||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | /** | ||
235 | * e1000_init_mac_params_82571 - Init MAC func ptrs. | ||
236 | * @hw: pointer to the HW structure | ||
Note: the kernel-doc above still names @hw although the function below takes an adapter pointer; the corrected parameter line is used here.
237 | **/ | ||
238 | static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) | ||
239 | { | ||
240 | struct e1000_hw *hw = &adapter->hw; | ||
241 | struct e1000_mac_info *mac = &hw->mac; | ||
242 | struct e1000_mac_operations *func = &mac->ops; | ||
243 | u32 swsm = 0; | ||
244 | u32 swsm2 = 0; | ||
245 | bool force_clear_smbi = false; | ||
246 | |||
247 | /* Set media type */ | ||
248 | switch (adapter->pdev->device) { | ||
249 | case E1000_DEV_ID_82571EB_FIBER: | ||
250 | case E1000_DEV_ID_82572EI_FIBER: | ||
251 | case E1000_DEV_ID_82571EB_QUAD_FIBER: | ||
252 | hw->phy.media_type = e1000_media_type_fiber; | ||
253 | break; | ||
254 | case E1000_DEV_ID_82571EB_SERDES: | ||
255 | case E1000_DEV_ID_82572EI_SERDES: | ||
256 | case E1000_DEV_ID_82571EB_SERDES_DUAL: | ||
257 | case E1000_DEV_ID_82571EB_SERDES_QUAD: | ||
258 | hw->phy.media_type = e1000_media_type_internal_serdes; | ||
259 | break; | ||
260 | default: | ||
261 | hw->phy.media_type = e1000_media_type_copper; | ||
262 | break; | ||
263 | } | ||
264 | |||
265 | /* Set mta register count */ | ||
266 | mac->mta_reg_count = 128; | ||
267 | /* Set rar entry count */ | ||
268 | mac->rar_entry_count = E1000_RAR_ENTRIES; | ||
269 | /* Adaptive IFS supported */ | ||
270 | mac->adaptive_ifs = true; | ||
271 | |||
272 | /* check for link */ | ||
273 | switch (hw->phy.media_type) { | ||
274 | case e1000_media_type_copper: | ||
275 | func->setup_physical_interface = e1000_setup_copper_link_82571; | ||
276 | func->check_for_link = e1000e_check_for_copper_link; | ||
277 | func->get_link_up_info = e1000e_get_speed_and_duplex_copper; | ||
278 | break; | ||
279 | case e1000_media_type_fiber: | ||
280 | func->setup_physical_interface = | ||
281 | e1000_setup_fiber_serdes_link_82571; | ||
282 | func->check_for_link = e1000e_check_for_fiber_link; | ||
283 | func->get_link_up_info = | ||
284 | e1000e_get_speed_and_duplex_fiber_serdes; | ||
285 | break; | ||
286 | case e1000_media_type_internal_serdes: | ||
287 | func->setup_physical_interface = | ||
288 | e1000_setup_fiber_serdes_link_82571; | ||
289 | func->check_for_link = e1000_check_for_serdes_link_82571; | ||
290 | func->get_link_up_info = | ||
291 | e1000e_get_speed_and_duplex_fiber_serdes; | ||
292 | break; | ||
293 | default: | ||
294 | return -E1000_ERR_CONFIG; | ||
295 | break; | ||
296 | } | ||
297 | |||
298 | switch (hw->mac.type) { | ||
299 | case e1000_82573: | ||
300 | func->set_lan_id = e1000_set_lan_id_single_port; | ||
301 | func->check_mng_mode = e1000e_check_mng_mode_generic; | ||
302 | func->led_on = e1000e_led_on_generic; | ||
303 | func->blink_led = e1000e_blink_led_generic; | ||
304 | |||
305 | /* FWSM register */ | ||
306 | mac->has_fwsm = true; | ||
307 | /* | ||
308 | * ARC supported; valid only if manageability features are | ||
309 | * enabled. | ||
310 | */ | ||
311 | mac->arc_subsystem_valid = | ||
312 | (er32(FWSM) & E1000_FWSM_MODE_MASK) | ||
313 | ? true : false; | ||
314 | break; | ||
315 | case e1000_82574: | ||
316 | case e1000_82583: | ||
317 | func->set_lan_id = e1000_set_lan_id_single_port; | ||
318 | func->check_mng_mode = e1000_check_mng_mode_82574; | ||
319 | func->led_on = e1000_led_on_82574; | ||
320 | break; | ||
321 | default: | ||
322 | func->check_mng_mode = e1000e_check_mng_mode_generic; | ||
323 | func->led_on = e1000e_led_on_generic; | ||
324 | func->blink_led = e1000e_blink_led_generic; | ||
325 | |||
326 | /* FWSM register */ | ||
327 | mac->has_fwsm = true; | ||
328 | break; | ||
329 | } | ||
330 | |||
331 | /* | ||
332 | * Ensure that the inter-port SWSM.SMBI lock bit is clear before | ||
333 | * first NVM or PHY access. This should be done for single-port | ||
334 | * devices, and for one port only on dual-port devices so that | ||
335 | * for those devices we can still use the SMBI lock to synchronize | ||
336 | * inter-port accesses to the PHY & NVM. | ||
337 | */ | ||
338 | switch (hw->mac.type) { | ||
339 | case e1000_82571: | ||
340 | case e1000_82572: | ||
341 | swsm2 = er32(SWSM2); | ||
342 | |||
343 | if (!(swsm2 & E1000_SWSM2_LOCK)) { | ||
344 | /* Only do this for the first interface on this card */ | ||
345 | ew32(SWSM2, | ||
346 | swsm2 | E1000_SWSM2_LOCK); | ||
347 | force_clear_smbi = true; | ||
348 | } else | ||
349 | force_clear_smbi = false; | ||
350 | break; | ||
351 | default: | ||
352 | force_clear_smbi = true; | ||
353 | break; | ||
354 | } | ||
355 | |||
356 | if (force_clear_smbi) { | ||
357 | /* Make sure SWSM.SMBI is clear */ | ||
358 | swsm = er32(SWSM); | ||
359 | if (swsm & E1000_SWSM_SMBI) { | ||
360 | /* This bit should not be set on the first interface, and | ||
361 | * indicates that the bootagent or EFI code has | ||
362 | * improperly left this bit enabled | ||
363 | */ | ||
364 | e_dbg("Please update your 82571 Bootagent\n"); | ||
365 | } | ||
366 | ew32(SWSM, swsm & ~E1000_SWSM_SMBI); | ||
367 | } | ||
368 | |||
369 | /* | ||
370 | * Initialize device specific counter of SMBI acquisition | ||
371 | * timeouts. | ||
372 | */ | ||
373 | hw->dev_spec.e82571.smb_counter = 0; | ||
374 | |||
375 | return 0; | ||
376 | } | ||
377 | |||
378 | static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) | ||
379 | { | ||
380 | struct e1000_hw *hw = &adapter->hw; | ||
381 | static int global_quad_port_a; /* global port a indication */ | ||
382 | struct pci_dev *pdev = adapter->pdev; | ||
383 | int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; | ||
384 | s32 rc; | ||
385 | |||
386 | rc = e1000_init_mac_params_82571(adapter); | ||
387 | if (rc) | ||
388 | return rc; | ||
389 | |||
390 | rc = e1000_init_nvm_params_82571(hw); | ||
391 | if (rc) | ||
392 | return rc; | ||
393 | |||
394 | rc = e1000_init_phy_params_82571(hw); | ||
395 | if (rc) | ||
396 | return rc; | ||
397 | |||
398 | /* tag quad port adapters first; it's used below */ | ||
399 | switch (pdev->device) { | ||
400 | case E1000_DEV_ID_82571EB_QUAD_COPPER: | ||
401 | case E1000_DEV_ID_82571EB_QUAD_FIBER: | ||
402 | case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: | ||
403 | case E1000_DEV_ID_82571PT_QUAD_COPPER: | ||
404 | adapter->flags |= FLAG_IS_QUAD_PORT; | ||
405 | /* mark the first port */ | ||
406 | if (global_quad_port_a == 0) | ||
407 | adapter->flags |= FLAG_IS_QUAD_PORT_A; | ||
408 | /* Reset for multiple quad port adapters */ | ||
409 | global_quad_port_a++; | ||
410 | if (global_quad_port_a == 4) | ||
411 | global_quad_port_a = 0; | ||
412 | break; | ||
413 | default: | ||
414 | break; | ||
415 | } | ||
416 | |||
417 | switch (adapter->hw.mac.type) { | ||
418 | case e1000_82571: | ||
419 | /* these dual ports don't have WoL on port B at all */ | ||
420 | if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) || | ||
421 | (pdev->device == E1000_DEV_ID_82571EB_SERDES) || | ||
422 | (pdev->device == E1000_DEV_ID_82571EB_COPPER)) && | ||
423 | (is_port_b)) | ||
424 | adapter->flags &= ~FLAG_HAS_WOL; | ||
425 | /* quad ports only support WoL on port A */ | ||
426 | if (adapter->flags & FLAG_IS_QUAD_PORT && | ||
427 | (!(adapter->flags & FLAG_IS_QUAD_PORT_A))) | ||
428 | adapter->flags &= ~FLAG_HAS_WOL; | ||
429 | /* Does not support WoL on any port */ | ||
430 | if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) | ||
431 | adapter->flags &= ~FLAG_HAS_WOL; | ||
432 | break; | ||
433 | case e1000_82573: | ||
434 | if (pdev->device == E1000_DEV_ID_82573L) { | ||
435 | adapter->flags |= FLAG_HAS_JUMBO_FRAMES; | ||
436 | adapter->max_hw_frame_size = DEFAULT_JUMBO; | ||
437 | } | ||
438 | break; | ||
439 | default: | ||
440 | break; | ||
441 | } | ||
442 | |||
443 | return 0; | ||
444 | } | ||
445 | |||
446 | /** | ||
447 | * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision | ||
448 | * @hw: pointer to the HW structure | ||
449 | * | ||
450 | * Reads the PHY registers and stores the PHY ID and possibly the PHY | ||
451 | * revision in the hardware structure. | ||
452 | **/ | ||
453 | static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) | ||
454 | { | ||
455 | struct e1000_phy_info *phy = &hw->phy; | ||
456 | s32 ret_val; | ||
457 | u16 phy_id = 0; | ||
458 | |||
459 | switch (hw->mac.type) { | ||
460 | case e1000_82571: | ||
461 | case e1000_82572: | ||
462 | /* | ||
463 | * The 82571 firmware may still be configuring the PHY. | ||
464 | * In this case, we cannot access the PHY until the | ||
465 | * configuration is done. So we explicitly set the | ||
466 | * PHY ID. | ||
467 | */ | ||
468 | phy->id = IGP01E1000_I_PHY_ID; | ||
469 | break; | ||
470 | case e1000_82573: | ||
471 | return e1000e_get_phy_id(hw); | ||
472 | break; | ||
473 | case e1000_82574: | ||
474 | case e1000_82583: | ||
475 | ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); | ||
476 | if (ret_val) | ||
477 | return ret_val; | ||
478 | |||
479 | phy->id = (u32)(phy_id << 16); | ||
480 | udelay(20); | ||
481 | ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); | ||
482 | if (ret_val) | ||
483 | return ret_val; | ||
484 | |||
485 | phy->id |= (u32)(phy_id); | ||
486 | phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); | ||
487 | break; | ||
488 | default: | ||
489 | return -E1000_ERR_PHY; | ||
490 | break; | ||
491 | } | ||
492 | |||
493 | return 0; | ||
494 | } | ||
495 | |||
496 | /** | ||
497 | * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore | ||
498 | * @hw: pointer to the HW structure | ||
499 | * | ||
500 | * Acquire the HW semaphore to access the PHY or NVM | ||
501 | **/ | ||
502 | static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) | ||
503 | { | ||
504 | u32 swsm; | ||
505 | s32 sw_timeout = hw->nvm.word_size + 1; | ||
506 | s32 fw_timeout = hw->nvm.word_size + 1; | ||
507 | s32 i = 0; | ||
508 | |||
509 | /* | ||
510 | * If we have timed out 3 times trying to acquire | ||
511 | * the inter-port SMBI semaphore, there is old code | ||
512 | * operating on the other port, and it is not | ||
513 | * releasing SMBI. Modify the number of times that | ||
514 | * we try for the semaphore to interwork with this | ||
515 | * older code. | ||
516 | */ | ||
517 | if (hw->dev_spec.e82571.smb_counter > 2) | ||
518 | sw_timeout = 1; | ||
519 | |||
520 | /* Get the SW semaphore */ | ||
521 | while (i < sw_timeout) { | ||
522 | swsm = er32(SWSM); | ||
523 | if (!(swsm & E1000_SWSM_SMBI)) | ||
524 | break; | ||
525 | |||
526 | udelay(50); | ||
527 | i++; | ||
528 | } | ||
529 | |||
530 | if (i == sw_timeout) { | ||
531 | e_dbg("Driver can't access device - SMBI bit is set.\n"); | ||
532 | hw->dev_spec.e82571.smb_counter++; | ||
533 | } | ||
534 | /* Get the FW semaphore. */ | ||
535 | for (i = 0; i < fw_timeout; i++) { | ||
536 | swsm = er32(SWSM); | ||
537 | ew32(SWSM, swsm | E1000_SWSM_SWESMBI); | ||
538 | |||
539 | /* Semaphore acquired if bit latched */ | ||
540 | if (er32(SWSM) & E1000_SWSM_SWESMBI) | ||
541 | break; | ||
542 | |||
543 | udelay(50); | ||
544 | } | ||
545 | |||
546 | if (i == fw_timeout) { | ||
547 | /* Release semaphores */ | ||
548 | e1000_put_hw_semaphore_82571(hw); | ||
549 | e_dbg("Driver can't access the NVM\n"); | ||
550 | return -E1000_ERR_NVM; | ||
551 | } | ||
552 | |||
553 | return 0; | ||
554 | } | ||
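Editor's note: the acquisition above happens in two stages -- spin on the inter-port SMBI bit in SWSM, then set SWESMBI and read it back to confirm firmware granted software ownership; an SMBI timeout is only counted and logged (the firmware stage is still attempted), while a firmware-stage timeout releases everything and fails. A compacted standalone sketch of that set-and-read-back handshake follows; the register is simulated and all names are illustrative.

/* Illustrative only: a fake "SWSM" register with SMBI and SWESMBI bits. */
#include <stdio.h>

#define SMBI    (1u << 0)
#define SWESMBI (1u << 1)

static unsigned int fake_swsm;	/* stands in for the real SWSM register */
static int smb_timeouts;	/* analogous to the smb_counter above */

static int acquire(int sw_timeout, int fw_timeout)
{
	int i;

	/* Stage 1: wait for the inter-port SMBI bit to clear. */
	for (i = 0; i < sw_timeout && (fake_swsm & SMBI); i++)
		;
	if (i == sw_timeout)
		smb_timeouts++;		/* only counted; still try the FW stage */

	/* The real SMBI bit is latched by hardware on the SWSM read;
	 * the simulation sets it explicitly. */
	fake_swsm |= SMBI;

	/* Stage 2: request firmware ownership and confirm the bit latched. */
	for (i = 0; i < fw_timeout; i++) {
		fake_swsm |= SWESMBI;
		if (fake_swsm & SWESMBI)	/* read-back confirms the grant */
			break;
	}
	if (i == fw_timeout) {
		fake_swsm &= ~(SMBI | SWESMBI);	/* release everything on failure */
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("acquire: %d\n", acquire(10, 10));
	fake_swsm &= ~(SMBI | SWESMBI);		/* release, as the put routine does */
	return 0;
}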
555 | |||
556 | /** | ||
557 | * e1000_put_hw_semaphore_82571 - Release hardware semaphore | ||
558 | * @hw: pointer to the HW structure | ||
559 | * | ||
560 | * Release hardware semaphore used to access the PHY or NVM | ||
561 | **/ | ||
562 | static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw) | ||
563 | { | ||
564 | u32 swsm; | ||
565 | |||
566 | swsm = er32(SWSM); | ||
567 | swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); | ||
568 | ew32(SWSM, swsm); | ||
569 | } | ||
570 | /** | ||
571 | * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore | ||
572 | * @hw: pointer to the HW structure | ||
573 | * | ||
574 | * Acquire the HW semaphore during reset. | ||
575 | * | ||
576 | **/ | ||
577 | static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw) | ||
578 | { | ||
579 | u32 extcnf_ctrl; | ||
580 | s32 ret_val = 0; | ||
581 | s32 i = 0; | ||
582 | |||
583 | extcnf_ctrl = er32(EXTCNF_CTRL); | ||
584 | extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; | ||
585 | do { | ||
586 | ew32(EXTCNF_CTRL, extcnf_ctrl); | ||
587 | extcnf_ctrl = er32(EXTCNF_CTRL); | ||
588 | |||
589 | if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) | ||
590 | break; | ||
591 | |||
592 | extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; | ||
593 | |||
594 | usleep_range(2000, 4000); | ||
595 | i++; | ||
596 | } while (i < MDIO_OWNERSHIP_TIMEOUT); | ||
597 | |||
598 | if (i == MDIO_OWNERSHIP_TIMEOUT) { | ||
599 | /* Release semaphores */ | ||
600 | e1000_put_hw_semaphore_82573(hw); | ||
601 | e_dbg("Driver can't access the PHY\n"); | ||
602 | ret_val = -E1000_ERR_PHY; | ||
603 | goto out; | ||
604 | } | ||
605 | |||
606 | out: | ||
607 | return ret_val; | ||
608 | } | ||
609 | |||
610 | /** | ||
611 | * e1000_put_hw_semaphore_82573 - Release hardware semaphore | ||
612 | * @hw: pointer to the HW structure | ||
613 | * | ||
614 | * Release hardware semaphore used during reset. | ||
615 | * | ||
616 | **/ | ||
617 | static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw) | ||
618 | { | ||
619 | u32 extcnf_ctrl; | ||
620 | |||
621 | extcnf_ctrl = er32(EXTCNF_CTRL); | ||
622 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; | ||
623 | ew32(EXTCNF_CTRL, extcnf_ctrl); | ||
624 | } | ||
625 | |||
626 | static DEFINE_MUTEX(swflag_mutex); | ||
627 | |||
628 | /** | ||
629 | * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore | ||
630 | * @hw: pointer to the HW structure | ||
631 | * | ||
632 | * Acquire the HW semaphore to access the PHY or NVM. | ||
633 | * | ||
634 | **/ | ||
635 | static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw) | ||
636 | { | ||
637 | s32 ret_val; | ||
638 | |||
639 | mutex_lock(&swflag_mutex); | ||
640 | ret_val = e1000_get_hw_semaphore_82573(hw); | ||
641 | if (ret_val) | ||
642 | mutex_unlock(&swflag_mutex); | ||
643 | return ret_val; | ||
644 | } | ||
645 | |||
646 | /** | ||
647 | * e1000_put_hw_semaphore_82574 - Release hardware semaphore | ||
648 | * @hw: pointer to the HW structure | ||
649 | * | ||
650 | * Release hardware semaphore used to access the PHY or NVM | ||
651 | * | ||
652 | **/ | ||
653 | static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) | ||
654 | { | ||
655 | e1000_put_hw_semaphore_82573(hw); | ||
656 | mutex_unlock(&swflag_mutex); | ||
657 | } | ||
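Editor's note: the 82574 acquire/release pair above layers a driver-level mutex (swflag_mutex) around the hardware semaphore, dropping the mutex again if the hardware grant fails, and releasing in the mirror order. The sketch below shows the same pairing with a pthread mutex and a stubbed hardware-semaphore call; all names are illustrative, not the driver's.

/* Standalone sketch of the mutex + hardware-semaphore pairing. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sw_mutex = PTHREAD_MUTEX_INITIALIZER;

static int fake_get_hw_semaphore(void)  { return 0; }	/* pretend HW grant */
static void fake_put_hw_semaphore(void) { }

static int acquire_82574_style(void)
{
	int ret;

	pthread_mutex_lock(&sw_mutex);		/* serialize threads in this driver */
	ret = fake_get_hw_semaphore();		/* then arbitrate with firmware */
	if (ret)
		pthread_mutex_unlock(&sw_mutex); /* drop the mutex on failure */
	return ret;
}

static void release_82574_style(void)
{
	fake_put_hw_semaphore();		/* mirror order: HW first, ... */
	pthread_mutex_unlock(&sw_mutex);	/* ...then the mutex */
}

int main(void)
{
	if (!acquire_82574_style()) {
		puts("PHY/NVM access would happen here");
		release_82574_style();
	}
	return 0;
}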
658 | |||
659 | /** | ||
660 | * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state | ||
661 | * @hw: pointer to the HW structure | ||
662 | * @active: true to enable LPLU, false to disable | ||
663 | * | ||
664 | * Sets the LPLU D0 state according to the active flag. | ||
665 | * LPLU will not be activated unless the | ||
666 | * device autonegotiation advertisement meets standards of | ||
667 | * either 10 or 10/100 or 10/100/1000 at all duplexes. | ||
668 | * This is a function pointer entry point only called by | ||
669 | * PHY setup routines. | ||
670 | **/ | ||
671 | static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) | ||
672 | { | ||
673 | u32 data = er32(POEMB); /* POEMB is a 32-bit register; avoid truncating it on write-back */ | ||
674 | |||
675 | if (active) | ||
676 | data |= E1000_PHY_CTRL_D0A_LPLU; | ||
677 | else | ||
678 | data &= ~E1000_PHY_CTRL_D0A_LPLU; | ||
679 | |||
680 | ew32(POEMB, data); | ||
681 | return 0; | ||
682 | } | ||
683 | |||
684 | /** | ||
685 | * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3 | ||
686 | * @hw: pointer to the HW structure | ||
687 | * @active: boolean used to enable/disable lplu | ||
688 | * | ||
689 | * The low power link up (LPLU) state is set to the power management level D3 | ||
690 | * when active is true; otherwise LPLU for D3 is cleared. LPLU | ||
691 | * is used during Dx states where power conservation is most important. | ||
692 | * During driver activity, SmartSpeed should be enabled so performance is | ||
693 | * maintained. | ||
694 | **/ | ||
695 | static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) | ||
696 | { | ||
697 | u32 data = er32(POEMB); /* POEMB is a 32-bit register; avoid truncating it on write-back */ | ||
698 | |||
699 | if (!active) { | ||
700 | data &= ~E1000_PHY_CTRL_NOND0A_LPLU; | ||
701 | } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || | ||
702 | (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) || | ||
703 | (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) { | ||
704 | data |= E1000_PHY_CTRL_NOND0A_LPLU; | ||
705 | } | ||
706 | |||
707 | ew32(POEMB, data); | ||
708 | return 0; | ||
709 | } | ||
710 | |||
711 | /** | ||
712 | * e1000_acquire_nvm_82571 - Request for access to the EEPROM | ||
713 | * @hw: pointer to the HW structure | ||
714 | * | ||
715 | * To gain access to the EEPROM, first we must obtain a hardware semaphore. | ||
716 | * Then for non-82573 hardware, set the EEPROM access request bit and wait | ||
717 | * for EEPROM access grant bit. If the access grant bit is not set, release | ||
718 | * hardware semaphore. | ||
719 | **/ | ||
720 | static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw) | ||
721 | { | ||
722 | s32 ret_val; | ||
723 | |||
724 | ret_val = e1000_get_hw_semaphore_82571(hw); | ||
725 | if (ret_val) | ||
726 | return ret_val; | ||
727 | |||
728 | switch (hw->mac.type) { | ||
729 | case e1000_82573: | ||
730 | break; | ||
731 | default: | ||
732 | ret_val = e1000e_acquire_nvm(hw); | ||
733 | break; | ||
734 | } | ||
735 | |||
736 | if (ret_val) | ||
737 | e1000_put_hw_semaphore_82571(hw); | ||
738 | |||
739 | return ret_val; | ||
740 | } | ||
741 | |||
742 | /** | ||
743 | * e1000_release_nvm_82571 - Release exclusive access to EEPROM | ||
744 | * @hw: pointer to the HW structure | ||
745 | * | ||
746 | * Stop any current commands to the EEPROM and clear the EEPROM request bit. | ||
747 | **/ | ||
748 | static void e1000_release_nvm_82571(struct e1000_hw *hw) | ||
749 | { | ||
750 | e1000e_release_nvm(hw); | ||
751 | e1000_put_hw_semaphore_82571(hw); | ||
752 | } | ||
753 | |||
754 | /** | ||
755 | * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface | ||
756 | * @hw: pointer to the HW structure | ||
757 | * @offset: offset within the EEPROM to be written to | ||
758 | * @words: number of words to write | ||
759 | * @data: 16 bit word(s) to be written to the EEPROM | ||
760 | * | ||
761 | * For non-82573 silicon, write data to EEPROM at offset using SPI interface. | ||
762 | * | ||
763 | * If e1000e_update_nvm_checksum is not called after this function, the | ||
764 | * EEPROM will most likely contain an invalid checksum. | ||
765 | **/ | ||
766 | static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, | ||
767 | u16 *data) | ||
768 | { | ||
769 | s32 ret_val; | ||
770 | |||
771 | switch (hw->mac.type) { | ||
772 | case e1000_82573: | ||
773 | case e1000_82574: | ||
774 | case e1000_82583: | ||
775 | ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data); | ||
776 | break; | ||
777 | case e1000_82571: | ||
778 | case e1000_82572: | ||
779 | ret_val = e1000e_write_nvm_spi(hw, offset, words, data); | ||
780 | break; | ||
781 | default: | ||
782 | ret_val = -E1000_ERR_NVM; | ||
783 | break; | ||
784 | } | ||
785 | |||
786 | return ret_val; | ||
787 | } | ||
788 | |||
789 | /** | ||
790 | * e1000_update_nvm_checksum_82571 - Update EEPROM checksum | ||
791 | * @hw: pointer to the HW structure | ||
792 | * | ||
793 | * Updates the EEPROM checksum by reading/adding each word of the EEPROM | ||
794 | * up to the checksum. Then calculates the EEPROM checksum and writes the | ||
795 | * value to the EEPROM. | ||
796 | **/ | ||
797 | static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) | ||
798 | { | ||
799 | u32 eecd; | ||
800 | s32 ret_val; | ||
801 | u16 i; | ||
802 | |||
803 | ret_val = e1000e_update_nvm_checksum_generic(hw); | ||
804 | if (ret_val) | ||
805 | return ret_val; | ||
806 | |||
807 | /* | ||
808 | * If our NVM is an EEPROM, then we're done; | ||
809 | * otherwise, commit the checksum to the flash NVM. | ||
810 | */ | ||
811 | if (hw->nvm.type != e1000_nvm_flash_hw) | ||
812 | return ret_val; | ||
813 | |||
814 | /* Check for pending operations. */ | ||
815 | for (i = 0; i < E1000_FLASH_UPDATES; i++) { | ||
816 | usleep_range(1000, 2000); | ||
817 | if ((er32(EECD) & E1000_EECD_FLUPD) == 0) | ||
818 | break; | ||
819 | } | ||
820 | |||
821 | if (i == E1000_FLASH_UPDATES) | ||
822 | return -E1000_ERR_NVM; | ||
823 | |||
824 | /* Reset the firmware if using STM opcode. */ | ||
825 | if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { | ||
826 | /* | ||
827 | * The enabling of and the actual reset must be done | ||
828 | * in two write cycles. | ||
829 | */ | ||
830 | ew32(HICR, E1000_HICR_FW_RESET_ENABLE); | ||
831 | e1e_flush(); | ||
832 | ew32(HICR, E1000_HICR_FW_RESET); | ||
833 | } | ||
834 | |||
835 | /* Commit the write to flash */ | ||
836 | eecd = er32(EECD) | E1000_EECD_FLUPD; | ||
837 | ew32(EECD, eecd); | ||
838 | |||
839 | for (i = 0; i < E1000_FLASH_UPDATES; i++) { | ||
840 | usleep_range(1000, 2000); | ||
841 | if ((er32(EECD) & E1000_EECD_FLUPD) == 0) | ||
842 | break; | ||
843 | } | ||
844 | |||
845 | if (i == E1000_FLASH_UPDATES) | ||
846 | return -E1000_ERR_NVM; | ||
847 | |||
848 | return 0; | ||
849 | } | ||
850 | |||
851 | /** | ||
852 | * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum | ||
853 | * @hw: pointer to the HW structure | ||
854 | * | ||
855 | * Calculates the EEPROM checksum by reading/adding each word of the EEPROM | ||
856 | * and then verifies that the sum of the EEPROM is equal to 0xBABA. | ||
857 | **/ | ||
858 | static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw) | ||
859 | { | ||
860 | if (hw->nvm.type == e1000_nvm_flash_hw) | ||
861 | e1000_fix_nvm_checksum_82571(hw); | ||
862 | |||
863 | return e1000e_validate_nvm_checksum_generic(hw); | ||
864 | } | ||
865 | |||
866 | /** | ||
867 | * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon | ||
868 | * @hw: pointer to the HW structure | ||
869 | * @offset: offset within the EEPROM to be written to | ||
870 | * @words: number of words to write | ||
871 | * @data: 16 bit word(s) to be written to the EEPROM | ||
872 | * | ||
873 | * After checking for invalid values, poll the EEPROM to ensure the previous | ||
874 | * command has completed before trying to write the next word. After write | ||
875 | * poll for completion. | ||
876 | * | ||
877 | * If e1000e_update_nvm_checksum is not called after this function, the | ||
878 | * EEPROM will most likely contain an invalid checksum. | ||
879 | **/ | ||
880 | static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, | ||
881 | u16 words, u16 *data) | ||
882 | { | ||
883 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
884 | u32 i, eewr = 0; | ||
885 | s32 ret_val = 0; | ||
886 | |||
887 | /* | ||
888 | * A check for invalid values: offset too large, too many words, | ||
889 | * or zero words. | ||
890 | */ | ||
891 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | ||
892 | (words == 0)) { | ||
893 | e_dbg("nvm parameter(s) out of bounds\n"); | ||
894 | return -E1000_ERR_NVM; | ||
895 | } | ||
896 | |||
897 | for (i = 0; i < words; i++) { | ||
898 | eewr = (data[i] << E1000_NVM_RW_REG_DATA) | | ||
899 | ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | | ||
900 | E1000_NVM_RW_REG_START; | ||
901 | |||
902 | ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); | ||
903 | if (ret_val) | ||
904 | break; | ||
905 | |||
906 | ew32(EEWR, eewr); | ||
907 | |||
908 | ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); | ||
909 | if (ret_val) | ||
910 | break; | ||
911 | } | ||
912 | |||
913 | return ret_val; | ||
914 | } | ||
915 | |||
916 | /** | ||
917 | * e1000_get_cfg_done_82571 - Poll for configuration done | ||
918 | * @hw: pointer to the HW structure | ||
919 | * | ||
920 | * Polls the management control register until the config done bit is set. | ||
921 | **/ | ||
922 | static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) | ||
923 | { | ||
924 | s32 timeout = PHY_CFG_TIMEOUT; | ||
925 | |||
926 | while (timeout) { | ||
927 | if (er32(EEMNGCTL) & | ||
928 | E1000_NVM_CFG_DONE_PORT_0) | ||
929 | break; | ||
930 | usleep_range(1000, 2000); | ||
931 | timeout--; | ||
932 | } | ||
933 | if (!timeout) { | ||
934 | e_dbg("MNG configuration cycle has not completed.\n"); | ||
935 | return -E1000_ERR_RESET; | ||
936 | } | ||
937 | |||
938 | return 0; | ||
939 | } | ||
940 | |||
941 | /** | ||
942 | * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state | ||
943 | * @hw: pointer to the HW structure | ||
944 | * @active: true to enable LPLU, false to disable | ||
945 | * | ||
946 | * Sets the LPLU D0 state according to the active flag. When activating LPLU | ||
947 | * this function also disables smart speed and vice versa. LPLU will not be | ||
948 | * activated unless the device autonegotiation advertisement meets standards | ||
949 | * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function | ||
950 | * pointer entry point only called by PHY setup routines. | ||
951 | **/ | ||
952 | static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) | ||
953 | { | ||
954 | struct e1000_phy_info *phy = &hw->phy; | ||
955 | s32 ret_val; | ||
956 | u16 data; | ||
957 | |||
958 | ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); | ||
959 | if (ret_val) | ||
960 | return ret_val; | ||
961 | |||
962 | if (active) { | ||
963 | data |= IGP02E1000_PM_D0_LPLU; | ||
964 | ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); | ||
965 | if (ret_val) | ||
966 | return ret_val; | ||
967 | |||
968 | /* When LPLU is enabled, we should disable SmartSpeed */ | ||
969 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); | ||
970 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | ||
971 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); | ||
972 | if (ret_val) | ||
973 | return ret_val; | ||
974 | } else { | ||
975 | data &= ~IGP02E1000_PM_D0_LPLU; | ||
976 | ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); | ||
977 | /* | ||
978 | * LPLU and SmartSpeed are mutually exclusive. LPLU is used | ||
979 | * during Dx states where power conservation is most | ||
980 | * important. During driver activity we should enable | ||
981 | * SmartSpeed, so performance is maintained. | ||
982 | */ | ||
983 | if (phy->smart_speed == e1000_smart_speed_on) { | ||
984 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
985 | &data); | ||
986 | if (ret_val) | ||
987 | return ret_val; | ||
988 | |||
989 | data |= IGP01E1000_PSCFR_SMART_SPEED; | ||
990 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
991 | data); | ||
992 | if (ret_val) | ||
993 | return ret_val; | ||
994 | } else if (phy->smart_speed == e1000_smart_speed_off) { | ||
995 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
996 | &data); | ||
997 | if (ret_val) | ||
998 | return ret_val; | ||
999 | |||
1000 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | ||
1001 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
1002 | data); | ||
1003 | if (ret_val) | ||
1004 | return ret_val; | ||
1005 | } | ||
1006 | } | ||
1007 | |||
1008 | return 0; | ||
1009 | } | ||
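Editor's note: because LPLU and SmartSpeed are mutually exclusive (as the comment above notes), the routine clears SmartSpeed whenever it enables LPLU, and re-applies the configured SmartSpeed policy only when LPLU is disabled. The standalone sketch below mirrors that pairing; the bit positions, structure, and policy enum are illustrative stand-ins, not the driver's PHY registers.

/* Standalone sketch; bit positions and the policy enum are illustrative. */
#include <stdio.h>

#define PM_D0_LPLU        (1u << 1)
#define PSCFR_SMART_SPEED (1u << 7)

enum smart_speed { SMART_SPEED_DEFAULT, SMART_SPEED_ON, SMART_SPEED_OFF };

struct fake_phy {
	unsigned int power_mgmt;	/* stands in for the power management register */
	unsigned int port_config;	/* stands in for the port config register */
	enum smart_speed policy;
};

static void set_d0_lplu(struct fake_phy *phy, int active)
{
	if (active) {
		phy->power_mgmt |= PM_D0_LPLU;
		/* LPLU and SmartSpeed are mutually exclusive. */
		phy->port_config &= ~PSCFR_SMART_SPEED;
	} else {
		phy->power_mgmt &= ~PM_D0_LPLU;
		/* Re-apply the configured SmartSpeed policy, if any. */
		if (phy->policy == SMART_SPEED_ON)
			phy->port_config |= PSCFR_SMART_SPEED;
		else if (phy->policy == SMART_SPEED_OFF)
			phy->port_config &= ~PSCFR_SMART_SPEED;
	}
}

int main(void)
{
	struct fake_phy phy = { .policy = SMART_SPEED_ON };

	set_d0_lplu(&phy, 1);
	printf("lplu on:  pm=%#x cfg=%#x\n", phy.power_mgmt, phy.port_config);
	set_d0_lplu(&phy, 0);
	printf("lplu off: pm=%#x cfg=%#x\n", phy.power_mgmt, phy.port_config);
	return 0;
}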
1010 | |||
1011 | /** | ||
1012 | * e1000_reset_hw_82571 - Reset hardware | ||
1013 | * @hw: pointer to the HW structure | ||
1014 | * | ||
1015 | * This resets the hardware into a known state. | ||
1016 | **/ | ||
1017 | static s32 e1000_reset_hw_82571(struct e1000_hw *hw) | ||
1018 | { | ||
1019 | u32 ctrl, ctrl_ext; | ||
1020 | s32 ret_val; | ||
1021 | |||
1022 | /* | ||
1023 | * Prevent the PCI-E bus from sticking if there is no TLP connection | ||
1024 | * on the last TLP read/write transaction when MAC is reset. | ||
1025 | */ | ||
1026 | ret_val = e1000e_disable_pcie_master(hw); | ||
1027 | if (ret_val) | ||
1028 | e_dbg("PCI-E Master disable polling has failed.\n"); | ||
1029 | |||
1030 | e_dbg("Masking off all interrupts\n"); | ||
1031 | ew32(IMC, 0xffffffff); | ||
1032 | |||
1033 | ew32(RCTL, 0); | ||
1034 | ew32(TCTL, E1000_TCTL_PSP); | ||
1035 | e1e_flush(); | ||
1036 | |||
1037 | usleep_range(10000, 20000); | ||
1038 | |||
1039 | /* | ||
1040 | * Must acquire the MDIO ownership before MAC reset. | ||
1041 | * Ownership defaults to firmware after a reset. | ||
1042 | */ | ||
1043 | switch (hw->mac.type) { | ||
1044 | case e1000_82573: | ||
1045 | ret_val = e1000_get_hw_semaphore_82573(hw); | ||
1046 | break; | ||
1047 | case e1000_82574: | ||
1048 | case e1000_82583: | ||
1049 | ret_val = e1000_get_hw_semaphore_82574(hw); | ||
1050 | break; | ||
1051 | default: | ||
1052 | break; | ||
1053 | } | ||
1054 | if (ret_val) | ||
1055 | e_dbg("Cannot acquire MDIO ownership\n"); | ||
1056 | |||
1057 | ctrl = er32(CTRL); | ||
1058 | |||
1059 | e_dbg("Issuing a global reset to MAC\n"); | ||
1060 | ew32(CTRL, ctrl | E1000_CTRL_RST); | ||
1061 | |||
1062 | /* Must release MDIO ownership and mutex after MAC reset. */ | ||
1063 | switch (hw->mac.type) { | ||
1064 | case e1000_82574: | ||
1065 | case e1000_82583: | ||
1066 | e1000_put_hw_semaphore_82574(hw); | ||
1067 | break; | ||
1068 | default: | ||
1069 | break; | ||
1070 | } | ||
1071 | |||
1072 | if (hw->nvm.type == e1000_nvm_flash_hw) { | ||
1073 | udelay(10); | ||
1074 | ctrl_ext = er32(CTRL_EXT); | ||
1075 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; | ||
1076 | ew32(CTRL_EXT, ctrl_ext); | ||
1077 | e1e_flush(); | ||
1078 | } | ||
1079 | |||
1080 | ret_val = e1000e_get_auto_rd_done(hw); | ||
1081 | if (ret_val) | ||
1082 | /* We don't want to continue accessing MAC registers. */ | ||
1083 | return ret_val; | ||
1084 | |||
1085 | /* | ||
1086 | * PHY configuration from NVM starts only after EECD_AUTO_RD is set. | ||
1087 | * We need to wait for PHY configuration to complete before accessing | ||
1088 | * the NVM and PHY. | ||
1089 | */ | ||
1090 | |||
1091 | switch (hw->mac.type) { | ||
1092 | case e1000_82573: | ||
1093 | case e1000_82574: | ||
1094 | case e1000_82583: | ||
1095 | msleep(25); | ||
1096 | break; | ||
1097 | default: | ||
1098 | break; | ||
1099 | } | ||
1100 | |||
1101 | /* Clear any pending interrupt events. */ | ||
1102 | ew32(IMC, 0xffffffff); | ||
1103 | er32(ICR); | ||
1104 | |||
1105 | if (hw->mac.type == e1000_82571) { | ||
1106 | /* Install any alternate MAC address into RAR0 */ | ||
1107 | ret_val = e1000_check_alt_mac_addr_generic(hw); | ||
1108 | if (ret_val) | ||
1109 | return ret_val; | ||
1110 | |||
1111 | e1000e_set_laa_state_82571(hw, true); | ||
1112 | } | ||
1113 | |||
1114 | /* Reinitialize the 82571 serdes link state machine */ | ||
1115 | if (hw->phy.media_type == e1000_media_type_internal_serdes) | ||
1116 | hw->mac.serdes_link_state = e1000_serdes_link_down; | ||
1117 | |||
1118 | return 0; | ||
1119 | } | ||
1120 | |||
1121 | /** | ||
1122 | * e1000_init_hw_82571 - Initialize hardware | ||
1123 | * @hw: pointer to the HW structure | ||
1124 | * | ||
1125 | * This inits the hardware readying it for operation. | ||
1126 | **/ | ||
1127 | static s32 e1000_init_hw_82571(struct e1000_hw *hw) | ||
1128 | { | ||
1129 | struct e1000_mac_info *mac = &hw->mac; | ||
1130 | u32 reg_data; | ||
1131 | s32 ret_val; | ||
1132 | u16 i, rar_count = mac->rar_entry_count; | ||
1133 | |||
1134 | e1000_initialize_hw_bits_82571(hw); | ||
1135 | |||
1136 | /* Initialize identification LED */ | ||
1137 | ret_val = e1000e_id_led_init(hw); | ||
1138 | if (ret_val) | ||
1139 | e_dbg("Error initializing identification LED\n"); | ||
1140 | /* This is not fatal and we should not stop init due to this */ | ||
1141 | |||
1142 | /* Disabling VLAN filtering */ | ||
1143 | e_dbg("Initializing the IEEE VLAN\n"); | ||
1144 | mac->ops.clear_vfta(hw); | ||
1145 | |||
1146 | /* Setup the receive address. */ | ||
1147 | /* | ||
1148 | * If, however, a locally administered address was assigned to the | ||
1149 | * 82571, we must reserve a RAR for it to work around an issue where | ||
1150 | * resetting one port will reload the MAC on the other port. | ||
1151 | */ | ||
1152 | if (e1000e_get_laa_state_82571(hw)) | ||
1153 | rar_count--; | ||
1154 | e1000e_init_rx_addrs(hw, rar_count); | ||
1155 | |||
1156 | /* Zero out the Multicast HASH table */ | ||
1157 | e_dbg("Zeroing the MTA\n"); | ||
1158 | for (i = 0; i < mac->mta_reg_count; i++) | ||
1159 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); | ||
1160 | |||
1161 | /* Setup link and flow control */ | ||
1162 | ret_val = e1000_setup_link_82571(hw); | ||
1163 | |||
1164 | /* Set the transmit descriptor write-back policy */ | ||
1165 | reg_data = er32(TXDCTL(0)); | ||
1166 | reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | | ||
1167 | E1000_TXDCTL_FULL_TX_DESC_WB | | ||
1168 | E1000_TXDCTL_COUNT_DESC; | ||
1169 | ew32(TXDCTL(0), reg_data); | ||
1170 | |||
1171 | /* ...for both queues. */ | ||
1172 | switch (mac->type) { | ||
1173 | case e1000_82573: | ||
1174 | e1000e_enable_tx_pkt_filtering(hw); | ||
1175 | /* fall through */ | ||
1176 | case e1000_82574: | ||
1177 | case e1000_82583: | ||
1178 | reg_data = er32(GCR); | ||
1179 | reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; | ||
1180 | ew32(GCR, reg_data); | ||
1181 | break; | ||
1182 | default: | ||
1183 | reg_data = er32(TXDCTL(1)); | ||
1184 | reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | | ||
1185 | E1000_TXDCTL_FULL_TX_DESC_WB | | ||
1186 | E1000_TXDCTL_COUNT_DESC; | ||
1187 | ew32(TXDCTL(1), reg_data); | ||
1188 | break; | ||
1189 | } | ||
1190 | |||
1191 | /* | ||
1192 | * Clear all of the statistics registers (clear on read). It is | ||
1193 | * important that we do this after we have tried to establish link | ||
1194 | * because the symbol error count will increment wildly if there | ||
1195 | * is no link. | ||
1196 | */ | ||
1197 | e1000_clear_hw_cntrs_82571(hw); | ||
1198 | |||
1199 | return ret_val; | ||
1200 | } | ||
1201 | |||
1202 | /** | ||
1203 | * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits | ||
1204 | * @hw: pointer to the HW structure | ||
1205 | * | ||
1206 | * Initializes the hardware-dependent bits needed for normal operation. | ||
1207 | **/ | ||
1208 | static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) | ||
1209 | { | ||
1210 | u32 reg; | ||
1211 | |||
1212 | /* Transmit Descriptor Control 0 */ | ||
1213 | reg = er32(TXDCTL(0)); | ||
1214 | reg |= (1 << 22); | ||
1215 | ew32(TXDCTL(0), reg); | ||
1216 | |||
1217 | /* Transmit Descriptor Control 1 */ | ||
1218 | reg = er32(TXDCTL(1)); | ||
1219 | reg |= (1 << 22); | ||
1220 | ew32(TXDCTL(1), reg); | ||
1221 | |||
1222 | /* Transmit Arbitration Control 0 */ | ||
1223 | reg = er32(TARC(0)); | ||
1224 | reg &= ~(0xF << 27); /* 30:27 */ | ||
1225 | switch (hw->mac.type) { | ||
1226 | case e1000_82571: | ||
1227 | case e1000_82572: | ||
1228 | reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); | ||
1229 | break; | ||
1230 | default: | ||
1231 | break; | ||
1232 | } | ||
1233 | ew32(TARC(0), reg); | ||
1234 | |||
1235 | /* Transmit Arbitration Control 1 */ | ||
1236 | reg = er32(TARC(1)); | ||
1237 | switch (hw->mac.type) { | ||
1238 | case e1000_82571: | ||
1239 | case e1000_82572: | ||
1240 | reg &= ~((1 << 29) | (1 << 30)); | ||
1241 | reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26); | ||
1242 | if (er32(TCTL) & E1000_TCTL_MULR) | ||
1243 | reg &= ~(1 << 28); | ||
1244 | else | ||
1245 | reg |= (1 << 28); | ||
1246 | ew32(TARC(1), reg); | ||
1247 | break; | ||
1248 | default: | ||
1249 | break; | ||
1250 | } | ||
1251 | |||
1252 | /* Device Control */ | ||
1253 | switch (hw->mac.type) { | ||
1254 | case e1000_82573: | ||
1255 | case e1000_82574: | ||
1256 | case e1000_82583: | ||
1257 | reg = er32(CTRL); | ||
1258 | reg &= ~(1 << 29); | ||
1259 | ew32(CTRL, reg); | ||
1260 | break; | ||
1261 | default: | ||
1262 | break; | ||
1263 | } | ||
1264 | |||
1265 | /* Extended Device Control */ | ||
1266 | switch (hw->mac.type) { | ||
1267 | case e1000_82573: | ||
1268 | case e1000_82574: | ||
1269 | case e1000_82583: | ||
1270 | reg = er32(CTRL_EXT); | ||
1271 | reg &= ~(1 << 23); | ||
1272 | reg |= (1 << 22); | ||
1273 | ew32(CTRL_EXT, reg); | ||
1274 | break; | ||
1275 | default: | ||
1276 | break; | ||
1277 | } | ||
1278 | |||
1279 | if (hw->mac.type == e1000_82571) { | ||
1280 | reg = er32(PBA_ECC); | ||
1281 | reg |= E1000_PBA_ECC_CORR_EN; | ||
1282 | ew32(PBA_ECC, reg); | ||
1283 | } | ||
1284 | /* | ||
1285 | * Workaround for hardware errata. | ||
1286 | * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572 | ||
1287 | */ | ||
1288 | |||
1289 | if ((hw->mac.type == e1000_82571) || | ||
1290 | (hw->mac.type == e1000_82572)) { | ||
1291 | reg = er32(CTRL_EXT); | ||
1292 | reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN; | ||
1293 | ew32(CTRL_EXT, reg); | ||
1294 | } | ||
1295 | |||
1296 | |||
1297 | /* PCI-Ex Control Registers */ | ||
1298 | switch (hw->mac.type) { | ||
1299 | case e1000_82574: | ||
1300 | case e1000_82583: | ||
1301 | reg = er32(GCR); | ||
1302 | reg |= (1 << 22); | ||
1303 | ew32(GCR, reg); | ||
1304 | |||
1305 | /* | ||
1306 | * Workaround for a hardware erratum. | ||
1307 | * Apply the workaround documented in the errata docs: some | ||
1308 | * error-prone or unreliable PCIe completions can occur, | ||
1309 | * particularly with ASPM enabled. | ||
1310 | * Without this workaround, the issue can cause Tx timeouts. | ||
1311 | */ | ||
1312 | reg = er32(GCR2); | ||
1313 | reg |= 1; | ||
1314 | ew32(GCR2, reg); | ||
1315 | break; | ||
1316 | default: | ||
1317 | break; | ||
1318 | } | ||
1319 | } | ||
1320 | |||
1321 | /** | ||
1322 | * e1000_clear_vfta_82571 - Clear VLAN filter table | ||
1323 | * @hw: pointer to the HW structure | ||
1324 | * | ||
1325 | * Clears the register array which contains the VLAN filter table by | ||
1326 | * setting all the values to 0. | ||
1327 | **/ | ||
1328 | static void e1000_clear_vfta_82571(struct e1000_hw *hw) | ||
1329 | { | ||
1330 | u32 offset; | ||
1331 | u32 vfta_value = 0; | ||
1332 | u32 vfta_offset = 0; | ||
1333 | u32 vfta_bit_in_reg = 0; | ||
1334 | |||
1335 | switch (hw->mac.type) { | ||
1336 | case e1000_82573: | ||
1337 | case e1000_82574: | ||
1338 | case e1000_82583: | ||
1339 | if (hw->mng_cookie.vlan_id != 0) { | ||
1340 | /* | ||
1341 | * The VFTA is a 4096-bit field; each bit identifies | ||
1342 | * a single VLAN ID. The following operations | ||
1343 | * determine which 32-bit entry (i.e. offset) into the | ||
1344 | * array, and which bit within it, correspond to the | ||
1345 | * VLAN ID of the manageability unit. | ||
1346 | */ | ||
1347 | vfta_offset = (hw->mng_cookie.vlan_id >> | ||
1348 | E1000_VFTA_ENTRY_SHIFT) & | ||
1349 | E1000_VFTA_ENTRY_MASK; | ||
1350 | vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id & | ||
1351 | E1000_VFTA_ENTRY_BIT_SHIFT_MASK); | ||
1352 | } | ||
1353 | break; | ||
1354 | default: | ||
1355 | break; | ||
1356 | } | ||
1357 | for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { | ||
1358 | /* | ||
1359 | * If the offset we want to clear is the same offset of the | ||
1360 | * manageability VLAN ID, then clear all bits except that of | ||
1361 | * the manageability unit. | ||
1362 | */ | ||
1363 | vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; | ||
1364 | E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value); | ||
1365 | e1e_flush(); | ||
1366 | } | ||
1367 | } | ||
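Editor's note: the offset/bit computation described in the comment above maps a 12-bit VLAN ID onto the 128 x 32-bit VFTA array: the upper bits of the ID select the 32-bit entry and the low 5 bits select the bit within it. The standalone sketch below writes the shift/mask values out explicitly; they follow the usual e1000 definitions but should be treated as illustrative constants here.

/* Illustrative constants; the driver takes these from its own defines. */
#include <stdio.h>

#define VFTA_ENTRY_SHIFT          5	/* 32 bits per entry */
#define VFTA_ENTRY_MASK           0x7F	/* 128 entries */
#define VFTA_ENTRY_BIT_SHIFT_MASK 0x1F	/* bit index within an entry */

int main(void)
{
	unsigned int vlan_id = 100;	/* example manageability VLAN ID */

	unsigned int offset = (vlan_id >> VFTA_ENTRY_SHIFT) & VFTA_ENTRY_MASK;
	unsigned int bit    = 1u << (vlan_id & VFTA_ENTRY_BIT_SHIFT_MASK);

	/* VLAN 100: entry 3, bit 4 (100 = 3*32 + 4) */
	printf("offset=%u bit=0x%08x\n", offset, bit);
	return 0;
}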
1368 | |||
1369 | /** | ||
1370 | * e1000_check_mng_mode_82574 - Check manageability is enabled | ||
1371 | * @hw: pointer to the HW structure | ||
1372 | * | ||
1373 | * Reads the NVM Initialization Control Word 2 and returns true | ||
1374 | * (>0) if any manageability is enabled, else false (0). | ||
1375 | **/ | ||
1376 | static bool e1000_check_mng_mode_82574(struct e1000_hw *hw) | ||
1377 | { | ||
1378 | u16 data; | ||
1379 | |||
1380 | e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); | ||
1381 | return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0; | ||
1382 | } | ||
1383 | |||
1384 | /** | ||
1385 | * e1000_led_on_82574 - Turn LED on | ||
1386 | * @hw: pointer to the HW structure | ||
1387 | * | ||
1388 | * Turn LED on. | ||
1389 | **/ | ||
1390 | static s32 e1000_led_on_82574(struct e1000_hw *hw) | ||
1391 | { | ||
1392 | u32 ctrl; | ||
1393 | u32 i; | ||
1394 | |||
1395 | ctrl = hw->mac.ledctl_mode2; | ||
1396 | if (!(E1000_STATUS_LU & er32(STATUS))) { | ||
1397 | /* | ||
1398 | * If no link, then turn LED on by setting the invert bit | ||
1399 | * for each LED that's "on" (0x0E) in ledctl_mode2. | ||
1400 | */ | ||
1401 | for (i = 0; i < 4; i++) | ||
1402 | if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == | ||
1403 | E1000_LEDCTL_MODE_LED_ON) | ||
1404 | ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8)); | ||
1405 | } | ||
1406 | ew32(LEDCTL, ctrl); | ||
1407 | |||
1408 | return 0; | ||
1409 | } | ||
1410 | |||
1411 | /** | ||
1412 | * e1000_check_phy_82574 - check 82574 phy hung state | ||
1413 | * @hw: pointer to the HW structure | ||
1414 | * | ||
1415 | * Returns whether the PHY is hung. | ||
1416 | **/ | ||
1417 | bool e1000_check_phy_82574(struct e1000_hw *hw) | ||
1418 | { | ||
1419 | u16 status_1kbt = 0; | ||
1420 | u16 receive_errors = 0; | ||
1421 | bool phy_hung = false; | ||
1422 | s32 ret_val = 0; | ||
1423 | |||
1424 | /* | ||
1425 | * Read the PHY Receive Error counter first; if it is at max (all F's), | ||
1426 | * then read the Base1000T status register. If both are at max, the PHY is hung. | ||
1427 | */ | ||
1428 | ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); | ||
1429 | |||
1430 | if (ret_val) | ||
1431 | goto out; | ||
1432 | if (receive_errors == E1000_RECEIVE_ERROR_MAX) { | ||
1433 | ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt); | ||
1434 | if (ret_val) | ||
1435 | goto out; | ||
1436 | if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) == | ||
1437 | E1000_IDLE_ERROR_COUNT_MASK) | ||
1438 | phy_hung = true; | ||
1439 | } | ||
1440 | out: | ||
1441 | return phy_hung; | ||
1442 | } | ||
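Editor's note: the heuristic above declares the PHY hung only when two independent indicators are saturated -- the receive error counter reads its maximum (all F's) and the idle error count field of the Base1000T status register equals its mask. The standalone sketch below replays that two-counter check with simulated register values; the masks mirror the defines used above but the inputs are made up.

/* Simulated register values; masks mirror the defines used above. */
#include <stdbool.h>
#include <stdio.h>

#define RX_ERROR_MAX          0xFFFF
#define IDLE_ERROR_COUNT_MASK 0xFF

static bool phy_hung(unsigned int rx_err_counter, unsigned int base1000t_status)
{
	/* Cheap check first: anything below max means the PHY is still alive. */
	if (rx_err_counter != RX_ERROR_MAX)
		return false;

	/* Only then look at the idle error count; both pegged => hung. */
	return (base1000t_status & IDLE_ERROR_COUNT_MASK) == IDLE_ERROR_COUNT_MASK;
}

int main(void)
{
	printf("%d\n", phy_hung(0x0012, 0x00FF));	/* 0: rx counter not pegged */
	printf("%d\n", phy_hung(0xFFFF, 0x40FF));	/* 1: both indicators pegged */
	return 0;
}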
1443 | |||
1444 | /** | ||
1445 | * e1000_setup_link_82571 - Setup flow control and link settings | ||
1446 | * @hw: pointer to the HW structure | ||
1447 | * | ||
1448 | * Determines which flow control settings to use, then configures flow | ||
1449 | * control. Calls the appropriate media-specific link configuration | ||
1450 | * function. Assuming the adapter has a valid link partner, a valid link | ||
1451 | * should be established. Assumes the hardware has previously been reset | ||
1452 | * and the transmitter and receiver are not enabled. | ||
1453 | **/ | ||
1454 | static s32 e1000_setup_link_82571(struct e1000_hw *hw) | ||
1455 | { | ||
1456 | /* | ||
1457 | * 82573 does not have a word in the NVM to determine | ||
1458 | * the default flow control setting, so we explicitly | ||
1459 | * set it to full. | ||
1460 | */ | ||
1461 | switch (hw->mac.type) { | ||
1462 | case e1000_82573: | ||
1463 | case e1000_82574: | ||
1464 | case e1000_82583: | ||
1465 | if (hw->fc.requested_mode == e1000_fc_default) | ||
1466 | hw->fc.requested_mode = e1000_fc_full; | ||
1467 | break; | ||
1468 | default: | ||
1469 | break; | ||
1470 | } | ||
1471 | |||
1472 | return e1000e_setup_link(hw); | ||
1473 | } | ||
1474 | |||
1475 | /** | ||
1476 | * e1000_setup_copper_link_82571 - Configure copper link settings | ||
1477 | * @hw: pointer to the HW structure | ||
1478 | * | ||
1479 | * Configures the link for auto-negotiation or forced speed and duplex. We | ||
1480 | * then check for link; once link is established, collision distance and | ||
1481 | * flow control are configured. | ||
1482 | **/ | ||
1483 | static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) | ||
1484 | { | ||
1485 | u32 ctrl; | ||
1486 | s32 ret_val; | ||
1487 | |||
1488 | ctrl = er32(CTRL); | ||
1489 | ctrl |= E1000_CTRL_SLU; | ||
1490 | ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | ||
1491 | ew32(CTRL, ctrl); | ||
1492 | |||
1493 | switch (hw->phy.type) { | ||
1494 | case e1000_phy_m88: | ||
1495 | case e1000_phy_bm: | ||
1496 | ret_val = e1000e_copper_link_setup_m88(hw); | ||
1497 | break; | ||
1498 | case e1000_phy_igp_2: | ||
1499 | ret_val = e1000e_copper_link_setup_igp(hw); | ||
1500 | break; | ||
1501 | default: | ||
1502 | return -E1000_ERR_PHY; | ||
1503 | break; | ||
1504 | } | ||
1505 | |||
1506 | if (ret_val) | ||
1507 | return ret_val; | ||
1508 | |||
1509 | ret_val = e1000e_setup_copper_link(hw); | ||
1510 | |||
1511 | return ret_val; | ||
1512 | } | ||
1513 | |||
1514 | /** | ||
1515 | * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes | ||
1516 | * @hw: pointer to the HW structure | ||
1517 | * | ||
1518 | * Configures collision distance and flow control for fiber and serdes links. | ||
1519 | * Upon successful setup, poll for link. | ||
1520 | **/ | ||
1521 | static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) | ||
1522 | { | ||
1523 | switch (hw->mac.type) { | ||
1524 | case e1000_82571: | ||
1525 | case e1000_82572: | ||
1526 | /* | ||
1527 | * If SerDes loopback mode is entered, there is no form | ||
1528 | * of reset to take the adapter out of that mode. So we | ||
1529 | * have to explicitly take the adapter out of loopback | ||
1530 | * mode. This prevents drivers from twiddling their thumbs | ||
1531 | * if another tool failed to take it out of loopback mode. | ||
1532 | */ | ||
1533 | ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); | ||
1534 | break; | ||
1535 | default: | ||
1536 | break; | ||
1537 | } | ||
1538 | |||
1539 | return e1000e_setup_fiber_serdes_link(hw); | ||
1540 | } | ||
1541 | |||
1542 | /** | ||
1543 | * e1000_check_for_serdes_link_82571 - Check for link (Serdes) | ||
1544 | * @hw: pointer to the HW structure | ||
1545 | * | ||
1546 | * Reports the link state as up or down. | ||
1547 | * | ||
1548 | * If autonegotiation is supported by the link partner, the link state is | ||
1549 | * determined by the result of autonegotiation. This is the most likely case. | ||
1550 | * If autonegotiation is not supported by the link partner, and the link | ||
1551 | * has a valid signal, force the link up. | ||
1552 | * | ||
1553 | * The link state is represented internally here by 4 states: | ||
1554 | * | ||
1555 | * 1) down | ||
1556 | * 2) autoneg_progress | ||
1557 | * 3) autoneg_complete (the link successfully autonegotiated) | ||
1558 | * 4) forced_up (the link has been forced up, it did not autonegotiate) | ||
1559 | * | ||
1560 | **/ | ||
1561 | static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) | ||
1562 | { | ||
1563 | struct e1000_mac_info *mac = &hw->mac; | ||
1564 | u32 rxcw; | ||
1565 | u32 ctrl; | ||
1566 | u32 status; | ||
1567 | u32 txcw; | ||
1568 | u32 i; | ||
1569 | s32 ret_val = 0; | ||
1570 | |||
1571 | ctrl = er32(CTRL); | ||
1572 | status = er32(STATUS); | ||
1573 | rxcw = er32(RXCW); | ||
1574 | |||
1575 | if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { | ||
1576 | |||
1577 | /* Receiver is synchronized with no invalid bits. */ | ||
1578 | switch (mac->serdes_link_state) { | ||
1579 | case e1000_serdes_link_autoneg_complete: | ||
1580 | if (!(status & E1000_STATUS_LU)) { | ||
1581 | /* | ||
1582 | * We have lost link, retry autoneg before | ||
1583 | * reporting link failure | ||
1584 | */ | ||
1585 | mac->serdes_link_state = | ||
1586 | e1000_serdes_link_autoneg_progress; | ||
1587 | mac->serdes_has_link = false; | ||
1588 | e_dbg("AN_UP -> AN_PROG\n"); | ||
1589 | } else { | ||
1590 | mac->serdes_has_link = true; | ||
1591 | } | ||
1592 | break; | ||
1593 | |||
1594 | case e1000_serdes_link_forced_up: | ||
1595 | /* | ||
1596 | * If we are receiving /C/ ordered sets, re-enable | ||
1597 | * auto-negotiation in the TXCW register and disable | ||
1598 | * forced link in the Device Control register in an | ||
1599 | * attempt to auto-negotiate with our link partner. | ||
1600 | * If the partner code word is null, stop forcing | ||
1601 | * and restart auto negotiation. | ||
1602 | */ | ||
1603 | if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) { | ||
1604 | /* Enable autoneg, and unforce link up */ | ||
1605 | ew32(TXCW, mac->txcw); | ||
1606 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | ||
1607 | mac->serdes_link_state = | ||
1608 | e1000_serdes_link_autoneg_progress; | ||
1609 | mac->serdes_has_link = false; | ||
1610 | e_dbg("FORCED_UP -> AN_PROG\n"); | ||
1611 | } else { | ||
1612 | mac->serdes_has_link = true; | ||
1613 | } | ||
1614 | break; | ||
1615 | |||
1616 | case e1000_serdes_link_autoneg_progress: | ||
1617 | if (rxcw & E1000_RXCW_C) { | ||
1618 | /* | ||
1619 | * We received /C/ ordered sets, meaning the | ||
1620 | * link partner has autonegotiated, and we can | ||
1621 | * trust the Link Up (LU) status bit. | ||
1622 | */ | ||
1623 | if (status & E1000_STATUS_LU) { | ||
1624 | mac->serdes_link_state = | ||
1625 | e1000_serdes_link_autoneg_complete; | ||
1626 | e_dbg("AN_PROG -> AN_UP\n"); | ||
1627 | mac->serdes_has_link = true; | ||
1628 | } else { | ||
1629 | /* Autoneg completed, but failed. */ | ||
1630 | mac->serdes_link_state = | ||
1631 | e1000_serdes_link_down; | ||
1632 | e_dbg("AN_PROG -> DOWN\n"); | ||
1633 | } | ||
1634 | } else { | ||
1635 | /* | ||
1636 | * The link partner did not autoneg. | ||
1637 | * Force link up and full duplex, and change | ||
1638 | * state to forced. | ||
1639 | */ | ||
1640 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | ||
1641 | ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); | ||
1642 | ew32(CTRL, ctrl); | ||
1643 | |||
1644 | /* Configure Flow Control after link up. */ | ||
1645 | ret_val = e1000e_config_fc_after_link_up(hw); | ||
1646 | if (ret_val) { | ||
1647 | e_dbg("Error config flow control\n"); | ||
1648 | break; | ||
1649 | } | ||
1650 | mac->serdes_link_state = | ||
1651 | e1000_serdes_link_forced_up; | ||
1652 | mac->serdes_has_link = true; | ||
1653 | e_dbg("AN_PROG -> FORCED_UP\n"); | ||
1654 | } | ||
1655 | break; | ||
1656 | |||
1657 | case e1000_serdes_link_down: | ||
1658 | default: | ||
1659 | /* | ||
1660 | * The link was down but the receiver has now gained | ||
1661 | * valid sync, so let's see if we can bring the link | ||
1662 | * up. | ||
1663 | */ | ||
1664 | ew32(TXCW, mac->txcw); | ||
1665 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | ||
1666 | mac->serdes_link_state = | ||
1667 | e1000_serdes_link_autoneg_progress; | ||
1668 | mac->serdes_has_link = false; | ||
1669 | e_dbg("DOWN -> AN_PROG\n"); | ||
1670 | break; | ||
1671 | } | ||
1672 | } else { | ||
1673 | if (!(rxcw & E1000_RXCW_SYNCH)) { | ||
1674 | mac->serdes_has_link = false; | ||
1675 | mac->serdes_link_state = e1000_serdes_link_down; | ||
1676 | e_dbg("ANYSTATE -> DOWN\n"); | ||
1677 | } else { | ||
1678 | /* | ||
1679 | * Check several times; if Sync and Config are | ||
1680 | * both consistently 1, simply ignore the Invalid | ||
1681 | * bit and restart autonegotiation. | ||
1682 | */ | ||
1683 | for (i = 0; i < AN_RETRY_COUNT; i++) { | ||
1684 | udelay(10); | ||
1685 | rxcw = er32(RXCW); | ||
1686 | if ((rxcw & E1000_RXCW_IV) && | ||
1687 | !((rxcw & E1000_RXCW_SYNCH) && | ||
1688 | (rxcw & E1000_RXCW_C))) { | ||
1689 | mac->serdes_has_link = false; | ||
1690 | mac->serdes_link_state = | ||
1691 | e1000_serdes_link_down; | ||
1692 | e_dbg("ANYSTATE -> DOWN\n"); | ||
1693 | break; | ||
1694 | } | ||
1695 | } | ||
1696 | |||
1697 | if (i == AN_RETRY_COUNT) { | ||
1698 | txcw = er32(TXCW); | ||
1699 | txcw |= E1000_TXCW_ANE; | ||
1700 | ew32(TXCW, txcw); | ||
1701 | mac->serdes_link_state = | ||
1702 | e1000_serdes_link_autoneg_progress; | ||
1703 | mac->serdes_has_link = false; | ||
1704 | e_dbg("ANYSTATE -> AN_PROG\n"); | ||
1705 | } | ||
1706 | } | ||
1707 | } | ||
1708 | |||
1709 | return ret_val; | ||
1710 | } | ||
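The function above walks the four-state serdes machine described in its header comment. As a rough, self-contained sketch (not the driver's code; the enum constants simply mirror the mac->serdes_link_state values used above, the flag arguments stand in for the E1000_RXCW_SYNCH, E1000_RXCW_C and E1000_STATUS_LU bits, and the invalid-bit retry and null-code-word cases are omitted), the transitions reduce to:

	/* Simplified model of the serdes link state transitions above. */
	enum serdes_state { LINK_DOWN, AN_PROGRESS, AN_COMPLETE, FORCED_UP };

	static enum serdes_state next_state(enum serdes_state cur,
					    int rxcw_sync, int rxcw_c, int status_lu)
	{
		if (!rxcw_sync)			/* lost sync: any state -> down */
			return LINK_DOWN;

		switch (cur) {
		case AN_COMPLETE:
			return status_lu ? AN_COMPLETE : AN_PROGRESS;
		case FORCED_UP:
			return rxcw_c ? AN_PROGRESS : FORCED_UP;
		case AN_PROGRESS:
			if (rxcw_c)		/* partner autonegotiated; trust LU */
				return status_lu ? AN_COMPLETE : LINK_DOWN;
			return FORCED_UP;	/* partner did not autoneg: force link */
		default:			/* LINK_DOWN */
			return AN_PROGRESS;	/* valid sync again: retry autoneg */
		}
	}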
1711 | |||
1712 | /** | ||
1713 | * e1000_valid_led_default_82571 - Verify a valid default LED config | ||
1714 | * @hw: pointer to the HW structure | ||
1715 | * @data: pointer to the NVM word holding the LED configuration | ||
1716 | * | ||
1717 | * Read the EEPROM for the current default LED configuration. If the | ||
1718 | * LED configuration is not valid, set to a valid LED configuration. | ||
1719 | **/ | ||
1720 | static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data) | ||
1721 | { | ||
1722 | s32 ret_val; | ||
1723 | |||
1724 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); | ||
1725 | if (ret_val) { | ||
1726 | e_dbg("NVM Read Error\n"); | ||
1727 | return ret_val; | ||
1728 | } | ||
1729 | |||
1730 | switch (hw->mac.type) { | ||
1731 | case e1000_82573: | ||
1732 | case e1000_82574: | ||
1733 | case e1000_82583: | ||
1734 | if (*data == ID_LED_RESERVED_F746) | ||
1735 | *data = ID_LED_DEFAULT_82573; | ||
1736 | break; | ||
1737 | default: | ||
1738 | if (*data == ID_LED_RESERVED_0000 || | ||
1739 | *data == ID_LED_RESERVED_FFFF) | ||
1740 | *data = ID_LED_DEFAULT; | ||
1741 | break; | ||
1742 | } | ||
1743 | |||
1744 | return 0; | ||
1745 | } | ||
1746 | |||
1747 | /** | ||
1748 | * e1000e_get_laa_state_82571 - Get locally administered address state | ||
1749 | * @hw: pointer to the HW structure | ||
1750 | * | ||
1751 | * Retrieve and return the current locally administered address state. | ||
1752 | **/ | ||
1753 | bool e1000e_get_laa_state_82571(struct e1000_hw *hw) | ||
1754 | { | ||
1755 | if (hw->mac.type != e1000_82571) | ||
1756 | return false; | ||
1757 | |||
1758 | return hw->dev_spec.e82571.laa_is_present; | ||
1759 | } | ||
1760 | |||
1761 | /** | ||
1762 | * e1000e_set_laa_state_82571 - Set locally administered address state | ||
1763 | * @hw: pointer to the HW structure | ||
1764 | * @state: enable/disable locally administered address | ||
1765 | * | ||
1766 | * Enable/Disable the current locally administered address state. | ||
1767 | **/ | ||
1768 | void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state) | ||
1769 | { | ||
1770 | if (hw->mac.type != e1000_82571) | ||
1771 | return; | ||
1772 | |||
1773 | hw->dev_spec.e82571.laa_is_present = state; | ||
1774 | |||
1775 | /* If workaround is activated... */ | ||
1776 | if (state) | ||
1777 | /* | ||
1778 | * Hold a copy of the LAA in RAR[14]. This is done so that | ||
1779 | * between the time RAR[0] gets clobbered and the time it | ||
1780 | * gets fixed, the actual LAA is in one of the RARs and no | ||
1781 | * incoming packets directed to this port are dropped. | ||
1782 | * Eventually the LAA will be in RAR[0] and RAR[14]. | ||
1783 | */ | ||
1784 | e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1); | ||
1785 | } | ||
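A toy model of the RAR[14] workaround above, with a made-up rar_table array and NUM_RAR constant standing in for the hardware receive-address registers (illustrative only, not the driver's code): keeping a second copy of the LAA in the last slot means directed packets keep matching even while slot 0 is being rewritten after a reset.

	#include <string.h>

	#define NUM_RAR 15
	static unsigned char rar_table[NUM_RAR][6];	/* hypothetical RAR shadow */

	static void set_laa(const unsigned char *laa)
	{
		/* The directed address normally lives in slot 0 ... */
		memcpy(rar_table[0], laa, 6);
		/* ... and the copy in the last slot keeps matching incoming
		 * packets during the window in which a reset clobbers slot 0. */
		memcpy(rar_table[NUM_RAR - 1], laa, 6);
	}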
1786 | |||
1787 | /** | ||
1788 | * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum | ||
1789 | * @hw: pointer to the HW structure | ||
1790 | * | ||
1791 | * Verifies that the EEPROM has completed the update. After updating the | ||
1792 | * EEPROM, we need to check bit 15 in word 0x23 for the checksum fix. If | ||
1793 | * the checksum fix is not implemented, we need to set the bit and update | ||
1794 | * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect, | ||
1795 | * we need to return bad checksum. | ||
1796 | **/ | ||
1797 | static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) | ||
1798 | { | ||
1799 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
1800 | s32 ret_val; | ||
1801 | u16 data; | ||
1802 | |||
1803 | if (nvm->type != e1000_nvm_flash_hw) | ||
1804 | return 0; | ||
1805 | |||
1806 | /* | ||
1807 | * Check bit 4 of word 10h. If it is 0, firmware is done updating | ||
1808 | * 10h-12h. Checksum may need to be fixed. | ||
1809 | */ | ||
1810 | ret_val = e1000_read_nvm(hw, 0x10, 1, &data); | ||
1811 | if (ret_val) | ||
1812 | return ret_val; | ||
1813 | |||
1814 | if (!(data & 0x10)) { | ||
1815 | /* | ||
1816 | * Read 0x23 and check bit 15. This bit is a 1 | ||
1817 | * when the checksum has already been fixed. If | ||
1818 | * the checksum is still wrong and this bit is a | ||
1819 | * 1, we need to return bad checksum. Otherwise, | ||
1820 | * we need to set this bit to a 1 and update the | ||
1821 | * checksum. | ||
1822 | */ | ||
1823 | ret_val = e1000_read_nvm(hw, 0x23, 1, &data); | ||
1824 | if (ret_val) | ||
1825 | return ret_val; | ||
1826 | |||
1827 | if (!(data & 0x8000)) { | ||
1828 | data |= 0x8000; | ||
1829 | ret_val = e1000_write_nvm(hw, 0x23, 1, &data); | ||
1830 | if (ret_val) | ||
1831 | return ret_val; | ||
1832 | ret_val = e1000e_update_nvm_checksum(hw); | ||
1833 | } | ||
1834 | } | ||
1835 | |||
1836 | return 0; | ||
1837 | } | ||
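The checksum update above relies on the rule documented further down in defines.h (NVM_SUM): the NVM words from offset 0x00 through NVM_CHECKSUM_REG (0x3F) must sum to 0xBABA. A minimal sketch of recomputing the checksum word over a hypothetical in-memory copy of those words (nvm_image is a made-up buffer, not a driver structure):

	#include <stdint.h>

	#define NVM_CHECKSUM_REG 0x003F
	#define NVM_SUM          0xBABA

	/* Choose the checksum word so that words 0x00..0x3F sum to NVM_SUM. */
	static uint16_t nvm_checksum(const uint16_t nvm_image[NVM_CHECKSUM_REG + 1])
	{
		uint16_t sum = 0;
		int i;

		for (i = 0; i < NVM_CHECKSUM_REG; i++)
			sum += nvm_image[i];

		return (uint16_t)(NVM_SUM - sum);
	}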
1838 | |||
1839 | /** | ||
1840 | * e1000_read_mac_addr_82571 - Read device MAC address | ||
1841 | * @hw: pointer to the HW structure | ||
1842 | **/ | ||
1843 | static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) | ||
1844 | { | ||
1845 | s32 ret_val = 0; | ||
1846 | |||
1847 | if (hw->mac.type == e1000_82571) { | ||
1848 | /* | ||
1849 | * If there's an alternate MAC address place it in RAR0 | ||
1850 | * so that it will override the Si installed default perm | ||
1851 | * address. | ||
1852 | */ | ||
1853 | ret_val = e1000_check_alt_mac_addr_generic(hw); | ||
1854 | if (ret_val) | ||
1855 | goto out; | ||
1856 | } | ||
1857 | |||
1858 | ret_val = e1000_read_mac_addr_generic(hw); | ||
1859 | |||
1860 | out: | ||
1861 | return ret_val; | ||
1862 | } | ||
1863 | |||
1864 | /** | ||
1865 | * e1000_power_down_phy_copper_82571 - Remove link during PHY power down | ||
1866 | * @hw: pointer to the HW structure | ||
1867 | * | ||
1868 | * In the case of a PHY power down to save power, or to turn off link during a | ||
1869 | * driver unload, or when wake on LAN is not enabled, remove the link. | ||
1870 | **/ | ||
1871 | static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw) | ||
1872 | { | ||
1873 | struct e1000_phy_info *phy = &hw->phy; | ||
1874 | struct e1000_mac_info *mac = &hw->mac; | ||
1875 | |||
1876 | if (!(phy->ops.check_reset_block)) | ||
1877 | return; | ||
1878 | |||
1879 | /* If the management interface is not enabled and PHY resets are not blocked, power down */ | ||
1880 | if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw))) | ||
1881 | e1000_power_down_phy_copper(hw); | ||
1882 | } | ||
1883 | |||
1884 | /** | ||
1885 | * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters | ||
1886 | * @hw: pointer to the HW structure | ||
1887 | * | ||
1888 | * Clears the hardware counters by reading the counter registers. | ||
1889 | **/ | ||
1890 | static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) | ||
1891 | { | ||
1892 | e1000e_clear_hw_cntrs_base(hw); | ||
1893 | |||
1894 | er32(PRC64); | ||
1895 | er32(PRC127); | ||
1896 | er32(PRC255); | ||
1897 | er32(PRC511); | ||
1898 | er32(PRC1023); | ||
1899 | er32(PRC1522); | ||
1900 | er32(PTC64); | ||
1901 | er32(PTC127); | ||
1902 | er32(PTC255); | ||
1903 | er32(PTC511); | ||
1904 | er32(PTC1023); | ||
1905 | er32(PTC1522); | ||
1906 | |||
1907 | er32(ALGNERRC); | ||
1908 | er32(RXERRC); | ||
1909 | er32(TNCRS); | ||
1910 | er32(CEXTERR); | ||
1911 | er32(TSCTC); | ||
1912 | er32(TSCTFC); | ||
1913 | |||
1914 | er32(MGTPRC); | ||
1915 | er32(MGTPDC); | ||
1916 | er32(MGTPTC); | ||
1917 | |||
1918 | er32(IAC); | ||
1919 | er32(ICRXOC); | ||
1920 | |||
1921 | er32(ICRXPTC); | ||
1922 | er32(ICRXATC); | ||
1923 | er32(ICTXPTC); | ||
1924 | er32(ICTXATC); | ||
1925 | er32(ICTXQEC); | ||
1926 | er32(ICTXQMTC); | ||
1927 | er32(ICRXDMTC); | ||
1928 | } | ||
1929 | |||
1930 | static struct e1000_mac_operations e82571_mac_ops = { | ||
1931 | /* .check_mng_mode: mac type dependent */ | ||
1932 | /* .check_for_link: media type dependent */ | ||
1933 | .id_led_init = e1000e_id_led_init, | ||
1934 | .cleanup_led = e1000e_cleanup_led_generic, | ||
1935 | .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, | ||
1936 | .get_bus_info = e1000e_get_bus_info_pcie, | ||
1937 | .set_lan_id = e1000_set_lan_id_multi_port_pcie, | ||
1938 | /* .get_link_up_info: media type dependent */ | ||
1939 | /* .led_on: mac type dependent */ | ||
1940 | .led_off = e1000e_led_off_generic, | ||
1941 | .update_mc_addr_list = e1000e_update_mc_addr_list_generic, | ||
1942 | .write_vfta = e1000_write_vfta_generic, | ||
1943 | .clear_vfta = e1000_clear_vfta_82571, | ||
1944 | .reset_hw = e1000_reset_hw_82571, | ||
1945 | .init_hw = e1000_init_hw_82571, | ||
1946 | .setup_link = e1000_setup_link_82571, | ||
1947 | /* .setup_physical_interface: media type dependent */ | ||
1948 | .setup_led = e1000e_setup_led_generic, | ||
1949 | .read_mac_addr = e1000_read_mac_addr_82571, | ||
1950 | }; | ||
1951 | |||
1952 | static struct e1000_phy_operations e82_phy_ops_igp = { | ||
1953 | .acquire = e1000_get_hw_semaphore_82571, | ||
1954 | .check_polarity = e1000_check_polarity_igp, | ||
1955 | .check_reset_block = e1000e_check_reset_block_generic, | ||
1956 | .commit = NULL, | ||
1957 | .force_speed_duplex = e1000e_phy_force_speed_duplex_igp, | ||
1958 | .get_cfg_done = e1000_get_cfg_done_82571, | ||
1959 | .get_cable_length = e1000e_get_cable_length_igp_2, | ||
1960 | .get_info = e1000e_get_phy_info_igp, | ||
1961 | .read_reg = e1000e_read_phy_reg_igp, | ||
1962 | .release = e1000_put_hw_semaphore_82571, | ||
1963 | .reset = e1000e_phy_hw_reset_generic, | ||
1964 | .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, | ||
1965 | .set_d3_lplu_state = e1000e_set_d3_lplu_state, | ||
1966 | .write_reg = e1000e_write_phy_reg_igp, | ||
1967 | .cfg_on_link_up = NULL, | ||
1968 | }; | ||
1969 | |||
1970 | static struct e1000_phy_operations e82_phy_ops_m88 = { | ||
1971 | .acquire = e1000_get_hw_semaphore_82571, | ||
1972 | .check_polarity = e1000_check_polarity_m88, | ||
1973 | .check_reset_block = e1000e_check_reset_block_generic, | ||
1974 | .commit = e1000e_phy_sw_reset, | ||
1975 | .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, | ||
1976 | .get_cfg_done = e1000e_get_cfg_done, | ||
1977 | .get_cable_length = e1000e_get_cable_length_m88, | ||
1978 | .get_info = e1000e_get_phy_info_m88, | ||
1979 | .read_reg = e1000e_read_phy_reg_m88, | ||
1980 | .release = e1000_put_hw_semaphore_82571, | ||
1981 | .reset = e1000e_phy_hw_reset_generic, | ||
1982 | .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, | ||
1983 | .set_d3_lplu_state = e1000e_set_d3_lplu_state, | ||
1984 | .write_reg = e1000e_write_phy_reg_m88, | ||
1985 | .cfg_on_link_up = NULL, | ||
1986 | }; | ||
1987 | |||
1988 | static struct e1000_phy_operations e82_phy_ops_bm = { | ||
1989 | .acquire = e1000_get_hw_semaphore_82571, | ||
1990 | .check_polarity = e1000_check_polarity_m88, | ||
1991 | .check_reset_block = e1000e_check_reset_block_generic, | ||
1992 | .commit = e1000e_phy_sw_reset, | ||
1993 | .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, | ||
1994 | .get_cfg_done = e1000e_get_cfg_done, | ||
1995 | .get_cable_length = e1000e_get_cable_length_m88, | ||
1996 | .get_info = e1000e_get_phy_info_m88, | ||
1997 | .read_reg = e1000e_read_phy_reg_bm2, | ||
1998 | .release = e1000_put_hw_semaphore_82571, | ||
1999 | .reset = e1000e_phy_hw_reset_generic, | ||
2000 | .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, | ||
2001 | .set_d3_lplu_state = e1000e_set_d3_lplu_state, | ||
2002 | .write_reg = e1000e_write_phy_reg_bm2, | ||
2003 | .cfg_on_link_up = NULL, | ||
2004 | }; | ||
2005 | |||
2006 | static struct e1000_nvm_operations e82571_nvm_ops = { | ||
2007 | .acquire = e1000_acquire_nvm_82571, | ||
2008 | .read = e1000e_read_nvm_eerd, | ||
2009 | .release = e1000_release_nvm_82571, | ||
2010 | .update = e1000_update_nvm_checksum_82571, | ||
2011 | .valid_led_default = e1000_valid_led_default_82571, | ||
2012 | .validate = e1000_validate_nvm_checksum_82571, | ||
2013 | .write = e1000_write_nvm_82571, | ||
2014 | }; | ||
2015 | |||
2016 | struct e1000_info e1000_82571_info = { | ||
2017 | .mac = e1000_82571, | ||
2018 | .flags = FLAG_HAS_HW_VLAN_FILTER | ||
2019 | | FLAG_HAS_JUMBO_FRAMES | ||
2020 | | FLAG_HAS_WOL | ||
2021 | | FLAG_APME_IN_CTRL3 | ||
2022 | | FLAG_RX_CSUM_ENABLED | ||
2023 | | FLAG_HAS_CTRLEXT_ON_LOAD | ||
2024 | | FLAG_HAS_SMART_POWER_DOWN | ||
2025 | | FLAG_RESET_OVERWRITES_LAA /* errata */ | ||
2026 | | FLAG_TARC_SPEED_MODE_BIT /* errata */ | ||
2027 | | FLAG_APME_CHECK_PORT_B, | ||
2028 | .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ | ||
2029 | | FLAG2_DMA_BURST, | ||
2030 | .pba = 38, | ||
2031 | .max_hw_frame_size = DEFAULT_JUMBO, | ||
2032 | .get_variants = e1000_get_variants_82571, | ||
2033 | .mac_ops = &e82571_mac_ops, | ||
2034 | .phy_ops = &e82_phy_ops_igp, | ||
2035 | .nvm_ops = &e82571_nvm_ops, | ||
2036 | }; | ||
2037 | |||
2038 | struct e1000_info e1000_82572_info = { | ||
2039 | .mac = e1000_82572, | ||
2040 | .flags = FLAG_HAS_HW_VLAN_FILTER | ||
2041 | | FLAG_HAS_JUMBO_FRAMES | ||
2042 | | FLAG_HAS_WOL | ||
2043 | | FLAG_APME_IN_CTRL3 | ||
2044 | | FLAG_RX_CSUM_ENABLED | ||
2045 | | FLAG_HAS_CTRLEXT_ON_LOAD | ||
2046 | | FLAG_TARC_SPEED_MODE_BIT, /* errata */ | ||
2047 | .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ | ||
2048 | | FLAG2_DMA_BURST, | ||
2049 | .pba = 38, | ||
2050 | .max_hw_frame_size = DEFAULT_JUMBO, | ||
2051 | .get_variants = e1000_get_variants_82571, | ||
2052 | .mac_ops = &e82571_mac_ops, | ||
2053 | .phy_ops = &e82_phy_ops_igp, | ||
2054 | .nvm_ops = &e82571_nvm_ops, | ||
2055 | }; | ||
2056 | |||
2057 | struct e1000_info e1000_82573_info = { | ||
2058 | .mac = e1000_82573, | ||
2059 | .flags = FLAG_HAS_HW_VLAN_FILTER | ||
2060 | | FLAG_HAS_WOL | ||
2061 | | FLAG_APME_IN_CTRL3 | ||
2062 | | FLAG_RX_CSUM_ENABLED | ||
2063 | | FLAG_HAS_SMART_POWER_DOWN | ||
2064 | | FLAG_HAS_AMT | ||
2065 | | FLAG_HAS_SWSM_ON_LOAD, | ||
2066 | .flags2 = FLAG2_DISABLE_ASPM_L1 | ||
2067 | | FLAG2_DISABLE_ASPM_L0S, | ||
2068 | .pba = 20, | ||
2069 | .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, | ||
2070 | .get_variants = e1000_get_variants_82571, | ||
2071 | .mac_ops = &e82571_mac_ops, | ||
2072 | .phy_ops = &e82_phy_ops_m88, | ||
2073 | .nvm_ops = &e82571_nvm_ops, | ||
2074 | }; | ||
2075 | |||
2076 | struct e1000_info e1000_82574_info = { | ||
2077 | .mac = e1000_82574, | ||
2078 | .flags = FLAG_HAS_HW_VLAN_FILTER | ||
2079 | | FLAG_HAS_MSIX | ||
2080 | | FLAG_HAS_JUMBO_FRAMES | ||
2081 | | FLAG_HAS_WOL | ||
2082 | | FLAG_APME_IN_CTRL3 | ||
2083 | | FLAG_RX_CSUM_ENABLED | ||
2084 | | FLAG_HAS_SMART_POWER_DOWN | ||
2085 | | FLAG_HAS_AMT | ||
2086 | | FLAG_HAS_CTRLEXT_ON_LOAD, | ||
2087 | .flags2 = FLAG2_CHECK_PHY_HANG | ||
2088 | | FLAG2_DISABLE_ASPM_L0S, | ||
2089 | .pba = 32, | ||
2090 | .max_hw_frame_size = DEFAULT_JUMBO, | ||
2091 | .get_variants = e1000_get_variants_82571, | ||
2092 | .mac_ops = &e82571_mac_ops, | ||
2093 | .phy_ops = &e82_phy_ops_bm, | ||
2094 | .nvm_ops = &e82571_nvm_ops, | ||
2095 | }; | ||
2096 | |||
2097 | struct e1000_info e1000_82583_info = { | ||
2098 | .mac = e1000_82583, | ||
2099 | .flags = FLAG_HAS_HW_VLAN_FILTER | ||
2100 | | FLAG_HAS_WOL | ||
2101 | | FLAG_APME_IN_CTRL3 | ||
2102 | | FLAG_RX_CSUM_ENABLED | ||
2103 | | FLAG_HAS_SMART_POWER_DOWN | ||
2104 | | FLAG_HAS_AMT | ||
2105 | | FLAG_HAS_JUMBO_FRAMES | ||
2106 | | FLAG_HAS_CTRLEXT_ON_LOAD, | ||
2107 | .flags2 = FLAG2_DISABLE_ASPM_L0S, | ||
2108 | .pba = 32, | ||
2109 | .max_hw_frame_size = DEFAULT_JUMBO, | ||
2110 | .get_variants = e1000_get_variants_82571, | ||
2111 | .mac_ops = &e82571_mac_ops, | ||
2112 | .phy_ops = &e82_phy_ops_bm, | ||
2113 | .nvm_ops = &e82571_nvm_ops, | ||
2114 | }; | ||
2115 | |||
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile new file mode 100644 index 000000000000..948c05db5d68 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/Makefile | |||
@@ -0,0 +1,37 @@ | |||
1 | ################################################################################ | ||
2 | # | ||
3 | # Intel PRO/1000 Linux driver | ||
4 | # Copyright(c) 1999 - 2011 Intel Corporation. | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify it | ||
7 | # under the terms and conditions of the GNU General Public License, | ||
8 | # version 2, as published by the Free Software Foundation. | ||
9 | # | ||
10 | # This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | # more details. | ||
14 | # | ||
15 | # You should have received a copy of the GNU General Public License along with | ||
16 | # this program; if not, write to the Free Software Foundation, Inc., | ||
17 | # 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | # | ||
19 | # The full GNU General Public License is included in this distribution in | ||
20 | # the file called "COPYING". | ||
21 | # | ||
22 | # Contact Information: | ||
23 | # Linux NICS <linux.nics@intel.com> | ||
24 | # e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | # Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | # | ||
27 | ################################################################################ | ||
28 | |||
29 | # | ||
30 | # Makefile for the Intel(R) PRO/1000 ethernet driver | ||
31 | # | ||
32 | |||
33 | obj-$(CONFIG_E1000E) += e1000e.o | ||
34 | |||
35 | e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \ | ||
36 | lib.o phy.o param.o ethtool.o netdev.o | ||
37 | |||
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h new file mode 100644 index 000000000000..c516a7440bec --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/defines.h | |||
@@ -0,0 +1,844 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/1000 Linux driver | ||
4 | Copyright(c) 1999 - 2011 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #ifndef _E1000_DEFINES_H_ | ||
30 | #define _E1000_DEFINES_H_ | ||
31 | |||
32 | #define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ | ||
33 | #define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ | ||
34 | #define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ | ||
35 | #define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ | ||
36 | #define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ | ||
37 | #define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ | ||
38 | #define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ | ||
39 | #define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ | ||
40 | #define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ | ||
41 | #define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ | ||
42 | #define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ | ||
43 | #define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ | ||
44 | #define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ | ||
45 | #define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ | ||
46 | #define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ | ||
47 | #define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ | ||
48 | #define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ | ||
49 | #define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ | ||
50 | |||
51 | /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ | ||
52 | #define REQ_TX_DESCRIPTOR_MULTIPLE 8 | ||
53 | #define REQ_RX_DESCRIPTOR_MULTIPLE 8 | ||
54 | |||
55 | /* Definitions for power management and wakeup registers */ | ||
56 | /* Wake Up Control */ | ||
57 | #define E1000_WUC_APME 0x00000001 /* APM Enable */ | ||
58 | #define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ | ||
59 | #define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ | ||
60 | |||
61 | /* Wake Up Filter Control */ | ||
62 | #define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ | ||
63 | #define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ | ||
64 | #define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ | ||
65 | #define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ | ||
66 | #define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ | ||
67 | #define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ | ||
68 | |||
69 | /* Wake Up Status */ | ||
70 | #define E1000_WUS_LNKC E1000_WUFC_LNKC | ||
71 | #define E1000_WUS_MAG E1000_WUFC_MAG | ||
72 | #define E1000_WUS_EX E1000_WUFC_EX | ||
73 | #define E1000_WUS_MC E1000_WUFC_MC | ||
74 | #define E1000_WUS_BC E1000_WUFC_BC | ||
75 | |||
76 | /* Extended Device Control */ | ||
77 | #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ | ||
78 | #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ | ||
79 | #define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ | ||
80 | #define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ | ||
81 | #define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */ | ||
82 | #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 | ||
83 | #define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 | ||
84 | #define E1000_CTRL_EXT_EIAME 0x01000000 | ||
85 | #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ | ||
86 | #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ | ||
87 | #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ | ||
88 | #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ | ||
89 | #define E1000_CTRL_EXT_LSECCK 0x00001000 | ||
90 | #define E1000_CTRL_EXT_PHYPDEN 0x00100000 | ||
91 | |||
92 | /* Receive Descriptor bit definitions */ | ||
93 | #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ | ||
94 | #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ | ||
95 | #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ | ||
96 | #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ | ||
97 | #define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ | ||
98 | #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ | ||
99 | #define E1000_RXD_ERR_CE 0x01 /* CRC Error */ | ||
100 | #define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ | ||
101 | #define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ | ||
102 | #define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ | ||
103 | #define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ | ||
104 | #define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ | ||
105 | #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ | ||
106 | |||
107 | #define E1000_RXDEXT_STATERR_CE 0x01000000 | ||
108 | #define E1000_RXDEXT_STATERR_SE 0x02000000 | ||
109 | #define E1000_RXDEXT_STATERR_SEQ 0x04000000 | ||
110 | #define E1000_RXDEXT_STATERR_CXE 0x10000000 | ||
111 | #define E1000_RXDEXT_STATERR_RXE 0x80000000 | ||
112 | |||
113 | /* mask to determine if packets should be dropped due to frame errors */ | ||
114 | #define E1000_RXD_ERR_FRAME_ERR_MASK ( \ | ||
115 | E1000_RXD_ERR_CE | \ | ||
116 | E1000_RXD_ERR_SE | \ | ||
117 | E1000_RXD_ERR_SEQ | \ | ||
118 | E1000_RXD_ERR_CXE | \ | ||
119 | E1000_RXD_ERR_RXE) | ||
120 | |||
121 | /* Same mask, but for extended and packet split descriptors */ | ||
122 | #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ | ||
123 | E1000_RXDEXT_STATERR_CE | \ | ||
124 | E1000_RXDEXT_STATERR_SE | \ | ||
125 | E1000_RXDEXT_STATERR_SEQ | \ | ||
126 | E1000_RXDEXT_STATERR_CXE | \ | ||
127 | E1000_RXDEXT_STATERR_RXE) | ||
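A short illustration of how these masks are intended to be used (staterr stands for a receive descriptor's combined status/error word; this is not the driver's receive routine, and it assumes defines.h is included):

	/* Drop the frame if any CRC, symbol, sequence, carrier-extension or
	 * Rx-data error bit is set in the extended descriptor's status/error
	 * field. */
	static int frame_has_errors(unsigned int staterr)
	{
		return (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0;
	}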
128 | |||
129 | #define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 | ||
130 | |||
131 | /* Management Control */ | ||
132 | #define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ | ||
133 | #define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ | ||
134 | #define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ | ||
135 | #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ | ||
136 | #define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ | ||
137 | /* Enable MAC address filtering */ | ||
138 | #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 | ||
139 | /* Enable MNG packets to host memory */ | ||
140 | #define E1000_MANC_EN_MNG2HOST 0x00200000 | ||
141 | |||
142 | #define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ | ||
143 | #define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ | ||
144 | #define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ | ||
145 | #define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ | ||
146 | |||
147 | /* Receive Control */ | ||
148 | #define E1000_RCTL_EN 0x00000002 /* enable */ | ||
149 | #define E1000_RCTL_SBP 0x00000004 /* store bad packet */ | ||
150 | #define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ | ||
151 | #define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ | ||
152 | #define E1000_RCTL_LPE 0x00000020 /* long packet enable */ | ||
153 | #define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ | ||
154 | #define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ | ||
155 | #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ | ||
156 | #define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ | ||
157 | #define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ | ||
158 | #define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ | ||
159 | #define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ | ||
160 | #define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ | ||
161 | /* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ | ||
162 | #define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ | ||
163 | #define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ | ||
164 | #define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ | ||
165 | #define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ | ||
166 | /* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ | ||
167 | #define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ | ||
168 | #define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ | ||
169 | #define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ | ||
170 | #define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ | ||
171 | #define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ | ||
172 | #define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ | ||
173 | #define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ | ||
174 | #define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ | ||
175 | #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ | ||
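Note that the SZ_* encodings are shared between the two groups above and disambiguated by BSEX. A hedged sketch of composing an RCTL value for 4096-byte receive buffers (illustrative only, assuming defines.h is included):

	/* 0x00030000 means 256 bytes with BSEX clear but 4096 bytes with BSEX
	 * set, so the size bits and BSEX must be programmed together. */
	unsigned int rctl = E1000_RCTL_EN | E1000_RCTL_BAM |
			    E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX |
			    E1000_RCTL_SECRC;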
176 | |||
177 | /* | ||
178 | * Use byte values for the following shift parameters | ||
179 | * Usage: | ||
180 | * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & | ||
181 | * E1000_PSRCTL_BSIZE0_MASK) | | ||
182 | * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & | ||
183 | * E1000_PSRCTL_BSIZE1_MASK) | | ||
184 | * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & | ||
185 | * E1000_PSRCTL_BSIZE2_MASK) | | ||
186 | * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) & | ||
187 | * E1000_PSRCTL_BSIZE3_MASK)) | ||
188 | * where value0 = [128..16256], default=256 | ||
189 | * value1 = [1024..64512], default=4096 | ||
190 | * value2 = [0..64512], default=4096 | ||
191 | * value3 = [0..64512], default=0 | ||
192 | */ | ||
193 | |||
194 | #define E1000_PSRCTL_BSIZE0_MASK 0x0000007F | ||
195 | #define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 | ||
196 | #define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 | ||
197 | #define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 | ||
198 | |||
199 | #define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ | ||
200 | #define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ | ||
201 | #define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ | ||
202 | #define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ | ||
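A worked instance of the formula in the comment above, using the documented defaults (value0=256, value1=4096, value2=4096, value3=0; all values are already 128/1024-byte aligned, so ROUNDUP is a no-op):

	/* 256 >> 7 = 0x02, 4096 >> 2 = 0x0400, 4096 << 6 = 0x040000, 0 << 14 = 0,
	 * so the defaults pack into psrctl = 0x00040402. */
	unsigned int psrctl =
		((256  >> E1000_PSRCTL_BSIZE0_SHIFT) & E1000_PSRCTL_BSIZE0_MASK) |
		((4096 >> E1000_PSRCTL_BSIZE1_SHIFT) & E1000_PSRCTL_BSIZE1_MASK) |
		((4096 << E1000_PSRCTL_BSIZE2_SHIFT) & E1000_PSRCTL_BSIZE2_MASK) |
		((0    << E1000_PSRCTL_BSIZE3_SHIFT) & E1000_PSRCTL_BSIZE3_MASK);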
203 | |||
204 | /* SWFW_SYNC Definitions */ | ||
205 | #define E1000_SWFW_EEP_SM 0x1 | ||
206 | #define E1000_SWFW_PHY0_SM 0x2 | ||
207 | #define E1000_SWFW_PHY1_SM 0x4 | ||
208 | #define E1000_SWFW_CSR_SM 0x8 | ||
209 | |||
210 | /* Device Control */ | ||
211 | #define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ | ||
212 | #define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ | ||
213 | #define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ | ||
214 | #define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ | ||
215 | #define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ | ||
216 | #define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ | ||
217 | #define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ | ||
218 | #define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ | ||
219 | #define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ | ||
220 | #define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ | ||
221 | #define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ | ||
222 | #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ | ||
223 | #define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ | ||
224 | #define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ | ||
225 | #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ | ||
226 | #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ | ||
227 | #define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ | ||
228 | #define E1000_CTRL_RST 0x04000000 /* Global reset */ | ||
229 | #define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ | ||
230 | #define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ | ||
231 | #define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ | ||
232 | #define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ | ||
233 | |||
234 | /* | ||
235 | * Bit definitions for the Management Data IO (MDIO) and Management Data | ||
236 | * Clock (MDC) pins in the Device Control Register. | ||
237 | */ | ||
238 | |||
239 | /* Device Status */ | ||
240 | #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ | ||
241 | #define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ | ||
242 | #define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ | ||
243 | #define E1000_STATUS_FUNC_SHIFT 2 | ||
244 | #define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ | ||
245 | #define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ | ||
246 | #define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ | ||
247 | #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ | ||
248 | #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ | ||
249 | #define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ | ||
250 | #define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ | ||
251 | #define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ | ||
252 | |||
253 | /* Constants used to interpret the masked PCI-X bus speed. */ | ||
254 | |||
255 | #define HALF_DUPLEX 1 | ||
256 | #define FULL_DUPLEX 2 | ||
257 | |||
258 | |||
259 | #define ADVERTISE_10_HALF 0x0001 | ||
260 | #define ADVERTISE_10_FULL 0x0002 | ||
261 | #define ADVERTISE_100_HALF 0x0004 | ||
262 | #define ADVERTISE_100_FULL 0x0008 | ||
263 | #define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ | ||
264 | #define ADVERTISE_1000_FULL 0x0020 | ||
265 | |||
266 | /* 1000/H is not supported, nor spec-compliant. */ | ||
267 | #define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ | ||
268 | ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ | ||
269 | ADVERTISE_1000_FULL) | ||
270 | #define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ | ||
271 | ADVERTISE_100_HALF | ADVERTISE_100_FULL) | ||
272 | #define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) | ||
273 | #define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) | ||
274 | #define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) | ||
275 | |||
276 | #define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX | ||
277 | |||
278 | /* LED Control */ | ||
279 | #define E1000_PHY_LED0_MODE_MASK 0x00000007 | ||
280 | #define E1000_PHY_LED0_IVRT 0x00000008 | ||
281 | #define E1000_PHY_LED0_MASK 0x0000001F | ||
282 | |||
283 | #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F | ||
284 | #define E1000_LEDCTL_LED0_MODE_SHIFT 0 | ||
285 | #define E1000_LEDCTL_LED0_IVRT 0x00000040 | ||
286 | #define E1000_LEDCTL_LED0_BLINK 0x00000080 | ||
287 | |||
288 | #define E1000_LEDCTL_MODE_LINK_UP 0x2 | ||
289 | #define E1000_LEDCTL_MODE_LED_ON 0xE | ||
290 | #define E1000_LEDCTL_MODE_LED_OFF 0xF | ||
291 | |||
292 | /* Transmit Descriptor bit definitions */ | ||
293 | #define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ | ||
294 | #define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ | ||
295 | #define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ | ||
296 | #define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ | ||
297 | #define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ | ||
298 | #define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ | ||
299 | #define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ | ||
300 | #define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ | ||
301 | #define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ | ||
302 | #define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ | ||
303 | #define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ | ||
304 | #define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ | ||
305 | #define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ | ||
306 | #define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ | ||
307 | #define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ | ||
308 | #define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ | ||
309 | #define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ | ||
310 | #define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ | ||
311 | #define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ | ||
312 | |||
313 | /* Transmit Control */ | ||
314 | #define E1000_TCTL_EN 0x00000002 /* enable Tx */ | ||
315 | #define E1000_TCTL_PSP 0x00000008 /* pad short packets */ | ||
316 | #define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ | ||
317 | #define E1000_TCTL_COLD 0x003ff000 /* collision distance */ | ||
318 | #define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ | ||
319 | #define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ | ||
320 | |||
321 | /* Transmit Arbitration Count */ | ||
322 | |||
323 | /* SerDes Control */ | ||
324 | #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 | ||
325 | |||
326 | /* Receive Checksum Control */ | ||
327 | #define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ | ||
328 | #define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ | ||
329 | |||
330 | /* Header split receive */ | ||
331 | #define E1000_RFCTL_NFSW_DIS 0x00000040 | ||
332 | #define E1000_RFCTL_NFSR_DIS 0x00000080 | ||
333 | #define E1000_RFCTL_ACK_DIS 0x00001000 | ||
334 | #define E1000_RFCTL_EXTEN 0x00008000 | ||
335 | #define E1000_RFCTL_IPV6_EX_DIS 0x00010000 | ||
336 | #define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 | ||
337 | |||
338 | /* Collision related configuration parameters */ | ||
339 | #define E1000_COLLISION_THRESHOLD 15 | ||
340 | #define E1000_CT_SHIFT 4 | ||
341 | #define E1000_COLLISION_DISTANCE 63 | ||
342 | #define E1000_COLD_SHIFT 12 | ||
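These constants are meant to be shifted into the E1000_TCTL_CT and E1000_TCTL_COLD fields defined under Transmit Control above. A hedged sketch of composing such a value (illustrative, not the driver's TCTL setup; assumes defines.h is included):

	/* 15 << 4 = 0x0F0 fits in E1000_TCTL_CT (0x00000ff0) and
	 * 63 << 12 = 0x3F000 fits in E1000_TCTL_COLD (0x003ff000). */
	unsigned int tctl = E1000_TCTL_EN | E1000_TCTL_PSP |
			    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT) |
			    (E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);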
343 | |||
344 | /* Default values for the transmit IPG register */ | ||
345 | #define DEFAULT_82543_TIPG_IPGT_COPPER 8 | ||
346 | |||
347 | #define E1000_TIPG_IPGT_MASK 0x000003FF | ||
348 | |||
349 | #define DEFAULT_82543_TIPG_IPGR1 8 | ||
350 | #define E1000_TIPG_IPGR1_SHIFT 10 | ||
351 | |||
352 | #define DEFAULT_82543_TIPG_IPGR2 6 | ||
353 | #define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 | ||
354 | #define E1000_TIPG_IPGR2_SHIFT 20 | ||
355 | |||
356 | #define MAX_JUMBO_FRAME_SIZE 0x3F00 | ||
357 | |||
358 | /* Extended Configuration Control and Size */ | ||
359 | #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 | ||
360 | #define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 | ||
361 | #define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 | ||
362 | #define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 | ||
363 | #define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 | ||
364 | #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 | ||
365 | #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 | ||
366 | #define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 | ||
367 | #define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 | ||
368 | |||
369 | #define E1000_PHY_CTRL_D0A_LPLU 0x00000002 | ||
370 | #define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 | ||
371 | #define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 | ||
372 | #define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 | ||
373 | |||
374 | #define E1000_KABGTXD_BGSQLBIAS 0x00050000 | ||
375 | |||
376 | /* PBA constants */ | ||
377 | #define E1000_PBA_8K 0x0008 /* 8KB */ | ||
378 | #define E1000_PBA_16K 0x0010 /* 16KB */ | ||
379 | |||
380 | #define E1000_PBS_16K E1000_PBA_16K | ||
381 | |||
382 | #define IFS_MAX 80 | ||
383 | #define IFS_MIN 40 | ||
384 | #define IFS_RATIO 4 | ||
385 | #define IFS_STEP 10 | ||
386 | #define MIN_NUM_XMITS 1000 | ||
387 | |||
388 | /* SW Semaphore Register */ | ||
389 | #define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ | ||
390 | #define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ | ||
391 | #define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ | ||
392 | |||
393 | #define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ | ||
394 | |||
395 | /* Interrupt Cause Read */ | ||
396 | #define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ | ||
397 | #define E1000_ICR_LSC 0x00000004 /* Link Status Change */ | ||
398 | #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ | ||
399 | #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ | ||
400 | #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ | ||
401 | #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit is asserted, the driver should claim the interrupt */ | ||
402 | #define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ | ||
403 | #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ | ||
404 | #define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ | ||
405 | #define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ | ||
406 | #define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ | ||
407 | |||
408 | /* PBA ECC Register */ | ||
409 | #define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ | ||
410 | #define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */ | ||
411 | #define E1000_PBA_ECC_CORR_EN 0x00000001 /* ECC correction enable */ | ||
412 | #define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */ | ||
413 | #define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */ | ||
414 | |||
415 | /* | ||
416 | * This defines the bits that are set in the Interrupt Mask | ||
417 | * Set/Read Register. Each bit is documented below: | ||
418 | * o RXT0 = Receiver Timer Interrupt (ring 0) | ||
419 | * o TXDW = Transmit Descriptor Written Back | ||
420 | * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) | ||
421 | * o RXSEQ = Receive Sequence Error | ||
422 | * o LSC = Link Status Change | ||
423 | */ | ||
424 | #define IMS_ENABLE_MASK ( \ | ||
425 | E1000_IMS_RXT0 | \ | ||
426 | E1000_IMS_TXDW | \ | ||
427 | E1000_IMS_RXDMT0 | \ | ||
428 | E1000_IMS_RXSEQ | \ | ||
429 | E1000_IMS_LSC) | ||
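Expanded through the ICR bit values defined above, the mask is a single constant (a worked expansion, not additional driver code):

	/* TXDW | LSC  | RXSEQ | RXDMT0 | RXT0 =
	 * 0x01 | 0x04 | 0x08  | 0x10   | 0x80 = 0x0000009D; writing this to
	 * the Interrupt Mask Set register unmasks exactly the causes listed
	 * in the comment above. */
	unsigned int ims = E1000_ICR_TXDW | E1000_ICR_LSC | E1000_ICR_RXSEQ |
			   E1000_ICR_RXDMT0 | E1000_ICR_RXT0;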
430 | |||
431 | /* Interrupt Mask Set */ | ||
432 | #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | ||
433 | #define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ | ||
434 | #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ | ||
435 | #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ | ||
436 | #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ | ||
437 | #define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ | ||
438 | #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ | ||
439 | #define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ | ||
440 | #define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */ | ||
441 | #define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */ | ||
442 | |||
443 | /* Interrupt Cause Set */ | ||
444 | #define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ | ||
445 | #define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ | ||
446 | #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ | ||
447 | |||
448 | /* Transmit Descriptor Control */ | ||
449 | #define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ | ||
450 | #define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ | ||
451 | #define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ | ||
452 | #define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ | ||
453 | #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ | ||
454 | #define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ | ||
455 | /* Enable the counting of desc. still to be processed. */ | ||
456 | #define E1000_TXDCTL_COUNT_DESC 0x00400000 | ||
457 | |||
458 | /* Flow Control Constants */ | ||
459 | #define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 | ||
460 | #define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 | ||
461 | #define FLOW_CONTROL_TYPE 0x8808 | ||
462 | |||
463 | /* 802.1q VLAN Packet Size */ | ||
464 | #define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ | ||
465 | |||
466 | /* Receive Address */ | ||
467 | /* | ||
468 | * Number of high/low register pairs in the RAR. The RAR (Receive Address | ||
469 | * Registers) holds the directed and multicast addresses that we monitor. | ||
470 | * Technically, we have 16 spots. However, we reserve one of these spots | ||
471 | * (RAR[15]) for our directed address used by controllers with | ||
472 | * manageability enabled, allowing us room for 15 multicast addresses. | ||
473 | */ | ||
474 | #define E1000_RAR_ENTRIES 15 | ||
475 | #define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ | ||
476 | #define E1000_RAL_MAC_ADDR_LEN 4 | ||
477 | #define E1000_RAH_MAC_ADDR_LEN 2 | ||
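As a rough illustration of the RAL/RAH split above (4 + 2 address bytes plus the valid bit), a 6-byte MAC address maps onto one register pair as sketched below; pack_rar() is a made-up helper, not the driver's e1000e_rar_set(), and it assumes defines.h is included for E1000_RAH_AV.

	#include <stdint.h>

	/* Pack a 6-byte MAC into the low/high halves of one receive-address
	 * register pair and mark the entry valid. */
	static void pack_rar(const uint8_t addr[6], uint32_t *ral, uint32_t *rah)
	{
		*ral = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
		       ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
		*rah = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
		*rah |= E1000_RAH_AV;	/* entry contains a valid address */
	}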
478 | |||
479 | /* Error Codes */ | ||
480 | #define E1000_ERR_NVM 1 | ||
481 | #define E1000_ERR_PHY 2 | ||
482 | #define E1000_ERR_CONFIG 3 | ||
483 | #define E1000_ERR_PARAM 4 | ||
484 | #define E1000_ERR_MAC_INIT 5 | ||
485 | #define E1000_ERR_PHY_TYPE 6 | ||
486 | #define E1000_ERR_RESET 9 | ||
487 | #define E1000_ERR_MASTER_REQUESTS_PENDING 10 | ||
488 | #define E1000_ERR_HOST_INTERFACE_COMMAND 11 | ||
489 | #define E1000_BLK_PHY_RESET 12 | ||
490 | #define E1000_ERR_SWFW_SYNC 13 | ||
491 | #define E1000_NOT_IMPLEMENTED 14 | ||
492 | #define E1000_ERR_INVALID_ARGUMENT 16 | ||
493 | #define E1000_ERR_NO_SPACE 17 | ||
494 | #define E1000_ERR_NVM_PBA_SECTION 18 | ||
495 | |||
496 | /* Loop limit on how long we wait for auto-negotiation to complete */ | ||
497 | #define FIBER_LINK_UP_LIMIT 50 | ||
498 | #define COPPER_LINK_UP_LIMIT 10 | ||
499 | #define PHY_AUTO_NEG_LIMIT 45 | ||
500 | #define PHY_FORCE_LIMIT 20 | ||
501 | /* Number of 100-microsecond intervals we wait for PCI Express master disable */ | ||
502 | #define MASTER_DISABLE_TIMEOUT 800 | ||
503 | /* Number of milliseconds we wait for PHY configuration done after MAC reset */ | ||
504 | #define PHY_CFG_TIMEOUT 100 | ||
505 | /* Number of 2-millisecond intervals we wait to acquire MDIO ownership. */ | ||
506 | #define MDIO_OWNERSHIP_TIMEOUT 10 | ||
507 | /* Number of milliseconds for NVM auto read done after MAC reset. */ | ||
508 | #define AUTO_READ_DONE_TIMEOUT 10 | ||
509 | |||
510 | /* Flow Control */ | ||
511 | #define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ | ||
512 | #define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ | ||
513 | #define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ | ||
514 | |||
515 | /* Transmit Configuration Word */ | ||
516 | #define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ | ||
517 | #define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ | ||
518 | #define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ | ||
519 | #define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ | ||
520 | #define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ | ||
521 | |||
522 | /* Receive Configuration Word */ | ||
523 | #define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ | ||
524 | #define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ | ||
525 | #define E1000_RXCW_C 0x20000000 /* Receive config */ | ||
526 | #define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ | ||
527 | |||
528 | /* PCI Express Control */ | ||
529 | #define E1000_GCR_RXD_NO_SNOOP 0x00000001 | ||
530 | #define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 | ||
531 | #define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 | ||
532 | #define E1000_GCR_TXD_NO_SNOOP 0x00000008 | ||
533 | #define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 | ||
534 | #define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 | ||
535 | |||
536 | #define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ | ||
537 | E1000_GCR_RXDSCW_NO_SNOOP | \ | ||
538 | E1000_GCR_RXDSCR_NO_SNOOP | \ | ||
539 | E1000_GCR_TXD_NO_SNOOP | \ | ||
540 | E1000_GCR_TXDSCW_NO_SNOOP | \ | ||
541 | E1000_GCR_TXDSCR_NO_SNOOP) | ||
542 | |||
543 | /* PHY Control Register */ | ||
544 | #define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ | ||
545 | #define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ | ||
546 | #define MII_CR_POWER_DOWN 0x0800 /* Power down */ | ||
547 | #define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ | ||
548 | #define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ | ||
549 | #define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ | ||
550 | #define MII_CR_SPEED_1000 0x0040 | ||
551 | #define MII_CR_SPEED_100 0x2000 | ||
552 | #define MII_CR_SPEED_10 0x0000 | ||
553 | |||
554 | /* PHY Status Register */ | ||
555 | #define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ | ||
556 | #define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ | ||
557 | |||
558 | /* Autoneg Advertisement Register */ | ||
559 | #define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ | ||
560 | #define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ | ||
561 | #define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ | ||
562 | #define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ | ||
563 | #define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ | ||
564 | #define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ | ||
565 | |||
566 | /* Link Partner Ability Register (Base Page) */ | ||
567 | #define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ | ||
568 | #define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ | ||
569 | |||
570 | /* Autoneg Expansion Register */ | ||
571 | #define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ | ||
572 | |||
573 | /* 1000BASE-T Control Register */ | ||
574 | #define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ | ||
575 | #define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ | ||
576 | /* 0=DTE device */ | ||
577 | #define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ | ||
578 | /* 0=Configure PHY as Slave */ | ||
579 | #define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ | ||
580 | /* 0=Automatic Master/Slave config */ | ||
581 | |||
582 | /* 1000BASE-T Status Register */ | ||
583 | #define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ | ||
584 | #define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ | ||
585 | |||
586 | |||
587 | /* PHY 1000 MII Register/Bit Definitions */ | ||
588 | /* PHY Registers defined by IEEE */ | ||
589 | #define PHY_CONTROL 0x00 /* Control Register */ | ||
590 | #define PHY_STATUS 0x01 /* Status Register */ | ||
591 | #define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ | ||
592 | #define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ | ||
593 | #define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ | ||
594 | #define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ | ||
595 | #define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ | ||
596 | #define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ | ||
597 | #define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ | ||
598 | #define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ | ||
599 | |||
600 | #define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ | ||
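The link bit in PHY_STATUS is latched low per the MII specification, which is why PHY status is normally read twice. A small sketch assuming a hypothetical phy_read() MDIO helper (not a driver API; PHY_STATUS and MII_SR_LINK_STATUS are the defines above):

	extern unsigned int phy_read(unsigned int reg);	/* hypothetical MDIO read */

	static int phy_link_up(void)
	{
		unsigned int status;

		status = phy_read(PHY_STATUS);	/* first read clears the latch */
		status = phy_read(PHY_STATUS);	/* second read is current state */
		return (status & MII_SR_LINK_STATUS) != 0;
	}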
601 | |||
602 | /* NVM Control */ | ||
603 | #define E1000_EECD_SK 0x00000001 /* NVM Clock */ | ||
604 | #define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ | ||
605 | #define E1000_EECD_DI 0x00000004 /* NVM Data In */ | ||
606 | #define E1000_EECD_DO 0x00000008 /* NVM Data Out */ | ||
607 | #define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ | ||
608 | #define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ | ||
609 | #define E1000_EECD_PRES 0x00000100 /* NVM Present */ | ||
610 | #define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ | ||
611 | /* NVM Addressing bits based on type (0-small, 1-large) */ | ||
612 | #define E1000_EECD_ADDR_BITS 0x00000400 | ||
613 | #define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ | ||
614 | #define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ | ||
615 | #define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ | ||
616 | #define E1000_EECD_SIZE_EX_SHIFT 11 | ||
617 | #define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ | ||
618 | #define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ | ||
619 | #define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ | ||
620 | #define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) | ||
621 | |||
622 | #define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */ | ||
623 | #define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ | ||
624 | #define E1000_NVM_RW_REG_START 1 /* Start operation */ | ||
625 | #define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ | ||
626 | #define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ | ||
627 | #define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ | ||
628 | #define E1000_FLASH_UPDATES 2000 | ||
629 | |||
630 | /* NVM Word Offsets */ | ||
631 | #define NVM_COMPAT 0x0003 | ||
632 | #define NVM_ID_LED_SETTINGS 0x0004 | ||
633 | #define NVM_INIT_CONTROL2_REG 0x000F | ||
634 | #define NVM_INIT_CONTROL3_PORT_B 0x0014 | ||
635 | #define NVM_INIT_3GIO_3 0x001A | ||
636 | #define NVM_INIT_CONTROL3_PORT_A 0x0024 | ||
637 | #define NVM_CFG 0x0012 | ||
638 | #define NVM_ALT_MAC_ADDR_PTR 0x0037 | ||
639 | #define NVM_CHECKSUM_REG 0x003F | ||
640 | |||
641 | #define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ | ||
642 | |||
643 | #define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */ | ||
644 | #define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */ | ||
645 | |||
646 | /* Mask bits for fields in Word 0x0f of the NVM */ | ||
647 | #define NVM_WORD0F_PAUSE_MASK 0x3000 | ||
648 | #define NVM_WORD0F_PAUSE 0x1000 | ||
649 | #define NVM_WORD0F_ASM_DIR 0x2000 | ||
650 | |||
651 | /* Mask bits for fields in Word 0x1a of the NVM */ | ||
652 | #define NVM_WORD1A_ASPM_MASK 0x000C | ||
653 | |||
654 | /* Mask bits for fields in Word 0x03 of the EEPROM */ | ||
655 | #define NVM_COMPAT_LOM 0x0800 | ||
656 | |||
657 | /* length of string needed to store PBA number */ | ||
658 | #define E1000_PBANUM_LENGTH 11 | ||
659 | |||
660 | /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ | ||
661 | #define NVM_SUM 0xBABA | ||
662 | |||
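The 0xBABA convention above means the word stored at NVM_CHECKSUM_REG is chosen so that the 16-bit sum of words 0x00 through NVM_CHECKSUM_REG comes out to NVM_SUM. A minimal user-space sketch of that check follows; the nvm_image array is a stand-in for data returned by the NVM read op, not the driver's actual code path.

#include <stdint.h>
#include <stdio.h>

#define NVM_CHECKSUM_REG 0x003F
#define NVM_SUM          0xBABA

/* returns non-zero when the 16-bit sum of all words equals NVM_SUM */
static int nvm_checksum_ok(const uint16_t *nvm_image)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i <= NVM_CHECKSUM_REG; i++)
		sum += nvm_image[i];

	return sum == NVM_SUM;
}

int main(void)
{
	uint16_t nvm_image[NVM_CHECKSUM_REG + 1] = { 0x1234, 0x5678 };
	uint16_t sum = 0;
	int i;

	/* fill in the checksum word the same way an NVM update would */
	for (i = 0; i < NVM_CHECKSUM_REG; i++)
		sum += nvm_image[i];
	nvm_image[NVM_CHECKSUM_REG] = (uint16_t)(NVM_SUM - sum);

	printf("checksum valid: %d\n", nvm_checksum_ok(nvm_image));
	return 0;
}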
663 | /* PBA (printed board assembly) number words */ | ||
664 | #define NVM_PBA_OFFSET_0 8 | ||
665 | #define NVM_PBA_OFFSET_1 9 | ||
666 | #define NVM_PBA_PTR_GUARD 0xFAFA | ||
667 | #define NVM_WORD_SIZE_BASE_SHIFT 6 | ||
668 | |||
669 | /* NVM Commands - SPI */ | ||
670 | #define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ | ||
671 | #define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ | ||
672 | #define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ | ||
673 | #define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ | ||
674 | #define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ | ||
675 | #define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ | ||
676 | |||
677 | /* SPI NVM Status Register */ | ||
678 | #define NVM_STATUS_RDY_SPI 0x01 | ||
679 | |||
680 | /* Word definitions for ID LED Settings */ | ||
681 | #define ID_LED_RESERVED_0000 0x0000 | ||
682 | #define ID_LED_RESERVED_FFFF 0xFFFF | ||
683 | #define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ | ||
684 | (ID_LED_OFF1_OFF2 << 8) | \ | ||
685 | (ID_LED_DEF1_DEF2 << 4) | \ | ||
686 | (ID_LED_DEF1_DEF2)) | ||
687 | #define ID_LED_DEF1_DEF2 0x1 | ||
688 | #define ID_LED_DEF1_ON2 0x2 | ||
689 | #define ID_LED_DEF1_OFF2 0x3 | ||
690 | #define ID_LED_ON1_DEF2 0x4 | ||
691 | #define ID_LED_ON1_ON2 0x5 | ||
692 | #define ID_LED_ON1_OFF2 0x6 | ||
693 | #define ID_LED_OFF1_DEF2 0x7 | ||
694 | #define ID_LED_OFF1_ON2 0x8 | ||
695 | #define ID_LED_OFF1_OFF2 0x9 | ||
696 | |||

697 | #define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF | ||
698 | #define IGP_ACTIVITY_LED_ENABLE 0x0300 | ||
699 | #define IGP_LED3_MODE 0x07000000 | ||
700 | |||
701 | /* PCI/PCI-X/PCI-EX Config space */ | ||
702 | #define PCI_HEADER_TYPE_REGISTER 0x0E | ||
703 | #define PCIE_LINK_STATUS 0x12 | ||
704 | |||
705 | #define PCI_HEADER_TYPE_MULTIFUNC 0x80 | ||
706 | #define PCIE_LINK_WIDTH_MASK 0x3F0 | ||
707 | #define PCIE_LINK_WIDTH_SHIFT 4 | ||
708 | |||
709 | #define PHY_REVISION_MASK 0xFFFFFFF0 | ||
710 | #define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ | ||
711 | #define MAX_PHY_MULTI_PAGE_REG 0xF | ||
712 | |||
713 | /* Valid PHY IDs. */ | ||
714 | /* | ||
715 | * I = Integrated | ||
716 | * E = External | ||
717 | */ | ||
718 | #define M88E1000_E_PHY_ID 0x01410C50 | ||
719 | #define M88E1000_I_PHY_ID 0x01410C30 | ||
720 | #define M88E1011_I_PHY_ID 0x01410C20 | ||
721 | #define IGP01E1000_I_PHY_ID 0x02A80380 | ||
722 | #define M88E1111_I_PHY_ID 0x01410CC0 | ||
723 | #define GG82563_E_PHY_ID 0x01410CA0 | ||
724 | #define IGP03E1000_E_PHY_ID 0x02A80390 | ||
725 | #define IFE_E_PHY_ID 0x02A80330 | ||
726 | #define IFE_PLUS_E_PHY_ID 0x02A80320 | ||
727 | #define IFE_C_E_PHY_ID 0x02A80310 | ||
728 | #define BME1000_E_PHY_ID 0x01410CB0 | ||
729 | #define BME1000_E_PHY_ID_R2 0x01410CB1 | ||
730 | #define I82577_E_PHY_ID 0x01540050 | ||
731 | #define I82578_E_PHY_ID 0x004DD040 | ||
732 | #define I82579_E_PHY_ID 0x01540090 | ||
733 | |||
734 | /* M88E1000 Specific Registers */ | ||
735 | #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ | ||
736 | #define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ | ||
737 | #define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ | ||
738 | |||
739 | #define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ | ||
740 | #define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ | ||
741 | |||
742 | /* M88E1000 PHY Specific Control Register */ | ||
743 | #define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ | ||
744 | #define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ | ||
745 | /* Manual MDI configuration */ | ||
746 | #define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ | ||
747 | /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ | ||
748 | #define M88E1000_PSCR_AUTO_X_1000T 0x0040 | ||
749 | /* Auto crossover enabled all speeds */ | ||
750 | #define M88E1000_PSCR_AUTO_X_MODE 0x0060 | ||
751 | /* | ||
752 | * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold) | ||
753 | * 0=Normal 10BASE-T Rx Threshold | ||
754 | */ | ||
755 | #define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ | ||
756 | |||
757 | /* M88E1000 PHY Specific Status Register */ | ||
758 | #define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ | ||
759 | #define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ | ||
760 | #define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ | ||
761 | /* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */ | ||
762 | #define M88E1000_PSSR_CABLE_LENGTH 0x0380 | ||
763 | #define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 15:14 */ | ||
764 | #define M88E1000_PSSR_1000MBS 0x8000 /* bits 15:14 = 10b -> 1000 Mb/s */ | ||
765 | |||
766 | #define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 | ||
767 | |||
768 | /* | ||
769 | * Number of times we will attempt to autonegotiate before downshifting if we | ||
770 | * are the master | ||
771 | */ | ||
772 | #define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 | ||
773 | #define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 | ||
774 | /* | ||
775 | * Number of times we will attempt to autonegotiate before downshifting if we | ||
776 | * are the slave | ||
777 | */ | ||
778 | #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 | ||
779 | #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 | ||
780 | #define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ | ||
781 | |||
782 | /* M88EC018 Rev 2 specific DownShift settings */ | ||
783 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 | ||
784 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 | ||
785 | |||
786 | #define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020 | ||
787 | #define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C | ||
788 | |||
789 | /* BME1000 PHY Specific Control Register */ | ||
790 | #define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ | ||
791 | |||
792 | |||
793 | #define PHY_PAGE_SHIFT 5 | ||
794 | #define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ | ||
795 | ((reg) & MAX_PHY_REG_ADDRESS)) | ||
796 | |||
797 | /* | ||
798 | * Bits... | ||
799 | * 15-5: page | ||
800 | * 4-0: register offset | ||
801 | */ | ||
802 | #define GG82563_PAGE_SHIFT 5 | ||
803 | #define GG82563_REG(page, reg) \ | ||
804 | (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) | ||
805 | #define GG82563_MIN_ALT_REG 30 | ||
806 | |||
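As the bit layout comment above says, the low five bits of a packed address carry the register offset and the upper bits carry the page. A small stand-alone sketch of that packing; the page/register pair used is the Kumeran Mode Control register defined later in this file.

#include <stdio.h>

#define MAX_PHY_REG_ADDRESS 0x1F
#define GG82563_PAGE_SHIFT  5
#define GG82563_REG(page, reg) \
	(((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))

int main(void)
{
	/* Kumeran Mode Control: page 193, register 16 -> (193 << 5) | 16 */
	unsigned int addr = GG82563_REG(193, 16);

	printf("packed addr: 0x%04x (page %u, offset %u)\n",
	       addr, addr >> GG82563_PAGE_SHIFT, addr & MAX_PHY_REG_ADDRESS);
	return 0;
}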
807 | /* GG82563 Specific Registers */ | ||
808 | #define GG82563_PHY_SPEC_CTRL \ | ||
809 | GG82563_REG(0, 16) /* PHY Specific Control */ | ||
810 | #define GG82563_PHY_PAGE_SELECT \ | ||
811 | GG82563_REG(0, 22) /* Page Select */ | ||
812 | #define GG82563_PHY_SPEC_CTRL_2 \ | ||
813 | GG82563_REG(0, 26) /* PHY Specific Control 2 */ | ||
814 | #define GG82563_PHY_PAGE_SELECT_ALT \ | ||
815 | GG82563_REG(0, 29) /* Alternate Page Select */ | ||
816 | |||
817 | #define GG82563_PHY_MAC_SPEC_CTRL \ | ||
818 | GG82563_REG(2, 21) /* MAC Specific Control Register */ | ||
819 | |||
820 | #define GG82563_PHY_DSP_DISTANCE \ | ||
821 | GG82563_REG(5, 26) /* DSP Distance */ | ||
822 | |||
823 | /* Page 193 - Port Control Registers */ | ||
824 | #define GG82563_PHY_KMRN_MODE_CTRL \ | ||
825 | GG82563_REG(193, 16) /* Kumeran Mode Control */ | ||
826 | #define GG82563_PHY_PWR_MGMT_CTRL \ | ||
827 | GG82563_REG(193, 20) /* Power Management Control */ | ||
828 | |||
829 | /* Page 194 - KMRN Registers */ | ||
830 | #define GG82563_PHY_INBAND_CTRL \ | ||
831 | GG82563_REG(194, 18) /* Inband Control */ | ||
832 | |||
833 | /* MDI Control */ | ||
834 | #define E1000_MDIC_REG_SHIFT 16 | ||
835 | #define E1000_MDIC_PHY_SHIFT 21 | ||
836 | #define E1000_MDIC_OP_WRITE 0x04000000 | ||
837 | #define E1000_MDIC_OP_READ 0x08000000 | ||
838 | #define E1000_MDIC_READY 0x10000000 | ||
839 | #define E1000_MDIC_ERROR 0x40000000 | ||
840 | |||
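These MDIC fields combine into a single command word for the MDIO state machine: the register offset and PHY address are shifted into place, an opcode selects read or write, and the hardware reports completion through the READY and ERROR bits with read data in the low 16 bits. A hedged sketch of building a read command; the phy_addr and offset values are illustrative, not taken from this file.

#include <stdio.h>

#define E1000_MDIC_REG_SHIFT 16
#define E1000_MDIC_PHY_SHIFT 21
#define E1000_MDIC_OP_READ   0x08000000
#define E1000_MDIC_READY     0x10000000
#define E1000_MDIC_ERROR     0x40000000

int main(void)
{
	unsigned int offset = 0x02;   /* example register offset */
	unsigned int phy_addr = 1;    /* example PHY address */
	unsigned int mdic;

	mdic = (offset << E1000_MDIC_REG_SHIFT) |
	       (phy_addr << E1000_MDIC_PHY_SHIFT) |
	       E1000_MDIC_OP_READ;
	printf("MDIC command: 0x%08x\n", mdic);

	/* after writing this to the MDIC register, the driver would poll
	 * until E1000_MDIC_READY is set, check E1000_MDIC_ERROR, and take
	 * the read data from the low 16 bits of the completed value */
	return 0;
}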
841 | /* SerDes Control */ | ||
842 | #define E1000_GEN_POLL_TIMEOUT 640 | ||
843 | |||
844 | #endif /* _E1000_DEFINES_H_ */ | ||
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h new file mode 100644 index 000000000000..638d175792cf --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/e1000.h | |||
@@ -0,0 +1,736 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/1000 Linux driver | ||
4 | Copyright(c) 1999 - 2011 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | /* Linux PRO/1000 Ethernet Driver main header file */ | ||
30 | |||
31 | #ifndef _E1000_H_ | ||
32 | #define _E1000_H_ | ||
33 | |||
34 | #include <linux/bitops.h> | ||
35 | #include <linux/types.h> | ||
36 | #include <linux/timer.h> | ||
37 | #include <linux/workqueue.h> | ||
38 | #include <linux/io.h> | ||
39 | #include <linux/netdevice.h> | ||
40 | #include <linux/pci.h> | ||
41 | #include <linux/pci-aspm.h> | ||
42 | #include <linux/crc32.h> | ||
43 | #include <linux/if_vlan.h> | ||
44 | |||
45 | #include "hw.h" | ||
46 | |||
47 | struct e1000_info; | ||
48 | |||
49 | #define e_dbg(format, arg...) \ | ||
50 | netdev_dbg(hw->adapter->netdev, format, ## arg) | ||
51 | #define e_err(format, arg...) \ | ||
52 | netdev_err(adapter->netdev, format, ## arg) | ||
53 | #define e_info(format, arg...) \ | ||
54 | netdev_info(adapter->netdev, format, ## arg) | ||
55 | #define e_warn(format, arg...) \ | ||
56 | netdev_warn(adapter->netdev, format, ## arg) | ||
57 | #define e_notice(format, arg...) \ | ||
58 | netdev_notice(adapter->netdev, format, ## arg) | ||
59 | |||
60 | |||
61 | /* Interrupt modes, as used by the IntMode parameter */ | ||
62 | #define E1000E_INT_MODE_LEGACY 0 | ||
63 | #define E1000E_INT_MODE_MSI 1 | ||
64 | #define E1000E_INT_MODE_MSIX 2 | ||
65 | |||
66 | /* Tx/Rx descriptor defines */ | ||
67 | #define E1000_DEFAULT_TXD 256 | ||
68 | #define E1000_MAX_TXD 4096 | ||
69 | #define E1000_MIN_TXD 64 | ||
70 | |||
71 | #define E1000_DEFAULT_RXD 256 | ||
72 | #define E1000_MAX_RXD 4096 | ||
73 | #define E1000_MIN_RXD 64 | ||
74 | |||
75 | #define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ | ||
76 | #define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ | ||
77 | |||
78 | /* Early Receive defines */ | ||
79 | #define E1000_ERT_2048 0x100 | ||
80 | |||
81 | #define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */ | ||
82 | |||
83 | /* How many Tx descriptors do we need before calling netif_wake_queue? */ | ||
84 | /* How many Rx buffers do we bundle into one write to the hardware? */ | ||
85 | #define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ | ||
86 | |||
87 | #define AUTO_ALL_MODES 0 | ||
88 | #define E1000_EEPROM_APME 0x0400 | ||
89 | |||
90 | #define E1000_MNG_VLAN_NONE (-1) | ||
91 | |||
92 | /* Number of packet split data buffers (not including the header buffer) */ | ||
93 | #define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) | ||
94 | |||
95 | #define DEFAULT_JUMBO 9234 | ||
96 | |||
97 | /* BM/HV Specific Registers */ | ||
98 | #define BM_PORT_CTRL_PAGE 769 | ||
99 | |||
100 | #define PHY_UPPER_SHIFT 21 | ||
101 | #define BM_PHY_REG(page, reg) \ | ||
102 | (((reg) & MAX_PHY_REG_ADDRESS) |\ | ||
103 | (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\ | ||
104 | (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT))) | ||
105 | |||
106 | /* PHY Wakeup Registers and defines */ | ||
107 | #define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17) | ||
108 | #define BM_RCTL PHY_REG(BM_WUC_PAGE, 0) | ||
109 | #define BM_WUC PHY_REG(BM_WUC_PAGE, 1) | ||
110 | #define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) | ||
111 | #define BM_WUS PHY_REG(BM_WUC_PAGE, 3) | ||
112 | #define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2))) | ||
113 | #define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2))) | ||
114 | #define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2))) | ||
115 | #define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2))) | ||
116 | #define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1))) | ||
117 | |||
118 | #define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */ | ||
119 | #define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */ | ||
120 | #define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */ | ||
121 | #define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */ | ||
122 | #define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */ | ||
123 | #define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */ | ||
124 | #define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */ | ||
125 | |||
126 | #define HV_STATS_PAGE 778 | ||
127 | #define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */ | ||
128 | #define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17) | ||
129 | #define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */ | ||
130 | #define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) | ||
131 | #define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */ | ||
132 | #define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) | ||
133 | #define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */ | ||
134 | #define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) | ||
135 | #define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */ | ||
136 | #define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) | ||
137 | #define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ | ||
138 | #define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) | ||
139 | #define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */ | ||
140 | #define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) | ||
141 | |||
142 | #define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ | ||
143 | |||
144 | /* BM PHY Copper Specific Status */ | ||
145 | #define BM_CS_STATUS 17 | ||
146 | #define BM_CS_STATUS_LINK_UP 0x0400 | ||
147 | #define BM_CS_STATUS_RESOLVED 0x0800 | ||
148 | #define BM_CS_STATUS_SPEED_MASK 0xC000 | ||
149 | #define BM_CS_STATUS_SPEED_1000 0x8000 | ||
150 | |||
151 | /* 82577 Mobile Phy Status Register */ | ||
152 | #define HV_M_STATUS 26 | ||
153 | #define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 | ||
154 | #define HV_M_STATUS_SPEED_MASK 0x0300 | ||
155 | #define HV_M_STATUS_SPEED_1000 0x0200 | ||
156 | #define HV_M_STATUS_LINK_UP 0x0040 | ||
157 | |||
158 | /* Time to wait before putting the device into D3 if there's no link (in ms). */ | ||
159 | #define LINK_TIMEOUT 100 | ||
160 | |||
161 | #define DEFAULT_RDTR 0 | ||
162 | #define DEFAULT_RADV 8 | ||
163 | #define BURST_RDTR 0x20 | ||
164 | #define BURST_RADV 0x20 | ||
165 | |||
166 | /* | ||
167 | * In the case of WTHRESH, it appears that at least the 82571/2 hardware | ||
168 | * writes back 4 descriptors when WTHRESH=5 and 3 descriptors when | ||
169 | * WTHRESH=4; since we want 64 bytes written back at a time, set it | ||
170 | * to 5. | ||
171 | */ | ||
172 | #define E1000_TXDCTL_DMA_BURST_ENABLE \ | ||
173 | (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ | ||
174 | E1000_TXDCTL_COUNT_DESC | \ | ||
175 | (5 << 16) | /* wthresh must be +1 more than desired */\ | ||
176 | (1 << 8) | /* hthresh */ \ | ||
177 | 0x1f) /* pthresh */ | ||
178 | |||
179 | #define E1000_RXDCTL_DMA_BURST_ENABLE \ | ||
180 | (0x01000000 | /* set descriptor granularity */ \ | ||
181 | (4 << 16) | /* set writeback threshold */ \ | ||
182 | (4 << 8) | /* set prefetch threshold */ \ | ||
183 | 0x20) /* set hthresh */ | ||
184 | |||
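The burst-enable value above packs three thresholds into TXDCTL: pthresh in bits 5:0, hthresh in bits 13:8 and wthresh in bits 21:16, which is why the macro ORs in 0x1f, (1 << 8) and (5 << 16). A stand-alone sketch that decodes those fields; E1000_TXDCTL_GRAN and E1000_TXDCTL_COUNT_DESC are defined elsewhere in the driver, so assumed stand-in values are used here.

#include <stdio.h>

/* stand-ins for E1000_TXDCTL_GRAN / E1000_TXDCTL_COUNT_DESC (assumed values) */
#define DEMO_TXDCTL_GRAN       0x01000000
#define DEMO_TXDCTL_COUNT_DESC 0x00400000

#define DEMO_TXDCTL_DMA_BURST_ENABLE \
	(DEMO_TXDCTL_GRAN | DEMO_TXDCTL_COUNT_DESC | (5 << 16) | (1 << 8) | 0x1f)

int main(void)
{
	unsigned int txdctl = DEMO_TXDCTL_DMA_BURST_ENABLE;

	/* pull the three threshold fields back out of the packed value */
	printf("pthresh=%u hthresh=%u wthresh=%u\n",
	       txdctl & 0x3f, (txdctl >> 8) & 0x3f, (txdctl >> 16) & 0x3f);
	return 0;
}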
185 | #define E1000_TIDV_FPD (1 << 31) | ||
186 | #define E1000_RDTR_FPD (1 << 31) | ||
187 | |||
188 | enum e1000_boards { | ||
189 | board_82571, | ||
190 | board_82572, | ||
191 | board_82573, | ||
192 | board_82574, | ||
193 | board_82583, | ||
194 | board_80003es2lan, | ||
195 | board_ich8lan, | ||
196 | board_ich9lan, | ||
197 | board_ich10lan, | ||
198 | board_pchlan, | ||
199 | board_pch2lan, | ||
200 | }; | ||
201 | |||
202 | struct e1000_ps_page { | ||
203 | struct page *page; | ||
204 | u64 dma; /* must be u64 - written to hw */ | ||
205 | }; | ||
206 | |||
207 | /* | ||
208 | * wrappers around a pointer to a socket buffer, | ||
209 | * so a DMA handle can be stored along with the buffer | ||
210 | */ | ||
211 | struct e1000_buffer { | ||
212 | dma_addr_t dma; | ||
213 | struct sk_buff *skb; | ||
214 | union { | ||
215 | /* Tx */ | ||
216 | struct { | ||
217 | unsigned long time_stamp; | ||
218 | u16 length; | ||
219 | u16 next_to_watch; | ||
220 | unsigned int segs; | ||
221 | unsigned int bytecount; | ||
222 | u16 mapped_as_page; | ||
223 | }; | ||
224 | /* Rx */ | ||
225 | struct { | ||
226 | /* arrays of page information for packet split */ | ||
227 | struct e1000_ps_page *ps_pages; | ||
228 | struct page *page; | ||
229 | }; | ||
230 | }; | ||
231 | }; | ||
232 | |||
233 | struct e1000_ring { | ||
234 | void *desc; /* pointer to ring memory */ | ||
235 | dma_addr_t dma; /* phys address of ring */ | ||
236 | unsigned int size; /* length of ring in bytes */ | ||
237 | unsigned int count; /* number of desc. in ring */ | ||
238 | |||
239 | u16 next_to_use; | ||
240 | u16 next_to_clean; | ||
241 | |||
242 | u16 head; | ||
243 | u16 tail; | ||
244 | |||
245 | /* array of buffer information structs */ | ||
246 | struct e1000_buffer *buffer_info; | ||
247 | |||
248 | char name[IFNAMSIZ + 5]; | ||
249 | u32 ims_val; | ||
250 | u32 itr_val; | ||
251 | u16 itr_register; | ||
252 | int set_itr; | ||
253 | |||
254 | struct sk_buff *rx_skb_top; | ||
255 | }; | ||
256 | |||
257 | /* PHY register snapshot values */ | ||
258 | struct e1000_phy_regs { | ||
259 | u16 bmcr; /* basic mode control register */ | ||
260 | u16 bmsr; /* basic mode status register */ | ||
261 | u16 advertise; /* auto-negotiation advertisement */ | ||
262 | u16 lpa; /* link partner ability register */ | ||
263 | u16 expansion; /* auto-negotiation expansion reg */ | ||
264 | u16 ctrl1000; /* 1000BASE-T control register */ | ||
265 | u16 stat1000; /* 1000BASE-T status register */ | ||
266 | u16 estatus; /* extended status register */ | ||
267 | }; | ||
268 | |||
269 | /* board specific private data structure */ | ||
270 | struct e1000_adapter { | ||
271 | struct timer_list watchdog_timer; | ||
272 | struct timer_list phy_info_timer; | ||
273 | struct timer_list blink_timer; | ||
274 | |||
275 | struct work_struct reset_task; | ||
276 | struct work_struct watchdog_task; | ||
277 | |||
278 | const struct e1000_info *ei; | ||
279 | |||
280 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; | ||
281 | u32 bd_number; | ||
282 | u32 rx_buffer_len; | ||
283 | u16 mng_vlan_id; | ||
284 | u16 link_speed; | ||
285 | u16 link_duplex; | ||
286 | u16 eeprom_vers; | ||
287 | |||
288 | /* track device up/down/testing state */ | ||
289 | unsigned long state; | ||
290 | |||
291 | /* Interrupt Throttle Rate */ | ||
292 | u32 itr; | ||
293 | u32 itr_setting; | ||
294 | u16 tx_itr; | ||
295 | u16 rx_itr; | ||
296 | |||
297 | /* | ||
298 | * Tx | ||
299 | */ | ||
300 | struct e1000_ring *tx_ring /* One per active queue */ | ||
301 | ____cacheline_aligned_in_smp; | ||
302 | |||
303 | struct napi_struct napi; | ||
304 | |||
305 | unsigned int restart_queue; | ||
306 | u32 txd_cmd; | ||
307 | |||
308 | bool detect_tx_hung; | ||
309 | u8 tx_timeout_factor; | ||
310 | |||
311 | u32 tx_int_delay; | ||
312 | u32 tx_abs_int_delay; | ||
313 | |||
314 | unsigned int total_tx_bytes; | ||
315 | unsigned int total_tx_packets; | ||
316 | unsigned int total_rx_bytes; | ||
317 | unsigned int total_rx_packets; | ||
318 | |||
319 | /* Tx stats */ | ||
320 | u64 tpt_old; | ||
321 | u64 colc_old; | ||
322 | u32 gotc; | ||
323 | u64 gotc_old; | ||
324 | u32 tx_timeout_count; | ||
325 | u32 tx_fifo_head; | ||
326 | u32 tx_head_addr; | ||
327 | u32 tx_fifo_size; | ||
328 | u32 tx_dma_failed; | ||
329 | |||
330 | /* | ||
331 | * Rx | ||
332 | */ | ||
333 | bool (*clean_rx) (struct e1000_adapter *adapter, | ||
334 | int *work_done, int work_to_do) | ||
335 | ____cacheline_aligned_in_smp; | ||
336 | void (*alloc_rx_buf) (struct e1000_adapter *adapter, | ||
337 | int cleaned_count, gfp_t gfp); | ||
338 | struct e1000_ring *rx_ring; | ||
339 | |||
340 | u32 rx_int_delay; | ||
341 | u32 rx_abs_int_delay; | ||
342 | |||
343 | /* Rx stats */ | ||
344 | u64 hw_csum_err; | ||
345 | u64 hw_csum_good; | ||
346 | u64 rx_hdr_split; | ||
347 | u32 gorc; | ||
348 | u64 gorc_old; | ||
349 | u32 alloc_rx_buff_failed; | ||
350 | u32 rx_dma_failed; | ||
351 | |||
352 | unsigned int rx_ps_pages; | ||
353 | u16 rx_ps_bsize0; | ||
354 | u32 max_frame_size; | ||
355 | u32 min_frame_size; | ||
356 | |||
357 | /* OS defined structs */ | ||
358 | struct net_device *netdev; | ||
359 | struct pci_dev *pdev; | ||
360 | |||
361 | /* structs defined in e1000_hw.h */ | ||
362 | struct e1000_hw hw; | ||
363 | |||
364 | spinlock_t stats64_lock; | ||
365 | struct e1000_hw_stats stats; | ||
366 | struct e1000_phy_info phy_info; | ||
367 | struct e1000_phy_stats phy_stats; | ||
368 | |||
369 | /* Snapshot of PHY registers */ | ||
370 | struct e1000_phy_regs phy_regs; | ||
371 | |||
372 | struct e1000_ring test_tx_ring; | ||
373 | struct e1000_ring test_rx_ring; | ||
374 | u32 test_icr; | ||
375 | |||
376 | u32 msg_enable; | ||
377 | unsigned int num_vectors; | ||
378 | struct msix_entry *msix_entries; | ||
379 | int int_mode; | ||
380 | u32 eiac_mask; | ||
381 | |||
382 | u32 eeprom_wol; | ||
383 | u32 wol; | ||
384 | u32 pba; | ||
385 | u32 max_hw_frame_size; | ||
386 | |||
387 | bool fc_autoneg; | ||
388 | |||
389 | unsigned int flags; | ||
390 | unsigned int flags2; | ||
391 | struct work_struct downshift_task; | ||
392 | struct work_struct update_phy_task; | ||
393 | struct work_struct print_hang_task; | ||
394 | |||
395 | bool idle_check; | ||
396 | int phy_hang_count; | ||
397 | }; | ||
398 | |||
399 | struct e1000_info { | ||
400 | enum e1000_mac_type mac; | ||
401 | unsigned int flags; | ||
402 | unsigned int flags2; | ||
403 | u32 pba; | ||
404 | u32 max_hw_frame_size; | ||
405 | s32 (*get_variants)(struct e1000_adapter *); | ||
406 | struct e1000_mac_operations *mac_ops; | ||
407 | struct e1000_phy_operations *phy_ops; | ||
408 | struct e1000_nvm_operations *nvm_ops; | ||
409 | }; | ||
410 | |||
411 | /* hardware capability, feature, and workaround flags */ | ||
412 | #define FLAG_HAS_AMT (1 << 0) | ||
413 | #define FLAG_HAS_FLASH (1 << 1) | ||
414 | #define FLAG_HAS_HW_VLAN_FILTER (1 << 2) | ||
415 | #define FLAG_HAS_WOL (1 << 3) | ||
416 | #define FLAG_HAS_ERT (1 << 4) | ||
417 | #define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) | ||
418 | #define FLAG_HAS_SWSM_ON_LOAD (1 << 6) | ||
419 | #define FLAG_HAS_JUMBO_FRAMES (1 << 7) | ||
420 | #define FLAG_READ_ONLY_NVM (1 << 8) | ||
421 | #define FLAG_IS_ICH (1 << 9) | ||
422 | #define FLAG_HAS_MSIX (1 << 10) | ||
423 | #define FLAG_HAS_SMART_POWER_DOWN (1 << 11) | ||
424 | #define FLAG_IS_QUAD_PORT_A (1 << 12) | ||
425 | #define FLAG_IS_QUAD_PORT (1 << 13) | ||
426 | #define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14) | ||
427 | #define FLAG_APME_IN_WUC (1 << 15) | ||
428 | #define FLAG_APME_IN_CTRL3 (1 << 16) | ||
429 | #define FLAG_APME_CHECK_PORT_B (1 << 17) | ||
430 | #define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18) | ||
431 | #define FLAG_NO_WAKE_UCAST (1 << 19) | ||
432 | #define FLAG_MNG_PT_ENABLED (1 << 20) | ||
433 | #define FLAG_RESET_OVERWRITES_LAA (1 << 21) | ||
434 | #define FLAG_TARC_SPEED_MODE_BIT (1 << 22) | ||
435 | #define FLAG_TARC_SET_BIT_ZERO (1 << 23) | ||
436 | #define FLAG_RX_NEEDS_RESTART (1 << 24) | ||
437 | #define FLAG_LSC_GIG_SPEED_DROP (1 << 25) | ||
438 | #define FLAG_SMART_POWER_DOWN (1 << 26) | ||
439 | #define FLAG_MSI_ENABLED (1 << 27) | ||
440 | #define FLAG_RX_CSUM_ENABLED (1 << 28) | ||
441 | #define FLAG_TSO_FORCE (1 << 29) | ||
442 | #define FLAG_RX_RESTART_NOW (1 << 30) | ||
443 | #define FLAG_MSI_TEST_FAILED (1 << 31) | ||
444 | |||
445 | /* CRC Stripping defines */ | ||
446 | #define FLAG2_CRC_STRIPPING (1 << 0) | ||
447 | #define FLAG2_HAS_PHY_WAKEUP (1 << 1) | ||
448 | #define FLAG2_IS_DISCARDING (1 << 2) | ||
449 | #define FLAG2_DISABLE_ASPM_L1 (1 << 3) | ||
450 | #define FLAG2_HAS_PHY_STATS (1 << 4) | ||
451 | #define FLAG2_HAS_EEE (1 << 5) | ||
452 | #define FLAG2_DMA_BURST (1 << 6) | ||
453 | #define FLAG2_DISABLE_ASPM_L0S (1 << 7) | ||
454 | #define FLAG2_DISABLE_AIM (1 << 8) | ||
455 | #define FLAG2_CHECK_PHY_HANG (1 << 9) | ||
456 | |||
457 | #define E1000_RX_DESC_PS(R, i) \ | ||
458 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) | ||
459 | #define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) | ||
460 | #define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) | ||
461 | #define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) | ||
462 | #define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) | ||
463 | |||
464 | enum e1000_state_t { | ||
465 | __E1000_TESTING, | ||
466 | __E1000_RESETTING, | ||
467 | __E1000_DOWN | ||
468 | }; | ||
469 | |||
470 | enum latency_range { | ||
471 | lowest_latency = 0, | ||
472 | low_latency = 1, | ||
473 | bulk_latency = 2, | ||
474 | latency_invalid = 255 | ||
475 | }; | ||
476 | |||
477 | extern char e1000e_driver_name[]; | ||
478 | extern const char e1000e_driver_version[]; | ||
479 | |||
480 | extern void e1000e_check_options(struct e1000_adapter *adapter); | ||
481 | extern void e1000e_set_ethtool_ops(struct net_device *netdev); | ||
482 | |||
483 | extern int e1000e_up(struct e1000_adapter *adapter); | ||
484 | extern void e1000e_down(struct e1000_adapter *adapter); | ||
485 | extern void e1000e_reinit_locked(struct e1000_adapter *adapter); | ||
486 | extern void e1000e_reset(struct e1000_adapter *adapter); | ||
487 | extern void e1000e_power_up_phy(struct e1000_adapter *adapter); | ||
488 | extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter); | ||
489 | extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); | ||
490 | extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); | ||
491 | extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); | ||
492 | extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, | ||
493 | struct rtnl_link_stats64 | ||
494 | *stats); | ||
495 | extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); | ||
496 | extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); | ||
497 | extern void e1000e_get_hw_control(struct e1000_adapter *adapter); | ||
498 | extern void e1000e_release_hw_control(struct e1000_adapter *adapter); | ||
499 | |||
500 | extern unsigned int copybreak; | ||
501 | |||
502 | extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw); | ||
503 | |||
504 | extern struct e1000_info e1000_82571_info; | ||
505 | extern struct e1000_info e1000_82572_info; | ||
506 | extern struct e1000_info e1000_82573_info; | ||
507 | extern struct e1000_info e1000_82574_info; | ||
508 | extern struct e1000_info e1000_82583_info; | ||
509 | extern struct e1000_info e1000_ich8_info; | ||
510 | extern struct e1000_info e1000_ich9_info; | ||
511 | extern struct e1000_info e1000_ich10_info; | ||
512 | extern struct e1000_info e1000_pch_info; | ||
513 | extern struct e1000_info e1000_pch2_info; | ||
514 | extern struct e1000_info e1000_es2_info; | ||
515 | |||
516 | extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, | ||
517 | u32 pba_num_size); | ||
518 | |||
519 | extern s32 e1000e_commit_phy(struct e1000_hw *hw); | ||
520 | |||
521 | extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw); | ||
522 | |||
523 | extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw); | ||
524 | extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); | ||
525 | |||
526 | extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); | ||
527 | extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, | ||
528 | bool state); | ||
529 | extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); | ||
530 | extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); | ||
531 | extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw); | ||
532 | extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw); | ||
533 | extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); | ||
534 | extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); | ||
535 | extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); | ||
536 | |||
537 | extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); | ||
538 | extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); | ||
539 | extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw); | ||
540 | extern s32 e1000e_setup_led_generic(struct e1000_hw *hw); | ||
541 | extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw); | ||
542 | extern s32 e1000e_led_on_generic(struct e1000_hw *hw); | ||
543 | extern s32 e1000e_led_off_generic(struct e1000_hw *hw); | ||
544 | extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); | ||
545 | extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); | ||
546 | extern void e1000_set_lan_id_single_port(struct e1000_hw *hw); | ||
547 | extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex); | ||
548 | extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex); | ||
549 | extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw); | ||
550 | extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw); | ||
551 | extern s32 e1000e_id_led_init(struct e1000_hw *hw); | ||
552 | extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw); | ||
553 | extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw); | ||
554 | extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw); | ||
555 | extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw); | ||
556 | extern s32 e1000e_setup_link(struct e1000_hw *hw); | ||
557 | extern void e1000_clear_vfta_generic(struct e1000_hw *hw); | ||
558 | extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); | ||
559 | extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, | ||
560 | u8 *mc_addr_list, | ||
561 | u32 mc_addr_count); | ||
562 | extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); | ||
563 | extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); | ||
564 | extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); | ||
565 | extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); | ||
566 | extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data); | ||
567 | extern void e1000e_config_collision_dist(struct e1000_hw *hw); | ||
568 | extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw); | ||
569 | extern s32 e1000e_force_mac_fc(struct e1000_hw *hw); | ||
570 | extern s32 e1000e_blink_led_generic(struct e1000_hw *hw); | ||
571 | extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); | ||
572 | extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); | ||
573 | extern void e1000e_reset_adaptive(struct e1000_hw *hw); | ||
574 | extern void e1000e_update_adaptive(struct e1000_hw *hw); | ||
575 | |||
576 | extern s32 e1000e_setup_copper_link(struct e1000_hw *hw); | ||
577 | extern s32 e1000e_get_phy_id(struct e1000_hw *hw); | ||
578 | extern void e1000e_put_hw_semaphore(struct e1000_hw *hw); | ||
579 | extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw); | ||
580 | extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw); | ||
581 | extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); | ||
582 | extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); | ||
583 | extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); | ||
584 | extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); | ||
585 | extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, | ||
586 | u16 *data); | ||
587 | extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); | ||
588 | extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); | ||
589 | extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); | ||
590 | extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, | ||
591 | u16 data); | ||
592 | extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw); | ||
593 | extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); | ||
594 | extern s32 e1000e_get_cfg_done(struct e1000_hw *hw); | ||
595 | extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw); | ||
596 | extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); | ||
597 | extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); | ||
598 | extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); | ||
599 | extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw); | ||
600 | extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); | ||
601 | extern s32 e1000e_determine_phy_address(struct e1000_hw *hw); | ||
602 | extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); | ||
603 | extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); | ||
604 | extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, | ||
605 | u16 *phy_reg); | ||
606 | extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, | ||
607 | u16 *phy_reg); | ||
608 | extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); | ||
609 | extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); | ||
610 | extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); | ||
611 | extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); | ||
612 | extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, | ||
613 | u16 data); | ||
614 | extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); | ||
615 | extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, | ||
616 | u16 *data); | ||
617 | extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, | ||
618 | u32 usec_interval, bool *success); | ||
619 | extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); | ||
620 | extern void e1000_power_up_phy_copper(struct e1000_hw *hw); | ||
621 | extern void e1000_power_down_phy_copper(struct e1000_hw *hw); | ||
622 | extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); | ||
623 | extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); | ||
624 | extern s32 e1000e_check_downshift(struct e1000_hw *hw); | ||
625 | extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); | ||
626 | extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, | ||
627 | u16 *data); | ||
628 | extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, | ||
629 | u16 *data); | ||
630 | extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); | ||
631 | extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, | ||
632 | u16 data); | ||
633 | extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, | ||
634 | u16 data); | ||
635 | extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); | ||
636 | extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); | ||
637 | extern s32 e1000_check_polarity_82577(struct e1000_hw *hw); | ||
638 | extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw); | ||
639 | extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); | ||
640 | extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw); | ||
641 | |||
642 | extern s32 e1000_check_polarity_m88(struct e1000_hw *hw); | ||
643 | extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw); | ||
644 | extern s32 e1000_check_polarity_ife(struct e1000_hw *hw); | ||
645 | extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); | ||
646 | extern s32 e1000_check_polarity_igp(struct e1000_hw *hw); | ||
647 | extern bool e1000_check_phy_82574(struct e1000_hw *hw); | ||
648 | |||
649 | static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) | ||
650 | { | ||
651 | return hw->phy.ops.reset(hw); | ||
652 | } | ||
653 | |||
654 | static inline s32 e1000_check_reset_block(struct e1000_hw *hw) | ||
655 | { | ||
656 | return hw->phy.ops.check_reset_block(hw); | ||
657 | } | ||
658 | |||
659 | static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) | ||
660 | { | ||
661 | return hw->phy.ops.read_reg(hw, offset, data); | ||
662 | } | ||
663 | |||
664 | static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) | ||
665 | { | ||
666 | return hw->phy.ops.write_reg(hw, offset, data); | ||
667 | } | ||
668 | |||
669 | static inline s32 e1000_get_cable_length(struct e1000_hw *hw) | ||
670 | { | ||
671 | return hw->phy.ops.get_cable_length(hw); | ||
672 | } | ||
673 | |||
674 | extern s32 e1000e_acquire_nvm(struct e1000_hw *hw); | ||
675 | extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); | ||
676 | extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw); | ||
677 | extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); | ||
678 | extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); | ||
679 | extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); | ||
680 | extern void e1000e_release_nvm(struct e1000_hw *hw); | ||
681 | extern void e1000e_reload_nvm(struct e1000_hw *hw); | ||
682 | extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); | ||
683 | |||
684 | static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw) | ||
685 | { | ||
686 | if (hw->mac.ops.read_mac_addr) | ||
687 | return hw->mac.ops.read_mac_addr(hw); | ||
688 | |||
689 | return e1000_read_mac_addr_generic(hw); | ||
690 | } | ||
691 | |||
692 | static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) | ||
693 | { | ||
694 | return hw->nvm.ops.validate(hw); | ||
695 | } | ||
696 | |||
697 | static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw) | ||
698 | { | ||
699 | return hw->nvm.ops.update(hw); | ||
700 | } | ||
701 | |||
702 | static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | ||
703 | { | ||
704 | return hw->nvm.ops.read(hw, offset, words, data); | ||
705 | } | ||
706 | |||
707 | static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | ||
708 | { | ||
709 | return hw->nvm.ops.write(hw, offset, words, data); | ||
710 | } | ||
711 | |||
712 | static inline s32 e1000_get_phy_info(struct e1000_hw *hw) | ||
713 | { | ||
714 | return hw->phy.ops.get_info(hw); | ||
715 | } | ||
716 | |||
717 | static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw) | ||
718 | { | ||
719 | return hw->mac.ops.check_mng_mode(hw); | ||
720 | } | ||
721 | |||
722 | extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw); | ||
723 | extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw); | ||
724 | extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); | ||
725 | |||
726 | static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) | ||
727 | { | ||
728 | return readl(hw->hw_addr + reg); | ||
729 | } | ||
730 | |||
731 | static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) | ||
732 | { | ||
733 | writel(val, hw->hw_addr + reg); | ||
734 | } | ||
735 | |||
736 | #endif /* _E1000_H_ */ | ||
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c new file mode 100644 index 000000000000..06d88f316dce --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c | |||
@@ -0,0 +1,2081 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/1000 Linux driver | ||
4 | Copyright(c) 1999 - 2011 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | /* ethtool support for e1000 */ | ||
30 | |||
31 | #include <linux/netdevice.h> | ||
32 | #include <linux/interrupt.h> | ||
33 | #include <linux/ethtool.h> | ||
34 | #include <linux/pci.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/delay.h> | ||
37 | |||
38 | #include "e1000.h" | ||
39 | |||
40 | enum {NETDEV_STATS, E1000_STATS}; | ||
41 | |||
42 | struct e1000_stats { | ||
43 | char stat_string[ETH_GSTRING_LEN]; | ||
44 | int type; | ||
45 | int sizeof_stat; | ||
46 | int stat_offset; | ||
47 | }; | ||
48 | |||
49 | #define E1000_STAT(str, m) { \ | ||
50 | .stat_string = str, \ | ||
51 | .type = E1000_STATS, \ | ||
52 | .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \ | ||
53 | .stat_offset = offsetof(struct e1000_adapter, m) } | ||
54 | #define E1000_NETDEV_STAT(str, m) { \ | ||
55 | .stat_string = str, \ | ||
56 | .type = NETDEV_STATS, \ | ||
57 | .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \ | ||
58 | .stat_offset = offsetof(struct rtnl_link_stats64, m) } | ||
59 | |||
60 | static const struct e1000_stats e1000_gstrings_stats[] = { | ||
61 | E1000_STAT("rx_packets", stats.gprc), | ||
62 | E1000_STAT("tx_packets", stats.gptc), | ||
63 | E1000_STAT("rx_bytes", stats.gorc), | ||
64 | E1000_STAT("tx_bytes", stats.gotc), | ||
65 | E1000_STAT("rx_broadcast", stats.bprc), | ||
66 | E1000_STAT("tx_broadcast", stats.bptc), | ||
67 | E1000_STAT("rx_multicast", stats.mprc), | ||
68 | E1000_STAT("tx_multicast", stats.mptc), | ||
69 | E1000_NETDEV_STAT("rx_errors", rx_errors), | ||
70 | E1000_NETDEV_STAT("tx_errors", tx_errors), | ||
71 | E1000_NETDEV_STAT("tx_dropped", tx_dropped), | ||
72 | E1000_STAT("multicast", stats.mprc), | ||
73 | E1000_STAT("collisions", stats.colc), | ||
74 | E1000_NETDEV_STAT("rx_length_errors", rx_length_errors), | ||
75 | E1000_NETDEV_STAT("rx_over_errors", rx_over_errors), | ||
76 | E1000_STAT("rx_crc_errors", stats.crcerrs), | ||
77 | E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors), | ||
78 | E1000_STAT("rx_no_buffer_count", stats.rnbc), | ||
79 | E1000_STAT("rx_missed_errors", stats.mpc), | ||
80 | E1000_STAT("tx_aborted_errors", stats.ecol), | ||
81 | E1000_STAT("tx_carrier_errors", stats.tncrs), | ||
82 | E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors), | ||
83 | E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors), | ||
84 | E1000_STAT("tx_window_errors", stats.latecol), | ||
85 | E1000_STAT("tx_abort_late_coll", stats.latecol), | ||
86 | E1000_STAT("tx_deferred_ok", stats.dc), | ||
87 | E1000_STAT("tx_single_coll_ok", stats.scc), | ||
88 | E1000_STAT("tx_multi_coll_ok", stats.mcc), | ||
89 | E1000_STAT("tx_timeout_count", tx_timeout_count), | ||
90 | E1000_STAT("tx_restart_queue", restart_queue), | ||
91 | E1000_STAT("rx_long_length_errors", stats.roc), | ||
92 | E1000_STAT("rx_short_length_errors", stats.ruc), | ||
93 | E1000_STAT("rx_align_errors", stats.algnerrc), | ||
94 | E1000_STAT("tx_tcp_seg_good", stats.tsctc), | ||
95 | E1000_STAT("tx_tcp_seg_failed", stats.tsctfc), | ||
96 | E1000_STAT("rx_flow_control_xon", stats.xonrxc), | ||
97 | E1000_STAT("rx_flow_control_xoff", stats.xoffrxc), | ||
98 | E1000_STAT("tx_flow_control_xon", stats.xontxc), | ||
99 | E1000_STAT("tx_flow_control_xoff", stats.xofftxc), | ||
100 | E1000_STAT("rx_long_byte_count", stats.gorc), | ||
101 | E1000_STAT("rx_csum_offload_good", hw_csum_good), | ||
102 | E1000_STAT("rx_csum_offload_errors", hw_csum_err), | ||
103 | E1000_STAT("rx_header_split", rx_hdr_split), | ||
104 | E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), | ||
105 | E1000_STAT("tx_smbus", stats.mgptc), | ||
106 | E1000_STAT("rx_smbus", stats.mgprc), | ||
107 | E1000_STAT("dropped_smbus", stats.mgpdc), | ||
108 | E1000_STAT("rx_dma_failed", rx_dma_failed), | ||
109 | E1000_STAT("tx_dma_failed", tx_dma_failed), | ||
110 | }; | ||
111 | |||
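The E1000_STAT/E1000_NETDEV_STAT macros above record, for each exported counter, its width and byte offset inside the owning structure, so a stats handler can walk the table generically instead of copying fields one by one. A self-contained sketch of that pattern; struct demo_adapter and its fields are stand-ins, not the driver's real layout.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_adapter {
	uint32_t restart_queue;
	uint64_t hw_csum_err;
};

struct demo_stat {
	const char *name;
	int sizeof_stat;
	int stat_offset;
};

#define DEMO_STAT(str, m) { str, sizeof(((struct demo_adapter *)0)->m), \
			    offsetof(struct demo_adapter, m) }

static const struct demo_stat demo_stats[] = {
	DEMO_STAT("tx_restart_queue", restart_queue),
	DEMO_STAT("rx_csum_offload_errors", hw_csum_err),
};

int main(void)
{
	struct demo_adapter adapter = { .restart_queue = 3, .hw_csum_err = 7 };
	uint64_t data[2];
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
		/* locate the counter by offset, then read it at its width */
		const char *p = (const char *)&adapter + demo_stats[i].stat_offset;

		data[i] = (demo_stats[i].sizeof_stat == sizeof(uint64_t)) ?
			  *(const uint64_t *)p : *(const uint32_t *)p;
		printf("%s = %llu\n", demo_stats[i].name,
		       (unsigned long long)data[i]);
	}
	return 0;
}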
112 | #define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) | ||
113 | #define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN) | ||
114 | static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { | ||
115 | "Register test (offline)", "Eeprom test (offline)", | ||
116 | "Interrupt test (offline)", "Loopback test (offline)", | ||
117 | "Link test (on/offline)" | ||
118 | }; | ||
119 | #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) | ||
120 | |||
121 | static int e1000_get_settings(struct net_device *netdev, | ||
122 | struct ethtool_cmd *ecmd) | ||
123 | { | ||
124 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
125 | struct e1000_hw *hw = &adapter->hw; | ||
126 | u32 speed; | ||
127 | |||
128 | if (hw->phy.media_type == e1000_media_type_copper) { | ||
129 | |||
130 | ecmd->supported = (SUPPORTED_10baseT_Half | | ||
131 | SUPPORTED_10baseT_Full | | ||
132 | SUPPORTED_100baseT_Half | | ||
133 | SUPPORTED_100baseT_Full | | ||
134 | SUPPORTED_1000baseT_Full | | ||
135 | SUPPORTED_Autoneg | | ||
136 | SUPPORTED_TP); | ||
137 | if (hw->phy.type == e1000_phy_ife) | ||
138 | ecmd->supported &= ~SUPPORTED_1000baseT_Full; | ||
139 | ecmd->advertising = ADVERTISED_TP; | ||
140 | |||
141 | if (hw->mac.autoneg == 1) { | ||
142 | ecmd->advertising |= ADVERTISED_Autoneg; | ||
143 | /* the e1000 autoneg seems to match ethtool nicely */ | ||
144 | ecmd->advertising |= hw->phy.autoneg_advertised; | ||
145 | } | ||
146 | |||
147 | ecmd->port = PORT_TP; | ||
148 | ecmd->phy_address = hw->phy.addr; | ||
149 | ecmd->transceiver = XCVR_INTERNAL; | ||
150 | |||
151 | } else { | ||
152 | ecmd->supported = (SUPPORTED_1000baseT_Full | | ||
153 | SUPPORTED_FIBRE | | ||
154 | SUPPORTED_Autoneg); | ||
155 | |||
156 | ecmd->advertising = (ADVERTISED_1000baseT_Full | | ||
157 | ADVERTISED_FIBRE | | ||
158 | ADVERTISED_Autoneg); | ||
159 | |||
160 | ecmd->port = PORT_FIBRE; | ||
161 | ecmd->transceiver = XCVR_EXTERNAL; | ||
162 | } | ||
163 | |||
164 | speed = -1; | ||
165 | ecmd->duplex = -1; | ||
166 | |||
167 | if (netif_running(netdev)) { | ||
168 | if (netif_carrier_ok(netdev)) { | ||
169 | speed = adapter->link_speed; | ||
170 | ecmd->duplex = adapter->link_duplex - 1; | ||
171 | } | ||
172 | } else { | ||
173 | u32 status = er32(STATUS); | ||
174 | if (status & E1000_STATUS_LU) { | ||
175 | if (status & E1000_STATUS_SPEED_1000) | ||
176 | speed = SPEED_1000; | ||
177 | else if (status & E1000_STATUS_SPEED_100) | ||
178 | speed = SPEED_100; | ||
179 | else | ||
180 | speed = SPEED_10; | ||
181 | |||
182 | if (status & E1000_STATUS_FD) | ||
183 | ecmd->duplex = DUPLEX_FULL; | ||
184 | else | ||
185 | ecmd->duplex = DUPLEX_HALF; | ||
186 | } | ||
187 | } | ||
188 | |||
189 | ethtool_cmd_speed_set(ecmd, speed); | ||
190 | ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || | ||
191 | hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; | ||
192 | |||
193 | /* MDI-X => 2; MDI =>1; Invalid =>0 */ | ||
194 | if ((hw->phy.media_type == e1000_media_type_copper) && | ||
195 | netif_carrier_ok(netdev)) | ||
196 | ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : | ||
197 | ETH_TP_MDI; | ||
198 | else | ||
199 | ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; | ||
200 | |||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) | ||
205 | { | ||
206 | struct e1000_mac_info *mac = &adapter->hw.mac; | ||
207 | |||
208 | mac->autoneg = 0; | ||
209 | |||
210 | /* Make sure dplx is at most 1 bit and lsb of speed is not set | ||
211 | * for the switch() below to work */ | ||
212 | if ((spd & 1) || (dplx & ~1)) | ||
213 | goto err_inval; | ||
214 | |||
215 | /* Fiber NICs only allow 1000 Mbps full duplex */ | ||
216 | if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && | ||
217 | spd != SPEED_1000 && | ||
218 | dplx != DUPLEX_FULL) { | ||
219 | goto err_inval; | ||
220 | } | ||
221 | |||
222 | switch (spd + dplx) { | ||
223 | case SPEED_10 + DUPLEX_HALF: | ||
224 | mac->forced_speed_duplex = ADVERTISE_10_HALF; | ||
225 | break; | ||
226 | case SPEED_10 + DUPLEX_FULL: | ||
227 | mac->forced_speed_duplex = ADVERTISE_10_FULL; | ||
228 | break; | ||
229 | case SPEED_100 + DUPLEX_HALF: | ||
230 | mac->forced_speed_duplex = ADVERTISE_100_HALF; | ||
231 | break; | ||
232 | case SPEED_100 + DUPLEX_FULL: | ||
233 | mac->forced_speed_duplex = ADVERTISE_100_FULL; | ||
234 | break; | ||
235 | case SPEED_1000 + DUPLEX_FULL: | ||
236 | mac->autoneg = 1; | ||
237 | adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; | ||
238 | break; | ||
239 | case SPEED_1000 + DUPLEX_HALF: /* not supported */ | ||
240 | default: | ||
241 | goto err_inval; | ||
242 | } | ||
243 | return 0; | ||
244 | |||
245 | err_inval: | ||
246 | e_err("Unsupported Speed/Duplex configuration\n"); | ||
247 | return -EINVAL; | ||
248 | } | ||
249 | |||
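The switch above keys on spd + dplx, and the earlier check that dplx is at most one bit and the LSB of speed is clear is what makes every supported combination sum to a distinct case label. A small sketch enumerating the sums; the constants mirror the standard ethtool values, with DUPLEX_HALF=0 and DUPLEX_FULL=1.

#include <stdio.h>

#define SPEED_10    10
#define SPEED_100   100
#define SPEED_1000  1000
#define DUPLEX_HALF 0
#define DUPLEX_FULL 1

int main(void)
{
	const unsigned int speeds[] = { SPEED_10, SPEED_100, SPEED_1000 };
	const unsigned int duplexes[] = { DUPLEX_HALF, DUPLEX_FULL };
	unsigned int i, j;

	/* every pair yields a unique sum: 10, 11, 100, 101, 1000, 1001 */
	for (i = 0; i < 3; i++)
		for (j = 0; j < 2; j++)
			printf("speed %u + duplex %u -> case %u\n",
			       speeds[i], duplexes[j],
			       speeds[i] + duplexes[j]);
	return 0;
}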
250 | static int e1000_set_settings(struct net_device *netdev, | ||
251 | struct ethtool_cmd *ecmd) | ||
252 | { | ||
253 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
254 | struct e1000_hw *hw = &adapter->hw; | ||
255 | |||
256 | /* | ||
257 | * When SoL/IDER sessions are active, autoneg/speed/duplex | ||
258 | * cannot be changed | ||
259 | */ | ||
260 | if (e1000_check_reset_block(hw)) { | ||
261 | e_err("Cannot change link characteristics when SoL/IDER is " | ||
262 | "active.\n"); | ||
263 | return -EINVAL; | ||
264 | } | ||
265 | |||
266 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | ||
267 | usleep_range(1000, 2000); | ||
268 | |||
269 | if (ecmd->autoneg == AUTONEG_ENABLE) { | ||
270 | hw->mac.autoneg = 1; | ||
271 | if (hw->phy.media_type == e1000_media_type_fiber) | ||
272 | hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | | ||
273 | ADVERTISED_FIBRE | | ||
274 | ADVERTISED_Autoneg; | ||
275 | else | ||
276 | hw->phy.autoneg_advertised = ecmd->advertising | | ||
277 | ADVERTISED_TP | | ||
278 | ADVERTISED_Autoneg; | ||
279 | ecmd->advertising = hw->phy.autoneg_advertised; | ||
280 | if (adapter->fc_autoneg) | ||
281 | hw->fc.requested_mode = e1000_fc_default; | ||
282 | } else { | ||
283 | u32 speed = ethtool_cmd_speed(ecmd); | ||
284 | if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { | ||
285 | clear_bit(__E1000_RESETTING, &adapter->state); | ||
286 | return -EINVAL; | ||
287 | } | ||
288 | } | ||
289 | |||
290 | /* reset the link */ | ||
291 | |||
292 | if (netif_running(adapter->netdev)) { | ||
293 | e1000e_down(adapter); | ||
294 | e1000e_up(adapter); | ||
295 | } else { | ||
296 | e1000e_reset(adapter); | ||
297 | } | ||
298 | |||
299 | clear_bit(__E1000_RESETTING, &adapter->state); | ||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static void e1000_get_pauseparam(struct net_device *netdev, | ||
304 | struct ethtool_pauseparam *pause) | ||
305 | { | ||
306 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
307 | struct e1000_hw *hw = &adapter->hw; | ||
308 | |||
309 | pause->autoneg = | ||
310 | (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); | ||
311 | |||
312 | if (hw->fc.current_mode == e1000_fc_rx_pause) { | ||
313 | pause->rx_pause = 1; | ||
314 | } else if (hw->fc.current_mode == e1000_fc_tx_pause) { | ||
315 | pause->tx_pause = 1; | ||
316 | } else if (hw->fc.current_mode == e1000_fc_full) { | ||
317 | pause->rx_pause = 1; | ||
318 | pause->tx_pause = 1; | ||
319 | } | ||
320 | } | ||
321 | |||
322 | static int e1000_set_pauseparam(struct net_device *netdev, | ||
323 | struct ethtool_pauseparam *pause) | ||
324 | { | ||
325 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
326 | struct e1000_hw *hw = &adapter->hw; | ||
327 | int retval = 0; | ||
328 | |||
329 | adapter->fc_autoneg = pause->autoneg; | ||
330 | |||
331 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | ||
332 | usleep_range(1000, 2000); | ||
333 | |||
334 | if (adapter->fc_autoneg == AUTONEG_ENABLE) { | ||
335 | hw->fc.requested_mode = e1000_fc_default; | ||
336 | if (netif_running(adapter->netdev)) { | ||
337 | e1000e_down(adapter); | ||
338 | e1000e_up(adapter); | ||
339 | } else { | ||
340 | e1000e_reset(adapter); | ||
341 | } | ||
342 | } else { | ||
343 | if (pause->rx_pause && pause->tx_pause) | ||
344 | hw->fc.requested_mode = e1000_fc_full; | ||
345 | else if (pause->rx_pause && !pause->tx_pause) | ||
346 | hw->fc.requested_mode = e1000_fc_rx_pause; | ||
347 | else if (!pause->rx_pause && pause->tx_pause) | ||
348 | hw->fc.requested_mode = e1000_fc_tx_pause; | ||
349 | else if (!pause->rx_pause && !pause->tx_pause) | ||
350 | hw->fc.requested_mode = e1000_fc_none; | ||
351 | |||
352 | hw->fc.current_mode = hw->fc.requested_mode; | ||
353 | |||
354 | if (hw->phy.media_type == e1000_media_type_fiber) { | ||
355 | retval = hw->mac.ops.setup_link(hw); | ||
356 | /* implicit goto out */ | ||
357 | } else { | ||
358 | retval = e1000e_force_mac_fc(hw); | ||
359 | if (retval) | ||
360 | goto out; | ||
361 | e1000e_set_fc_watermarks(hw); | ||
362 | } | ||
363 | } | ||
364 | |||
365 | out: | ||
366 | clear_bit(__E1000_RESETTING, &adapter->state); | ||
367 | return retval; | ||
368 | } | ||
369 | |||
370 | static u32 e1000_get_rx_csum(struct net_device *netdev) | ||
371 | { | ||
372 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
373 | return adapter->flags & FLAG_RX_CSUM_ENABLED; | ||
374 | } | ||
375 | |||
376 | static int e1000_set_rx_csum(struct net_device *netdev, u32 data) | ||
377 | { | ||
378 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
379 | |||
380 | if (data) | ||
381 | adapter->flags |= FLAG_RX_CSUM_ENABLED; | ||
382 | else | ||
383 | adapter->flags &= ~FLAG_RX_CSUM_ENABLED; | ||
384 | |||
385 | if (netif_running(netdev)) | ||
386 | e1000e_reinit_locked(adapter); | ||
387 | else | ||
388 | e1000e_reset(adapter); | ||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | static u32 e1000_get_tx_csum(struct net_device *netdev) | ||
393 | { | ||
394 | return (netdev->features & NETIF_F_HW_CSUM) != 0; | ||
395 | } | ||
396 | |||
397 | static int e1000_set_tx_csum(struct net_device *netdev, u32 data) | ||
398 | { | ||
399 | if (data) | ||
400 | netdev->features |= NETIF_F_HW_CSUM; | ||
401 | else | ||
402 | netdev->features &= ~NETIF_F_HW_CSUM; | ||
403 | |||
404 | return 0; | ||
405 | } | ||
406 | |||
407 | static int e1000_set_tso(struct net_device *netdev, u32 data) | ||
408 | { | ||
409 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
410 | |||
411 | if (data) { | ||
412 | netdev->features |= NETIF_F_TSO; | ||
413 | netdev->features |= NETIF_F_TSO6; | ||
414 | } else { | ||
415 | netdev->features &= ~NETIF_F_TSO; | ||
416 | netdev->features &= ~NETIF_F_TSO6; | ||
417 | } | ||
418 | |||
419 | adapter->flags |= FLAG_TSO_FORCE; | ||
420 | return 0; | ||
421 | } | ||
422 | |||
423 | static u32 e1000_get_msglevel(struct net_device *netdev) | ||
424 | { | ||
425 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
426 | return adapter->msg_enable; | ||
427 | } | ||
428 | |||
429 | static void e1000_set_msglevel(struct net_device *netdev, u32 data) | ||
430 | { | ||
431 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
432 | adapter->msg_enable = data; | ||
433 | } | ||
434 | |||
435 | static int e1000_get_regs_len(struct net_device *netdev) | ||
436 | { | ||
437 | #define E1000_REGS_LEN 32 /* overestimate */ | ||
438 | return E1000_REGS_LEN * sizeof(u32); | ||
439 | } | ||
440 | |||
441 | static void e1000_get_regs(struct net_device *netdev, | ||
442 | struct ethtool_regs *regs, void *p) | ||
443 | { | ||
444 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
445 | struct e1000_hw *hw = &adapter->hw; | ||
446 | u32 *regs_buff = p; | ||
447 | u16 phy_data; | ||
448 | |||
449 | memset(p, 0, E1000_REGS_LEN * sizeof(u32)); | ||
450 | |||
451 | regs->version = (1 << 24) | (adapter->pdev->revision << 16) | | ||
452 | adapter->pdev->device; | ||
453 | |||
454 | regs_buff[0] = er32(CTRL); | ||
455 | regs_buff[1] = er32(STATUS); | ||
456 | |||
457 | regs_buff[2] = er32(RCTL); | ||
458 | regs_buff[3] = er32(RDLEN); | ||
459 | regs_buff[4] = er32(RDH); | ||
460 | regs_buff[5] = er32(RDT); | ||
461 | regs_buff[6] = er32(RDTR); | ||
462 | |||
463 | regs_buff[7] = er32(TCTL); | ||
464 | regs_buff[8] = er32(TDLEN); | ||
465 | regs_buff[9] = er32(TDH); | ||
466 | regs_buff[10] = er32(TDT); | ||
467 | regs_buff[11] = er32(TIDV); | ||
468 | |||
469 | regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ | ||
470 | |||
471 | /* ethtool doesn't use anything past this point, so all this | ||
472 | * code is likely legacy junk for apps that may or may not | ||
473 | * exist */ | ||
474 | if (hw->phy.type == e1000_phy_m88) { | ||
475 | e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); | ||
476 | regs_buff[13] = (u32)phy_data; /* cable length */ | ||
477 | regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ | ||
478 | regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ | ||
479 | regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ | ||
480 | e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | ||
481 | regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ | ||
482 | regs_buff[18] = regs_buff[13]; /* cable polarity */ | ||
483 | regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ | ||
484 | regs_buff[20] = regs_buff[17]; /* polarity correction */ | ||
485 | /* phy receive errors */ | ||
486 | regs_buff[22] = adapter->phy_stats.receive_errors; | ||
487 | regs_buff[23] = regs_buff[13]; /* mdix mode */ | ||
488 | } | ||
489 | regs_buff[21] = 0; /* was idle_errors */ | ||
490 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); | ||
491 | regs_buff[24] = (u32)phy_data; /* phy local receiver status */ | ||
492 | regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ | ||
493 | } | ||
494 | |||
495 | static int e1000_get_eeprom_len(struct net_device *netdev) | ||
496 | { | ||
497 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
498 | return adapter->hw.nvm.word_size * 2; | ||
499 | } | ||
500 | |||
501 | static int e1000_get_eeprom(struct net_device *netdev, | ||
502 | struct ethtool_eeprom *eeprom, u8 *bytes) | ||
503 | { | ||
504 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
505 | struct e1000_hw *hw = &adapter->hw; | ||
506 | u16 *eeprom_buff; | ||
507 | int first_word; | ||
508 | int last_word; | ||
509 | int ret_val = 0; | ||
510 | u16 i; | ||
511 | |||
512 | if (eeprom->len == 0) | ||
513 | return -EINVAL; | ||
514 | |||
515 | eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16); | ||
516 | |||
517 | first_word = eeprom->offset >> 1; | ||
518 | last_word = (eeprom->offset + eeprom->len - 1) >> 1; | ||
519 | |||
520 | eeprom_buff = kmalloc(sizeof(u16) * | ||
521 | (last_word - first_word + 1), GFP_KERNEL); | ||
522 | if (!eeprom_buff) | ||
523 | return -ENOMEM; | ||
524 | |||
525 | if (hw->nvm.type == e1000_nvm_eeprom_spi) { | ||
526 | ret_val = e1000_read_nvm(hw, first_word, | ||
527 | last_word - first_word + 1, | ||
528 | eeprom_buff); | ||
529 | } else { | ||
530 | for (i = 0; i < last_word - first_word + 1; i++) { | ||
531 | ret_val = e1000_read_nvm(hw, first_word + i, 1, | ||
532 | &eeprom_buff[i]); | ||
533 | if (ret_val) | ||
534 | break; | ||
535 | } | ||
536 | } | ||
537 | |||
538 | if (ret_val) { | ||
539 | /* a read error occurred, throw away the result */ | ||
540 | memset(eeprom_buff, 0xff, sizeof(u16) * | ||
541 | (last_word - first_word + 1)); | ||
542 | } else { | ||
543 | /* Device's eeprom is always little-endian, word addressable */ | ||
544 | for (i = 0; i < last_word - first_word + 1; i++) | ||
545 | le16_to_cpus(&eeprom_buff[i]); | ||
546 | } | ||
547 | |||
548 | memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); | ||
549 | kfree(eeprom_buff); | ||
550 | |||
551 | return ret_val; | ||
552 | } | ||
553 | |||
554 | static int e1000_set_eeprom(struct net_device *netdev, | ||
555 | struct ethtool_eeprom *eeprom, u8 *bytes) | ||
556 | { | ||
557 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
558 | struct e1000_hw *hw = &adapter->hw; | ||
559 | u16 *eeprom_buff; | ||
560 | void *ptr; | ||
561 | int max_len; | ||
562 | int first_word; | ||
563 | int last_word; | ||
564 | int ret_val = 0; | ||
565 | u16 i; | ||
566 | |||
567 | if (eeprom->len == 0) | ||
568 | return -EOPNOTSUPP; | ||
569 | |||
570 | if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16))) | ||
571 | return -EFAULT; | ||
572 | |||
573 | if (adapter->flags & FLAG_READ_ONLY_NVM) | ||
574 | return -EINVAL; | ||
575 | |||
576 | max_len = hw->nvm.word_size * 2; | ||
577 | |||
578 | first_word = eeprom->offset >> 1; | ||
579 | last_word = (eeprom->offset + eeprom->len - 1) >> 1; | ||
580 | eeprom_buff = kmalloc(max_len, GFP_KERNEL); | ||
581 | if (!eeprom_buff) | ||
582 | return -ENOMEM; | ||
583 | |||
584 | ptr = (void *)eeprom_buff; | ||
585 | |||
586 | if (eeprom->offset & 1) { | ||
587 | /* need read/modify/write of first changed EEPROM word */ | ||
588 | /* only the second byte of the word is being modified */ | ||
589 | ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]); | ||
590 | ptr++; | ||
591 | } | ||
592 | if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) | ||
593 | /* need read/modify/write of last changed EEPROM word */ | ||
594 | /* only the first byte of the word is being modified */ | ||
595 | ret_val = e1000_read_nvm(hw, last_word, 1, | ||
596 | &eeprom_buff[last_word - first_word]); | ||
597 | |||
598 | if (ret_val) | ||
599 | goto out; | ||
600 | |||
601 | /* Device's eeprom is always little-endian, word addressable */ | ||
602 | for (i = 0; i < last_word - first_word + 1; i++) | ||
603 | le16_to_cpus(&eeprom_buff[i]); | ||
604 | |||
605 | memcpy(ptr, bytes, eeprom->len); | ||
606 | |||
607 | for (i = 0; i < last_word - first_word + 1; i++) | ||
608 | eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); | ||
609 | |||
610 | ret_val = e1000_write_nvm(hw, first_word, | ||
611 | last_word - first_word + 1, eeprom_buff); | ||
612 | |||
613 | if (ret_val) | ||
614 | goto out; | ||
615 | |||
616 | /* | ||
617 | * Update the checksum over the first part of the EEPROM if needed | ||
618 | * and flush shadow RAM for applicable controllers | ||
619 | */ | ||
620 | if ((first_word <= NVM_CHECKSUM_REG) || | ||
621 | (hw->mac.type == e1000_82583) || | ||
622 | (hw->mac.type == e1000_82574) || | ||
623 | (hw->mac.type == e1000_82573)) | ||
624 | ret_val = e1000e_update_nvm_checksum(hw); | ||
625 | |||
626 | out: | ||
627 | kfree(eeprom_buff); | ||
628 | return ret_val; | ||
629 | } | ||
630 | |||
631 | static void e1000_get_drvinfo(struct net_device *netdev, | ||
632 | struct ethtool_drvinfo *drvinfo) | ||
633 | { | ||
634 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
635 | char firmware_version[32]; | ||
636 | |||
637 | strncpy(drvinfo->driver, e1000e_driver_name, | ||
638 | sizeof(drvinfo->driver) - 1); | ||
639 | strncpy(drvinfo->version, e1000e_driver_version, | ||
640 | sizeof(drvinfo->version) - 1); | ||
641 | |||
642 | /* | ||
643 | * EEPROM image version # is reported as firmware version # for | ||
644 | * PCI-E controllers | ||
645 | */ | ||
646 | snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", | ||
647 | (adapter->eeprom_vers & 0xF000) >> 12, | ||
648 | (adapter->eeprom_vers & 0x0FF0) >> 4, | ||
649 | (adapter->eeprom_vers & 0x000F)); | ||
650 | |||
651 | strncpy(drvinfo->fw_version, firmware_version, | ||
652 | sizeof(drvinfo->fw_version) - 1); | ||
653 | strncpy(drvinfo->bus_info, pci_name(adapter->pdev), | ||
654 | sizeof(drvinfo->bus_info) - 1); | ||
655 | drvinfo->regdump_len = e1000_get_regs_len(netdev); | ||
656 | drvinfo->eedump_len = e1000_get_eeprom_len(netdev); | ||
657 | } | ||
658 | |||
659 | static void e1000_get_ringparam(struct net_device *netdev, | ||
660 | struct ethtool_ringparam *ring) | ||
661 | { | ||
662 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
663 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
664 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
665 | |||
666 | ring->rx_max_pending = E1000_MAX_RXD; | ||
667 | ring->tx_max_pending = E1000_MAX_TXD; | ||
668 | ring->rx_mini_max_pending = 0; | ||
669 | ring->rx_jumbo_max_pending = 0; | ||
670 | ring->rx_pending = rx_ring->count; | ||
671 | ring->tx_pending = tx_ring->count; | ||
672 | ring->rx_mini_pending = 0; | ||
673 | ring->rx_jumbo_pending = 0; | ||
674 | } | ||
675 | |||
676 | static int e1000_set_ringparam(struct net_device *netdev, | ||
677 | struct ethtool_ringparam *ring) | ||
678 | { | ||
679 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
680 | struct e1000_ring *tx_ring, *tx_old; | ||
681 | struct e1000_ring *rx_ring, *rx_old; | ||
682 | int err; | ||
683 | |||
684 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | ||
685 | return -EINVAL; | ||
686 | |||
687 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | ||
688 | usleep_range(1000, 2000); | ||
689 | |||
690 | if (netif_running(adapter->netdev)) | ||
691 | e1000e_down(adapter); | ||
692 | |||
693 | tx_old = adapter->tx_ring; | ||
694 | rx_old = adapter->rx_ring; | ||
695 | |||
696 | err = -ENOMEM; | ||
697 | tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL); | ||
698 | if (!tx_ring) | ||
699 | goto err_alloc_tx; | ||
700 | |||
701 | rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL); | ||
702 | if (!rx_ring) | ||
703 | goto err_alloc_rx; | ||
704 | |||
705 | adapter->tx_ring = tx_ring; | ||
706 | adapter->rx_ring = rx_ring; | ||
707 | |||
708 | rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); | ||
709 | rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD)); | ||
710 | rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE); | ||
711 | |||
712 | tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); | ||
713 | tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD)); | ||
714 | tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE); | ||
715 | |||
716 | if (netif_running(adapter->netdev)) { | ||
717 | /* Try to get new resources before deleting old */ | ||
718 | err = e1000e_setup_rx_resources(adapter); | ||
719 | if (err) | ||
720 | goto err_setup_rx; | ||
721 | err = e1000e_setup_tx_resources(adapter); | ||
722 | if (err) | ||
723 | goto err_setup_tx; | ||
724 | |||
725 | /* | ||
726 | * restore the old rings in order to free them, | ||
727 | * then install the new ones | ||
728 | */ | ||
729 | adapter->rx_ring = rx_old; | ||
730 | adapter->tx_ring = tx_old; | ||
731 | e1000e_free_rx_resources(adapter); | ||
732 | e1000e_free_tx_resources(adapter); | ||
733 | kfree(tx_old); | ||
734 | kfree(rx_old); | ||
735 | adapter->rx_ring = rx_ring; | ||
736 | adapter->tx_ring = tx_ring; | ||
737 | err = e1000e_up(adapter); | ||
738 | if (err) | ||
739 | goto err_setup; | ||
740 | } | ||
741 | |||
742 | clear_bit(__E1000_RESETTING, &adapter->state); | ||
743 | return 0; | ||
744 | err_setup_tx: | ||
745 | e1000e_free_rx_resources(adapter); | ||
746 | err_setup_rx: | ||
747 | adapter->rx_ring = rx_old; | ||
748 | adapter->tx_ring = tx_old; | ||
749 | kfree(rx_ring); | ||
750 | err_alloc_rx: | ||
751 | kfree(tx_ring); | ||
752 | err_alloc_tx: | ||
753 | e1000e_up(adapter); | ||
754 | err_setup: | ||
755 | clear_bit(__E1000_RESETTING, &adapter->state); | ||
756 | return err; | ||
757 | } | ||
758 | |||
759 | static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, | ||
760 | int reg, int offset, u32 mask, u32 write) | ||
761 | { | ||
762 | u32 pat, val; | ||
763 | static const u32 test[] = { | ||
764 | 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; | ||
765 | for (pat = 0; pat < ARRAY_SIZE(test); pat++) { | ||
766 | E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, | ||
767 | (test[pat] & write)); | ||
768 | val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); | ||
769 | if (val != (test[pat] & write & mask)) { | ||
770 | e_err("pattern test reg %04X failed: got 0x%08X " | ||
771 | "expected 0x%08X\n", reg + offset, val, | ||
772 | (test[pat] & write & mask)); | ||
773 | *data = reg; | ||
774 | return 1; | ||
775 | } | ||
776 | } | ||
777 | return 0; | ||
778 | } | ||
779 | |||
780 | static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, | ||
781 | int reg, u32 mask, u32 write) | ||
782 | { | ||
783 | u32 val; | ||
784 | __ew32(&adapter->hw, reg, write & mask); | ||
785 | val = __er32(&adapter->hw, reg); | ||
786 | if ((write & mask) != (val & mask)) { | ||
787 | e_err("set/check reg %04X test failed: got 0x%08X " | ||
788 | "expected 0x%08X\n", reg, (val & mask), (write & mask)); | ||
789 | *data = reg; | ||
790 | return 1; | ||
791 | } | ||
792 | return 0; | ||
793 | } | ||
794 | #define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ | ||
795 | do { \ | ||
796 | if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \ | ||
797 | return 1; \ | ||
798 | } while (0) | ||
799 | #define REG_PATTERN_TEST(reg, mask, write) \ | ||
800 | REG_PATTERN_TEST_ARRAY(reg, 0, mask, write) | ||
801 | |||
802 | #define REG_SET_AND_CHECK(reg, mask, write) \ | ||
803 | do { \ | ||
804 | if (reg_set_and_check(adapter, data, reg, mask, write)) \ | ||
805 | return 1; \ | ||
806 | } while (0) | ||
807 | |||
808 | static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | ||
809 | { | ||
810 | struct e1000_hw *hw = &adapter->hw; | ||
811 | struct e1000_mac_info *mac = &adapter->hw.mac; | ||
812 | u32 value; | ||
813 | u32 before; | ||
814 | u32 after; | ||
815 | u32 i; | ||
816 | u32 toggle; | ||
817 | u32 mask; | ||
818 | |||
819 | /* | ||
820 | * The status register is Read Only, so a write should fail. | ||
821 | * Some bits that get toggled are ignored. | ||
822 | */ | ||
823 | switch (mac->type) { | ||
824 | /* there are several bits on newer hardware that are r/w */ | ||
825 | case e1000_82571: | ||
826 | case e1000_82572: | ||
827 | case e1000_80003es2lan: | ||
828 | toggle = 0x7FFFF3FF; | ||
829 | break; | ||
830 | default: | ||
831 | toggle = 0x7FFFF033; | ||
832 | break; | ||
833 | } | ||
834 | |||
835 | before = er32(STATUS); | ||
836 | value = (er32(STATUS) & toggle); | ||
837 | ew32(STATUS, toggle); | ||
838 | after = er32(STATUS) & toggle; | ||
839 | if (value != after) { | ||
840 | e_err("failed STATUS register test got: 0x%08X expected: " | ||
841 | "0x%08X\n", after, value); | ||
842 | *data = 1; | ||
843 | return 1; | ||
844 | } | ||
845 | /* restore previous status */ | ||
846 | ew32(STATUS, before); | ||
847 | |||
848 | if (!(adapter->flags & FLAG_IS_ICH)) { | ||
849 | REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); | ||
850 | REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); | ||
851 | REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); | ||
852 | REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF); | ||
853 | } | ||
854 | |||
855 | REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); | ||
856 | REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); | ||
857 | REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF); | ||
858 | REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF); | ||
859 | REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF); | ||
860 | REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); | ||
861 | REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); | ||
862 | REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); | ||
863 | REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); | ||
864 | REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF); | ||
865 | |||
866 | REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); | ||
867 | |||
868 | before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE); | ||
869 | REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); | ||
870 | REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); | ||
871 | |||
872 | REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); | ||
873 | REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); | ||
874 | if (!(adapter->flags & FLAG_IS_ICH)) | ||
875 | REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); | ||
876 | REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); | ||
877 | REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); | ||
878 | mask = 0x8003FFFF; | ||
879 | switch (mac->type) { | ||
880 | case e1000_ich10lan: | ||
881 | case e1000_pchlan: | ||
882 | case e1000_pch2lan: | ||
883 | mask |= (1 << 18); | ||
884 | break; | ||
885 | default: | ||
886 | break; | ||
887 | } | ||
888 | for (i = 0; i < mac->rar_entry_count; i++) | ||
889 | REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), | ||
890 | mask, 0xFFFFFFFF); | ||
891 | |||
892 | for (i = 0; i < mac->mta_reg_count; i++) | ||
893 | REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); | ||
894 | |||
895 | *data = 0; | ||
896 | return 0; | ||
897 | } | ||
898 | |||
899 | static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) | ||
900 | { | ||
901 | u16 temp; | ||
902 | u16 checksum = 0; | ||
903 | u16 i; | ||
904 | |||
905 | *data = 0; | ||
906 | /* Read and add up the contents of the EEPROM */ | ||
907 | for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { | ||
908 | if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) { | ||
909 | *data = 1; | ||
910 | return *data; | ||
911 | } | ||
912 | checksum += temp; | ||
913 | } | ||
914 | |||
915 | /* If the checksum is not correct, return an error; otherwise the test passed */ | ||
916 | if ((checksum != (u16) NVM_SUM) && !(*data)) | ||
917 | *data = 2; | ||
918 | |||
919 | return *data; | ||
920 | } | ||
921 | |||
922 | static irqreturn_t e1000_test_intr(int irq, void *data) | ||
923 | { | ||
924 | struct net_device *netdev = (struct net_device *) data; | ||
925 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
926 | struct e1000_hw *hw = &adapter->hw; | ||
927 | |||
928 | adapter->test_icr |= er32(ICR); | ||
929 | |||
930 | return IRQ_HANDLED; | ||
931 | } | ||
932 | |||
933 | static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | ||
934 | { | ||
935 | struct net_device *netdev = adapter->netdev; | ||
936 | struct e1000_hw *hw = &adapter->hw; | ||
937 | u32 mask; | ||
938 | u32 shared_int = 1; | ||
939 | u32 irq = adapter->pdev->irq; | ||
940 | int i; | ||
941 | int ret_val = 0; | ||
942 | int int_mode = E1000E_INT_MODE_LEGACY; | ||
943 | |||
944 | *data = 0; | ||
945 | |||
946 | /* NOTE: we don't test MSI/MSI-X interrupts here, yet */ | ||
947 | if (adapter->int_mode == E1000E_INT_MODE_MSIX) { | ||
948 | int_mode = adapter->int_mode; | ||
949 | e1000e_reset_interrupt_capability(adapter); | ||
950 | adapter->int_mode = E1000E_INT_MODE_LEGACY; | ||
951 | e1000e_set_interrupt_capability(adapter); | ||
952 | } | ||
953 | /* Hook up test interrupt handler just for this test */ | ||
954 | if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, | ||
955 | netdev)) { | ||
956 | shared_int = 0; | ||
957 | } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, | ||
958 | netdev->name, netdev)) { | ||
959 | *data = 1; | ||
960 | ret_val = -1; | ||
961 | goto out; | ||
962 | } | ||
963 | e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared")); | ||
964 | |||
965 | /* Disable all the interrupts */ | ||
966 | ew32(IMC, 0xFFFFFFFF); | ||
967 | e1e_flush(); | ||
968 | usleep_range(10000, 20000); | ||
969 | |||
970 | /* Test each interrupt */ | ||
971 | for (i = 0; i < 10; i++) { | ||
972 | /* Interrupt to test */ | ||
973 | mask = 1 << i; | ||
974 | |||
975 | if (adapter->flags & FLAG_IS_ICH) { | ||
976 | switch (mask) { | ||
977 | case E1000_ICR_RXSEQ: | ||
978 | continue; | ||
979 | case 0x00000100: | ||
980 | if (adapter->hw.mac.type == e1000_ich8lan || | ||
981 | adapter->hw.mac.type == e1000_ich9lan) | ||
982 | continue; | ||
983 | break; | ||
984 | default: | ||
985 | break; | ||
986 | } | ||
987 | } | ||
988 | |||
989 | if (!shared_int) { | ||
990 | /* | ||
991 | * Disable the interrupt so it is not reported in | ||
992 | * the cause register, then force the same | ||
993 | * interrupt and see if one gets posted. If | ||
994 | * an interrupt was posted to the bus, the | ||
995 | * test failed. | ||
996 | */ | ||
997 | adapter->test_icr = 0; | ||
998 | ew32(IMC, mask); | ||
999 | ew32(ICS, mask); | ||
1000 | e1e_flush(); | ||
1001 | usleep_range(10000, 20000); | ||
1002 | |||
1003 | if (adapter->test_icr & mask) { | ||
1004 | *data = 3; | ||
1005 | break; | ||
1006 | } | ||
1007 | } | ||
1008 | |||
1009 | /* | ||
1010 | * Enable the interrupt so it is reported in | ||
1011 | * the cause register, then force the same | ||
1012 | * interrupt and see if one gets posted. If | ||
1013 | * no interrupt was posted to the bus, the | ||
1014 | * test failed. | ||
1015 | */ | ||
1016 | adapter->test_icr = 0; | ||
1017 | ew32(IMS, mask); | ||
1018 | ew32(ICS, mask); | ||
1019 | e1e_flush(); | ||
1020 | usleep_range(10000, 20000); | ||
1021 | |||
1022 | if (!(adapter->test_icr & mask)) { | ||
1023 | *data = 4; | ||
1024 | break; | ||
1025 | } | ||
1026 | |||
1027 | if (!shared_int) { | ||
1028 | /* | ||
1029 | * Disable the other interrupts so they are not | ||
1030 | * reported in the cause register, then force the | ||
1031 | * other interrupts and see if any get posted. If | ||
1032 | * an interrupt was posted to the bus, the | ||
1033 | * test failed. | ||
1034 | */ | ||
1035 | adapter->test_icr = 0; | ||
1036 | ew32(IMC, ~mask & 0x00007FFF); | ||
1037 | ew32(ICS, ~mask & 0x00007FFF); | ||
1038 | e1e_flush(); | ||
1039 | usleep_range(10000, 20000); | ||
1040 | |||
1041 | if (adapter->test_icr) { | ||
1042 | *data = 5; | ||
1043 | break; | ||
1044 | } | ||
1045 | } | ||
1046 | } | ||
1047 | |||
1048 | /* Disable all the interrupts */ | ||
1049 | ew32(IMC, 0xFFFFFFFF); | ||
1050 | e1e_flush(); | ||
1051 | usleep_range(10000, 20000); | ||
1052 | |||
1053 | /* Unhook test interrupt handler */ | ||
1054 | free_irq(irq, netdev); | ||
1055 | |||
1056 | out: | ||
1057 | if (int_mode == E1000E_INT_MODE_MSIX) { | ||
1058 | e1000e_reset_interrupt_capability(adapter); | ||
1059 | adapter->int_mode = int_mode; | ||
1060 | e1000e_set_interrupt_capability(adapter); | ||
1061 | } | ||
1062 | |||
1063 | return ret_val; | ||
1064 | } | ||
1065 | |||
1066 | static void e1000_free_desc_rings(struct e1000_adapter *adapter) | ||
1067 | { | ||
1068 | struct e1000_ring *tx_ring = &adapter->test_tx_ring; | ||
1069 | struct e1000_ring *rx_ring = &adapter->test_rx_ring; | ||
1070 | struct pci_dev *pdev = adapter->pdev; | ||
1071 | int i; | ||
1072 | |||
1073 | if (tx_ring->desc && tx_ring->buffer_info) { | ||
1074 | for (i = 0; i < tx_ring->count; i++) { | ||
1075 | if (tx_ring->buffer_info[i].dma) | ||
1076 | dma_unmap_single(&pdev->dev, | ||
1077 | tx_ring->buffer_info[i].dma, | ||
1078 | tx_ring->buffer_info[i].length, | ||
1079 | DMA_TO_DEVICE); | ||
1080 | if (tx_ring->buffer_info[i].skb) | ||
1081 | dev_kfree_skb(tx_ring->buffer_info[i].skb); | ||
1082 | } | ||
1083 | } | ||
1084 | |||
1085 | if (rx_ring->desc && rx_ring->buffer_info) { | ||
1086 | for (i = 0; i < rx_ring->count; i++) { | ||
1087 | if (rx_ring->buffer_info[i].dma) | ||
1088 | dma_unmap_single(&pdev->dev, | ||
1089 | rx_ring->buffer_info[i].dma, | ||
1090 | 2048, DMA_FROM_DEVICE); | ||
1091 | if (rx_ring->buffer_info[i].skb) | ||
1092 | dev_kfree_skb(rx_ring->buffer_info[i].skb); | ||
1093 | } | ||
1094 | } | ||
1095 | |||
1096 | if (tx_ring->desc) { | ||
1097 | dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, | ||
1098 | tx_ring->dma); | ||
1099 | tx_ring->desc = NULL; | ||
1100 | } | ||
1101 | if (rx_ring->desc) { | ||
1102 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, | ||
1103 | rx_ring->dma); | ||
1104 | rx_ring->desc = NULL; | ||
1105 | } | ||
1106 | |||
1107 | kfree(tx_ring->buffer_info); | ||
1108 | tx_ring->buffer_info = NULL; | ||
1109 | kfree(rx_ring->buffer_info); | ||
1110 | rx_ring->buffer_info = NULL; | ||
1111 | } | ||
1112 | |||
1113 | static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | ||
1114 | { | ||
1115 | struct e1000_ring *tx_ring = &adapter->test_tx_ring; | ||
1116 | struct e1000_ring *rx_ring = &adapter->test_rx_ring; | ||
1117 | struct pci_dev *pdev = adapter->pdev; | ||
1118 | struct e1000_hw *hw = &adapter->hw; | ||
1119 | u32 rctl; | ||
1120 | int i; | ||
1121 | int ret_val; | ||
1122 | |||
1123 | /* Setup Tx descriptor ring and Tx buffers */ | ||
1124 | |||
1125 | if (!tx_ring->count) | ||
1126 | tx_ring->count = E1000_DEFAULT_TXD; | ||
1127 | |||
1128 | tx_ring->buffer_info = kcalloc(tx_ring->count, | ||
1129 | sizeof(struct e1000_buffer), | ||
1130 | GFP_KERNEL); | ||
1131 | if (!(tx_ring->buffer_info)) { | ||
1132 | ret_val = 1; | ||
1133 | goto err_nomem; | ||
1134 | } | ||
1135 | |||
1136 | tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); | ||
1137 | tx_ring->size = ALIGN(tx_ring->size, 4096); | ||
1138 | tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, | ||
1139 | &tx_ring->dma, GFP_KERNEL); | ||
1140 | if (!tx_ring->desc) { | ||
1141 | ret_val = 2; | ||
1142 | goto err_nomem; | ||
1143 | } | ||
1144 | tx_ring->next_to_use = 0; | ||
1145 | tx_ring->next_to_clean = 0; | ||
1146 | |||
1147 | ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); | ||
1148 | ew32(TDBAH, ((u64) tx_ring->dma >> 32)); | ||
1149 | ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc)); | ||
1150 | ew32(TDH, 0); | ||
1151 | ew32(TDT, 0); | ||
1152 | ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | | ||
1153 | E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | | ||
1154 | E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); | ||
1155 | |||
1156 | for (i = 0; i < tx_ring->count; i++) { | ||
1157 | struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); | ||
1158 | struct sk_buff *skb; | ||
1159 | unsigned int skb_size = 1024; | ||
1160 | |||
1161 | skb = alloc_skb(skb_size, GFP_KERNEL); | ||
1162 | if (!skb) { | ||
1163 | ret_val = 3; | ||
1164 | goto err_nomem; | ||
1165 | } | ||
1166 | skb_put(skb, skb_size); | ||
1167 | tx_ring->buffer_info[i].skb = skb; | ||
1168 | tx_ring->buffer_info[i].length = skb->len; | ||
1169 | tx_ring->buffer_info[i].dma = | ||
1170 | dma_map_single(&pdev->dev, skb->data, skb->len, | ||
1171 | DMA_TO_DEVICE); | ||
1172 | if (dma_mapping_error(&pdev->dev, | ||
1173 | tx_ring->buffer_info[i].dma)) { | ||
1174 | ret_val = 4; | ||
1175 | goto err_nomem; | ||
1176 | } | ||
1177 | tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma); | ||
1178 | tx_desc->lower.data = cpu_to_le32(skb->len); | ||
1179 | tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | | ||
1180 | E1000_TXD_CMD_IFCS | | ||
1181 | E1000_TXD_CMD_RS); | ||
1182 | tx_desc->upper.data = 0; | ||
1183 | } | ||
1184 | |||
1185 | /* Setup Rx descriptor ring and Rx buffers */ | ||
1186 | |||
1187 | if (!rx_ring->count) | ||
1188 | rx_ring->count = E1000_DEFAULT_RXD; | ||
1189 | |||
1190 | rx_ring->buffer_info = kcalloc(rx_ring->count, | ||
1191 | sizeof(struct e1000_buffer), | ||
1192 | GFP_KERNEL); | ||
1193 | if (!(rx_ring->buffer_info)) { | ||
1194 | ret_val = 5; | ||
1195 | goto err_nomem; | ||
1196 | } | ||
1197 | |||
1198 | rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); | ||
1199 | rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, | ||
1200 | &rx_ring->dma, GFP_KERNEL); | ||
1201 | if (!rx_ring->desc) { | ||
1202 | ret_val = 6; | ||
1203 | goto err_nomem; | ||
1204 | } | ||
1205 | rx_ring->next_to_use = 0; | ||
1206 | rx_ring->next_to_clean = 0; | ||
1207 | |||
1208 | rctl = er32(RCTL); | ||
1209 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | ||
1210 | ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); | ||
1211 | ew32(RDBAH, ((u64) rx_ring->dma >> 32)); | ||
1212 | ew32(RDLEN, rx_ring->size); | ||
1213 | ew32(RDH, 0); | ||
1214 | ew32(RDT, 0); | ||
1215 | rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | | ||
1216 | E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | | ||
1217 | E1000_RCTL_SBP | E1000_RCTL_SECRC | | ||
1218 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | ||
1219 | (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); | ||
1220 | ew32(RCTL, rctl); | ||
1221 | |||
1222 | for (i = 0; i < rx_ring->count; i++) { | ||
1223 | struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); | ||
1224 | struct sk_buff *skb; | ||
1225 | |||
1226 | skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL); | ||
1227 | if (!skb) { | ||
1228 | ret_val = 7; | ||
1229 | goto err_nomem; | ||
1230 | } | ||
1231 | skb_reserve(skb, NET_IP_ALIGN); | ||
1232 | rx_ring->buffer_info[i].skb = skb; | ||
1233 | rx_ring->buffer_info[i].dma = | ||
1234 | dma_map_single(&pdev->dev, skb->data, 2048, | ||
1235 | DMA_FROM_DEVICE); | ||
1236 | if (dma_mapping_error(&pdev->dev, | ||
1237 | rx_ring->buffer_info[i].dma)) { | ||
1238 | ret_val = 8; | ||
1239 | goto err_nomem; | ||
1240 | } | ||
1241 | rx_desc->buffer_addr = | ||
1242 | cpu_to_le64(rx_ring->buffer_info[i].dma); | ||
1243 | memset(skb->data, 0x00, skb->len); | ||
1244 | } | ||
1245 | |||
1246 | return 0; | ||
1247 | |||
1248 | err_nomem: | ||
1249 | e1000_free_desc_rings(adapter); | ||
1250 | return ret_val; | ||
1251 | } | ||
1252 | |||
1253 | static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) | ||
1254 | { | ||
1255 | /* Write out to PHY registers 29 and 30 to disable the Receiver. */ | ||
1256 | e1e_wphy(&adapter->hw, 29, 0x001F); | ||
1257 | e1e_wphy(&adapter->hw, 30, 0x8FFC); | ||
1258 | e1e_wphy(&adapter->hw, 29, 0x001A); | ||
1259 | e1e_wphy(&adapter->hw, 30, 0x8FF0); | ||
1260 | } | ||
1261 | |||
1262 | static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | ||
1263 | { | ||
1264 | struct e1000_hw *hw = &adapter->hw; | ||
1265 | u32 ctrl_reg = 0; | ||
1266 | u16 phy_reg = 0; | ||
1267 | s32 ret_val = 0; | ||
1268 | |||
1269 | hw->mac.autoneg = 0; | ||
1270 | |||
1271 | if (hw->phy.type == e1000_phy_ife) { | ||
1272 | /* force 100, set loopback */ | ||
1273 | e1e_wphy(hw, PHY_CONTROL, 0x6100); | ||
1274 | |||
1275 | /* Now set up the MAC to the same speed/duplex as the PHY. */ | ||
1276 | ctrl_reg = er32(CTRL); | ||
1277 | ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ | ||
1278 | ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ | ||
1279 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ | ||
1280 | E1000_CTRL_SPD_100 |/* Force Speed to 100 */ | ||
1281 | E1000_CTRL_FD); /* Force Duplex to FULL */ | ||
1282 | |||
1283 | ew32(CTRL, ctrl_reg); | ||
1284 | e1e_flush(); | ||
1285 | udelay(500); | ||
1286 | |||
1287 | return 0; | ||
1288 | } | ||
1289 | |||
1290 | /* Specific PHY configuration for loopback */ | ||
1291 | switch (hw->phy.type) { | ||
1292 | case e1000_phy_m88: | ||
1293 | /* Auto-MDI/MDIX Off */ | ||
1294 | e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); | ||
1295 | /* reset to update Auto-MDI/MDIX */ | ||
1296 | e1e_wphy(hw, PHY_CONTROL, 0x9140); | ||
1297 | /* autoneg off */ | ||
1298 | e1e_wphy(hw, PHY_CONTROL, 0x8140); | ||
1299 | break; | ||
1300 | case e1000_phy_gg82563: | ||
1301 | e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); | ||
1302 | break; | ||
1303 | case e1000_phy_bm: | ||
1304 | /* Set Default MAC Interface speed to 1GB */ | ||
1305 | e1e_rphy(hw, PHY_REG(2, 21), &phy_reg); | ||
1306 | phy_reg &= ~0x0007; | ||
1307 | phy_reg |= 0x006; | ||
1308 | e1e_wphy(hw, PHY_REG(2, 21), phy_reg); | ||
1309 | /* Assert SW reset for above settings to take effect */ | ||
1310 | e1000e_commit_phy(hw); | ||
1311 | mdelay(1); | ||
1312 | /* Force Full Duplex */ | ||
1313 | e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); | ||
1314 | e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); | ||
1315 | /* Set Link Up (in force link) */ | ||
1316 | e1e_rphy(hw, PHY_REG(776, 16), &phy_reg); | ||
1317 | e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040); | ||
1318 | /* Force Link */ | ||
1319 | e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); | ||
1320 | e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040); | ||
1321 | /* Set Early Link Enable */ | ||
1322 | e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); | ||
1323 | e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400); | ||
1324 | break; | ||
1325 | case e1000_phy_82577: | ||
1326 | case e1000_phy_82578: | ||
1327 | /* Workaround: K1 must be disabled for stable 1Gbps operation */ | ||
1328 | ret_val = hw->phy.ops.acquire(hw); | ||
1329 | if (ret_val) { | ||
1330 | e_err("Cannot setup 1Gbps loopback.\n"); | ||
1331 | return ret_val; | ||
1332 | } | ||
1333 | e1000_configure_k1_ich8lan(hw, false); | ||
1334 | hw->phy.ops.release(hw); | ||
1335 | break; | ||
1336 | case e1000_phy_82579: | ||
1337 | /* Disable PHY energy detect power down */ | ||
1338 | e1e_rphy(hw, PHY_REG(0, 21), &phy_reg); | ||
1339 | e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3)); | ||
1340 | /* Disable full chip energy detect */ | ||
1341 | e1e_rphy(hw, PHY_REG(776, 18), &phy_reg); | ||
1342 | e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1); | ||
1343 | /* Enable loopback on the PHY */ | ||
1344 | #define I82577_PHY_LBK_CTRL 19 | ||
1345 | e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001); | ||
1346 | break; | ||
1347 | default: | ||
1348 | break; | ||
1349 | } | ||
1350 | |||
1351 | /* force 1000, set loopback */ | ||
1352 | e1e_wphy(hw, PHY_CONTROL, 0x4140); | ||
1353 | mdelay(250); | ||
1354 | |||
1355 | /* Now set up the MAC to the same speed/duplex as the PHY. */ | ||
1356 | ctrl_reg = er32(CTRL); | ||
1357 | ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ | ||
1358 | ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ | ||
1359 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ | ||
1360 | E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ | ||
1361 | E1000_CTRL_FD); /* Force Duplex to FULL */ | ||
1362 | |||
1363 | if (adapter->flags & FLAG_IS_ICH) | ||
1364 | ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ | ||
1365 | |||
1366 | if (hw->phy.media_type == e1000_media_type_copper && | ||
1367 | hw->phy.type == e1000_phy_m88) { | ||
1368 | ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ | ||
1369 | } else { | ||
1370 | /* | ||
1371 | * Set the ILOS bit on the fiber Nic if half duplex link is | ||
1372 | * detected. | ||
1373 | */ | ||
1374 | if ((er32(STATUS) & E1000_STATUS_FD) == 0) | ||
1375 | ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); | ||
1376 | } | ||
1377 | |||
1378 | ew32(CTRL, ctrl_reg); | ||
1379 | |||
1380 | /* | ||
1381 | * Disable the receiver on the PHY so that the PHY does not begin to | ||
1382 | * autoneg when a cable is plugged back into the NIC. | ||
1383 | */ | ||
1384 | if (hw->phy.type == e1000_phy_m88) | ||
1385 | e1000_phy_disable_receiver(adapter); | ||
1386 | |||
1387 | udelay(500); | ||
1388 | |||
1389 | return 0; | ||
1390 | } | ||
1391 | |||
1392 | static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) | ||
1393 | { | ||
1394 | struct e1000_hw *hw = &adapter->hw; | ||
1395 | u32 ctrl = er32(CTRL); | ||
1396 | int link = 0; | ||
1397 | |||
1398 | /* special requirements for 82571/82572 fiber adapters */ | ||
1399 | |||
1400 | /* | ||
1401 | * jump through hoops to make sure link is up because serdes | ||
1402 | * link is hardwired up | ||
1403 | */ | ||
1404 | ctrl |= E1000_CTRL_SLU; | ||
1405 | ew32(CTRL, ctrl); | ||
1406 | |||
1407 | /* disable autoneg */ | ||
1408 | ctrl = er32(TXCW); | ||
1409 | ctrl &= ~(1 << 31); | ||
1410 | ew32(TXCW, ctrl); | ||
1411 | |||
1412 | link = (er32(STATUS) & E1000_STATUS_LU); | ||
1413 | |||
1414 | if (!link) { | ||
1415 | /* set invert loss of signal */ | ||
1416 | ctrl = er32(CTRL); | ||
1417 | ctrl |= E1000_CTRL_ILOS; | ||
1418 | ew32(CTRL, ctrl); | ||
1419 | } | ||
1420 | |||
1421 | /* | ||
1422 | * special write to serdes control register to enable SerDes analog | ||
1423 | * loopback | ||
1424 | */ | ||
1425 | #define E1000_SERDES_LB_ON 0x410 | ||
1426 | ew32(SCTL, E1000_SERDES_LB_ON); | ||
1427 | e1e_flush(); | ||
1428 | usleep_range(10000, 20000); | ||
1429 | |||
1430 | return 0; | ||
1431 | } | ||
1432 | |||
1433 | /* only call this for fiber/serdes connections to es2lan */ | ||
1434 | static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) | ||
1435 | { | ||
1436 | struct e1000_hw *hw = &adapter->hw; | ||
1437 | u32 ctrlext = er32(CTRL_EXT); | ||
1438 | u32 ctrl = er32(CTRL); | ||
1439 | |||
1440 | /* | ||
1441 | * save CTRL_EXT to restore later, reuse an empty variable (unused | ||
1442 | * on mac_type 80003es2lan) | ||
1443 | */ | ||
1444 | adapter->tx_fifo_head = ctrlext; | ||
1445 | |||
1446 | /* clear the serdes mode bits, putting the device into mac loopback */ | ||
1447 | ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; | ||
1448 | ew32(CTRL_EXT, ctrlext); | ||
1449 | |||
1450 | /* force speed to 1000/FD, link up */ | ||
1451 | ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); | ||
1452 | ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | | ||
1453 | E1000_CTRL_SPD_1000 | E1000_CTRL_FD); | ||
1454 | ew32(CTRL, ctrl); | ||
1455 | |||
1456 | /* set mac loopback */ | ||
1457 | ctrl = er32(RCTL); | ||
1458 | ctrl |= E1000_RCTL_LBM_MAC; | ||
1459 | ew32(RCTL, ctrl); | ||
1460 | |||
1461 | /* set testing mode parameters (no need to reset later) */ | ||
1462 | #define KMRNCTRLSTA_OPMODE (0x1F << 16) | ||
1463 | #define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 | ||
1464 | ew32(KMRNCTRLSTA, | ||
1465 | (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); | ||
1466 | |||
1467 | return 0; | ||
1468 | } | ||
1469 | |||
1470 | static int e1000_setup_loopback_test(struct e1000_adapter *adapter) | ||
1471 | { | ||
1472 | struct e1000_hw *hw = &adapter->hw; | ||
1473 | u32 rctl; | ||
1474 | |||
1475 | if (hw->phy.media_type == e1000_media_type_fiber || | ||
1476 | hw->phy.media_type == e1000_media_type_internal_serdes) { | ||
1477 | switch (hw->mac.type) { | ||
1478 | case e1000_80003es2lan: | ||
1479 | return e1000_set_es2lan_mac_loopback(adapter); | ||
1480 | break; | ||
1481 | case e1000_82571: | ||
1482 | case e1000_82572: | ||
1483 | return e1000_set_82571_fiber_loopback(adapter); | ||
1484 | break; | ||
1485 | default: | ||
1486 | rctl = er32(RCTL); | ||
1487 | rctl |= E1000_RCTL_LBM_TCVR; | ||
1488 | ew32(RCTL, rctl); | ||
1489 | return 0; | ||
1490 | } | ||
1491 | } else if (hw->phy.media_type == e1000_media_type_copper) { | ||
1492 | return e1000_integrated_phy_loopback(adapter); | ||
1493 | } | ||
1494 | |||
1495 | return 7; | ||
1496 | } | ||
1497 | |||
1498 | static void e1000_loopback_cleanup(struct e1000_adapter *adapter) | ||
1499 | { | ||
1500 | struct e1000_hw *hw = &adapter->hw; | ||
1501 | u32 rctl; | ||
1502 | u16 phy_reg; | ||
1503 | |||
1504 | rctl = er32(RCTL); | ||
1505 | rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); | ||
1506 | ew32(RCTL, rctl); | ||
1507 | |||
1508 | switch (hw->mac.type) { | ||
1509 | case e1000_80003es2lan: | ||
1510 | if (hw->phy.media_type == e1000_media_type_fiber || | ||
1511 | hw->phy.media_type == e1000_media_type_internal_serdes) { | ||
1512 | /* restore CTRL_EXT, stealing space from tx_fifo_head */ | ||
1513 | ew32(CTRL_EXT, adapter->tx_fifo_head); | ||
1514 | adapter->tx_fifo_head = 0; | ||
1515 | } | ||
1516 | /* fall through */ | ||
1517 | case e1000_82571: | ||
1518 | case e1000_82572: | ||
1519 | if (hw->phy.media_type == e1000_media_type_fiber || | ||
1520 | hw->phy.media_type == e1000_media_type_internal_serdes) { | ||
1521 | #define E1000_SERDES_LB_OFF 0x400 | ||
1522 | ew32(SCTL, E1000_SERDES_LB_OFF); | ||
1523 | e1e_flush(); | ||
1524 | usleep_range(10000, 20000); | ||
1525 | break; | ||
1526 | } | ||
1527 | /* Fall Through */ | ||
1528 | default: | ||
1529 | hw->mac.autoneg = 1; | ||
1530 | if (hw->phy.type == e1000_phy_gg82563) | ||
1531 | e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180); | ||
1532 | e1e_rphy(hw, PHY_CONTROL, &phy_reg); | ||
1533 | if (phy_reg & MII_CR_LOOPBACK) { | ||
1534 | phy_reg &= ~MII_CR_LOOPBACK; | ||
1535 | e1e_wphy(hw, PHY_CONTROL, phy_reg); | ||
1536 | e1000e_commit_phy(hw); | ||
1537 | } | ||
1538 | break; | ||
1539 | } | ||
1540 | } | ||
1541 | |||
1542 | static void e1000_create_lbtest_frame(struct sk_buff *skb, | ||
1543 | unsigned int frame_size) | ||
1544 | { | ||
1545 | memset(skb->data, 0xFF, frame_size); | ||
1546 | frame_size &= ~1; | ||
1547 | memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); | ||
1548 | memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); | ||
1549 | memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); | ||
1550 | } | ||
1551 | |||
1552 | static int e1000_check_lbtest_frame(struct sk_buff *skb, | ||
1553 | unsigned int frame_size) | ||
1554 | { | ||
1555 | frame_size &= ~1; | ||
1556 | if (*(skb->data + 3) == 0xFF) | ||
1557 | if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && | ||
1558 | (*(skb->data + frame_size / 2 + 12) == 0xAF)) | ||
1559 | return 0; | ||
1560 | return 13; | ||
1561 | } | ||
1562 | |||
1563 | static int e1000_run_loopback_test(struct e1000_adapter *adapter) | ||
1564 | { | ||
1565 | struct e1000_ring *tx_ring = &adapter->test_tx_ring; | ||
1566 | struct e1000_ring *rx_ring = &adapter->test_rx_ring; | ||
1567 | struct pci_dev *pdev = adapter->pdev; | ||
1568 | struct e1000_hw *hw = &adapter->hw; | ||
1569 | int i, j, k, l; | ||
1570 | int lc; | ||
1571 | int good_cnt; | ||
1572 | int ret_val = 0; | ||
1573 | unsigned long time; | ||
1574 | |||
1575 | ew32(RDT, rx_ring->count - 1); | ||
1576 | |||
1577 | /* | ||
1578 | * Calculate the loop count based on the largest descriptor ring. | ||
1579 | * The idea is to wrap the largest ring a number of times using 64 | ||
1580 | * send/receive pairs during each loop. | ||
1581 | */ | ||
1582 | |||
1583 | if (rx_ring->count <= tx_ring->count) | ||
1584 | lc = ((tx_ring->count / 64) * 2) + 1; | ||
1585 | else | ||
1586 | lc = ((rx_ring->count / 64) * 2) + 1; | ||
1587 | |||
1588 | k = 0; | ||
1589 | l = 0; | ||
1590 | for (j = 0; j <= lc; j++) { /* loop count loop */ | ||
1591 | for (i = 0; i < 64; i++) { /* send the packets */ | ||
1592 | e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb, | ||
1593 | 1024); | ||
1594 | dma_sync_single_for_device(&pdev->dev, | ||
1595 | tx_ring->buffer_info[k].dma, | ||
1596 | tx_ring->buffer_info[k].length, | ||
1597 | DMA_TO_DEVICE); | ||
1598 | k++; | ||
1599 | if (k == tx_ring->count) | ||
1600 | k = 0; | ||
1601 | } | ||
1602 | ew32(TDT, k); | ||
1603 | e1e_flush(); | ||
1604 | msleep(200); | ||
1605 | time = jiffies; /* set the start time for the receive */ | ||
1606 | good_cnt = 0; | ||
1607 | do { /* receive the sent packets */ | ||
1608 | dma_sync_single_for_cpu(&pdev->dev, | ||
1609 | rx_ring->buffer_info[l].dma, 2048, | ||
1610 | DMA_FROM_DEVICE); | ||
1611 | |||
1612 | ret_val = e1000_check_lbtest_frame( | ||
1613 | rx_ring->buffer_info[l].skb, 1024); | ||
1614 | if (!ret_val) | ||
1615 | good_cnt++; | ||
1616 | l++; | ||
1617 | if (l == rx_ring->count) | ||
1618 | l = 0; | ||
1619 | /* | ||
1620 | * time + 20 msecs (200 msecs on 2.4) is more than | ||
1621 | * enough time to complete the receives; if it's | ||
1622 | * exceeded, break out and report an error | ||
1623 | */ | ||
1624 | } while ((good_cnt < 64) && !time_after(jiffies, time + 20)); | ||
1625 | if (good_cnt != 64) { | ||
1626 | ret_val = 13; /* ret_val is the same as mis-compare */ | ||
1627 | break; | ||
1628 | } | ||
1629 | if (jiffies >= (time + 20)) { | ||
1630 | ret_val = 14; /* error code for time out error */ | ||
1631 | break; | ||
1632 | } | ||
1633 | } /* end loop count loop */ | ||
1634 | return ret_val; | ||
1635 | } | ||
1636 | |||
1637 | static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) | ||
1638 | { | ||
1639 | /* | ||
1640 | * PHY loopback cannot be performed if SoL/IDER | ||
1641 | * sessions are active | ||
1642 | */ | ||
1643 | if (e1000_check_reset_block(&adapter->hw)) { | ||
1644 | e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); | ||
1645 | *data = 0; | ||
1646 | goto out; | ||
1647 | } | ||
1648 | |||
1649 | *data = e1000_setup_desc_rings(adapter); | ||
1650 | if (*data) | ||
1651 | goto out; | ||
1652 | |||
1653 | *data = e1000_setup_loopback_test(adapter); | ||
1654 | if (*data) | ||
1655 | goto err_loopback; | ||
1656 | |||
1657 | *data = e1000_run_loopback_test(adapter); | ||
1658 | e1000_loopback_cleanup(adapter); | ||
1659 | |||
1660 | err_loopback: | ||
1661 | e1000_free_desc_rings(adapter); | ||
1662 | out: | ||
1663 | return *data; | ||
1664 | } | ||
1665 | |||
1666 | static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) | ||
1667 | { | ||
1668 | struct e1000_hw *hw = &adapter->hw; | ||
1669 | |||
1670 | *data = 0; | ||
1671 | if (hw->phy.media_type == e1000_media_type_internal_serdes) { | ||
1672 | int i = 0; | ||
1673 | hw->mac.serdes_has_link = false; | ||
1674 | |||
1675 | /* | ||
1676 | * On some blade server designs, link establishment | ||
1677 | * could take as long as 2-3 minutes | ||
1678 | */ | ||
1679 | do { | ||
1680 | hw->mac.ops.check_for_link(hw); | ||
1681 | if (hw->mac.serdes_has_link) | ||
1682 | return *data; | ||
1683 | msleep(20); | ||
1684 | } while (i++ < 3750); | ||
1685 | |||
1686 | *data = 1; | ||
1687 | } else { | ||
1688 | hw->mac.ops.check_for_link(hw); | ||
1689 | if (hw->mac.autoneg) | ||
1690 | /* | ||
1691 | * On some PHY/switch combinations, link establishment | ||
1692 | * can take a few seconds more than expected. | ||
1693 | */ | ||
1694 | msleep(5000); | ||
1695 | |||
1696 | if (!(er32(STATUS) & E1000_STATUS_LU)) | ||
1697 | *data = 1; | ||
1698 | } | ||
1699 | return *data; | ||
1700 | } | ||
1701 | |||
1702 | static int e1000e_get_sset_count(struct net_device *netdev, int sset) | ||
1703 | { | ||
1704 | switch (sset) { | ||
1705 | case ETH_SS_TEST: | ||
1706 | return E1000_TEST_LEN; | ||
1707 | case ETH_SS_STATS: | ||
1708 | return E1000_STATS_LEN; | ||
1709 | default: | ||
1710 | return -EOPNOTSUPP; | ||
1711 | } | ||
1712 | } | ||
1713 | |||
1714 | static void e1000_diag_test(struct net_device *netdev, | ||
1715 | struct ethtool_test *eth_test, u64 *data) | ||
1716 | { | ||
1717 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1718 | u16 autoneg_advertised; | ||
1719 | u8 forced_speed_duplex; | ||
1720 | u8 autoneg; | ||
1721 | bool if_running = netif_running(netdev); | ||
1722 | |||
1723 | set_bit(__E1000_TESTING, &adapter->state); | ||
1724 | |||
1725 | if (!if_running) { | ||
1726 | /* Get control of and reset hardware */ | ||
1727 | if (adapter->flags & FLAG_HAS_AMT) | ||
1728 | e1000e_get_hw_control(adapter); | ||
1729 | |||
1730 | e1000e_power_up_phy(adapter); | ||
1731 | |||
1732 | adapter->hw.phy.autoneg_wait_to_complete = 1; | ||
1733 | e1000e_reset(adapter); | ||
1734 | adapter->hw.phy.autoneg_wait_to_complete = 0; | ||
1735 | } | ||
1736 | |||
1737 | if (eth_test->flags == ETH_TEST_FL_OFFLINE) { | ||
1738 | /* Offline tests */ | ||
1739 | |||
1740 | /* save speed, duplex, autoneg settings */ | ||
1741 | autoneg_advertised = adapter->hw.phy.autoneg_advertised; | ||
1742 | forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; | ||
1743 | autoneg = adapter->hw.mac.autoneg; | ||
1744 | |||
1745 | e_info("offline testing starting\n"); | ||
1746 | |||
1747 | if (if_running) | ||
1748 | /* indicate we're in test mode */ | ||
1749 | dev_close(netdev); | ||
1750 | |||
1751 | if (e1000_reg_test(adapter, &data[0])) | ||
1752 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
1753 | |||
1754 | e1000e_reset(adapter); | ||
1755 | if (e1000_eeprom_test(adapter, &data[1])) | ||
1756 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
1757 | |||
1758 | e1000e_reset(adapter); | ||
1759 | if (e1000_intr_test(adapter, &data[2])) | ||
1760 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
1761 | |||
1762 | e1000e_reset(adapter); | ||
1763 | if (e1000_loopback_test(adapter, &data[3])) | ||
1764 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
1765 | |||
1766 | /* force this routine to wait until autoneg completes or times out */ | ||
1767 | adapter->hw.phy.autoneg_wait_to_complete = 1; | ||
1768 | e1000e_reset(adapter); | ||
1769 | adapter->hw.phy.autoneg_wait_to_complete = 0; | ||
1770 | |||
1771 | if (e1000_link_test(adapter, &data[4])) | ||
1772 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
1773 | |||
1774 | /* restore speed, duplex, autoneg settings */ | ||
1775 | adapter->hw.phy.autoneg_advertised = autoneg_advertised; | ||
1776 | adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; | ||
1777 | adapter->hw.mac.autoneg = autoneg; | ||
1778 | e1000e_reset(adapter); | ||
1779 | |||
1780 | clear_bit(__E1000_TESTING, &adapter->state); | ||
1781 | if (if_running) | ||
1782 | dev_open(netdev); | ||
1783 | } else { | ||
1784 | /* Online tests */ | ||
1785 | |||
1786 | e_info("online testing starting\n"); | ||
1787 | |||
1788 | /* register, eeprom, intr and loopback tests not run online */ | ||
1789 | data[0] = 0; | ||
1790 | data[1] = 0; | ||
1791 | data[2] = 0; | ||
1792 | data[3] = 0; | ||
1793 | |||
1794 | if (e1000_link_test(adapter, &data[4])) | ||
1795 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
1796 | |||
1797 | clear_bit(__E1000_TESTING, &adapter->state); | ||
1798 | } | ||
1799 | |||
1800 | if (!if_running) { | ||
1801 | e1000e_reset(adapter); | ||
1802 | |||
1803 | if (adapter->flags & FLAG_HAS_AMT) | ||
1804 | e1000e_release_hw_control(adapter); | ||
1805 | } | ||
1806 | |||
1807 | msleep_interruptible(4 * 1000); | ||
1808 | } | ||
1809 | |||
1810 | static void e1000_get_wol(struct net_device *netdev, | ||
1811 | struct ethtool_wolinfo *wol) | ||
1812 | { | ||
1813 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1814 | |||
1815 | wol->supported = 0; | ||
1816 | wol->wolopts = 0; | ||
1817 | |||
1818 | if (!(adapter->flags & FLAG_HAS_WOL) || | ||
1819 | !device_can_wakeup(&adapter->pdev->dev)) | ||
1820 | return; | ||
1821 | |||
1822 | wol->supported = WAKE_UCAST | WAKE_MCAST | | ||
1823 | WAKE_BCAST | WAKE_MAGIC | WAKE_PHY; | ||
1824 | |||
1825 | /* apply any specific unsupported masks here */ | ||
1826 | if (adapter->flags & FLAG_NO_WAKE_UCAST) { | ||
1827 | wol->supported &= ~WAKE_UCAST; | ||
1828 | |||
1829 | if (adapter->wol & E1000_WUFC_EX) | ||
1830 | e_err("Interface does not support directed (unicast) " | ||
1831 | "frame wake-up packets\n"); | ||
1832 | } | ||
1833 | |||
1834 | if (adapter->wol & E1000_WUFC_EX) | ||
1835 | wol->wolopts |= WAKE_UCAST; | ||
1836 | if (adapter->wol & E1000_WUFC_MC) | ||
1837 | wol->wolopts |= WAKE_MCAST; | ||
1838 | if (adapter->wol & E1000_WUFC_BC) | ||
1839 | wol->wolopts |= WAKE_BCAST; | ||
1840 | if (adapter->wol & E1000_WUFC_MAG) | ||
1841 | wol->wolopts |= WAKE_MAGIC; | ||
1842 | if (adapter->wol & E1000_WUFC_LNKC) | ||
1843 | wol->wolopts |= WAKE_PHY; | ||
1844 | } | ||
1845 | |||
1846 | static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | ||
1847 | { | ||
1848 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1849 | |||
1850 | if (!(adapter->flags & FLAG_HAS_WOL) || | ||
1851 | !device_can_wakeup(&adapter->pdev->dev) || | ||
1852 | (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | | ||
1853 | WAKE_MAGIC | WAKE_PHY))) | ||
1854 | return -EOPNOTSUPP; | ||
1855 | |||
1856 | /* these settings will always override what we currently have */ | ||
1857 | adapter->wol = 0; | ||
1858 | |||
1859 | if (wol->wolopts & WAKE_UCAST) | ||
1860 | adapter->wol |= E1000_WUFC_EX; | ||
1861 | if (wol->wolopts & WAKE_MCAST) | ||
1862 | adapter->wol |= E1000_WUFC_MC; | ||
1863 | if (wol->wolopts & WAKE_BCAST) | ||
1864 | adapter->wol |= E1000_WUFC_BC; | ||
1865 | if (wol->wolopts & WAKE_MAGIC) | ||
1866 | adapter->wol |= E1000_WUFC_MAG; | ||
1867 | if (wol->wolopts & WAKE_PHY) | ||
1868 | adapter->wol |= E1000_WUFC_LNKC; | ||
1869 | |||
1870 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); | ||
1871 | |||
1872 | return 0; | ||
1873 | } | ||
1874 | |||
1875 | static int e1000_set_phys_id(struct net_device *netdev, | ||
1876 | enum ethtool_phys_id_state state) | ||
1877 | { | ||
1878 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1879 | struct e1000_hw *hw = &adapter->hw; | ||
1880 | |||
1881 | switch (state) { | ||
1882 | case ETHTOOL_ID_ACTIVE: | ||
1883 | if (!hw->mac.ops.blink_led) | ||
1884 | return 2; /* cycle on/off twice per second */ | ||
1885 | |||
1886 | hw->mac.ops.blink_led(hw); | ||
1887 | break; | ||
1888 | |||
1889 | case ETHTOOL_ID_INACTIVE: | ||
1890 | if (hw->phy.type == e1000_phy_ife) | ||
1891 | e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); | ||
1892 | hw->mac.ops.led_off(hw); | ||
1893 | hw->mac.ops.cleanup_led(hw); | ||
1894 | break; | ||
1895 | |||
1896 | case ETHTOOL_ID_ON: | ||
1897 | adapter->hw.mac.ops.led_on(&adapter->hw); | ||
1898 | break; | ||
1899 | |||
1900 | case ETHTOOL_ID_OFF: | ||
1901 | adapter->hw.mac.ops.led_off(&adapter->hw); | ||
1902 | break; | ||
1903 | } | ||
1904 | return 0; | ||
1905 | } | ||
1906 | |||
1907 | static int e1000_get_coalesce(struct net_device *netdev, | ||
1908 | struct ethtool_coalesce *ec) | ||
1909 | { | ||
1910 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1911 | |||
1912 | if (adapter->itr_setting <= 4) | ||
1913 | ec->rx_coalesce_usecs = adapter->itr_setting; | ||
1914 | else | ||
1915 | ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; | ||
1916 | |||
1917 | return 0; | ||
1918 | } | ||
1919 | |||
1920 | static int e1000_set_coalesce(struct net_device *netdev, | ||
1921 | struct ethtool_coalesce *ec) | ||
1922 | { | ||
1923 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1924 | struct e1000_hw *hw = &adapter->hw; | ||
1925 | |||
1926 | if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || | ||
1927 | ((ec->rx_coalesce_usecs > 4) && | ||
1928 | (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || | ||
1929 | (ec->rx_coalesce_usecs == 2)) | ||
1930 | return -EINVAL; | ||
1931 | |||
1932 | if (ec->rx_coalesce_usecs == 4) { | ||
1933 | adapter->itr = adapter->itr_setting = 4; | ||
1934 | } else if (ec->rx_coalesce_usecs <= 3) { | ||
1935 | adapter->itr = 20000; | ||
1936 | adapter->itr_setting = ec->rx_coalesce_usecs; | ||
1937 | } else { | ||
1938 | adapter->itr = (1000000 / ec->rx_coalesce_usecs); | ||
1939 | adapter->itr_setting = adapter->itr & ~3; | ||
1940 | } | ||
1941 | |||
1942 | if (adapter->itr_setting != 0) | ||
1943 | ew32(ITR, 1000000000 / (adapter->itr * 256)); | ||
1944 | else | ||
1945 | ew32(ITR, 0); | ||
1946 | |||
1947 | return 0; | ||
1948 | } | ||
1949 | |||
1950 | static int e1000_nway_reset(struct net_device *netdev) | ||
1951 | { | ||
1952 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1953 | |||
1954 | if (!netif_running(netdev)) | ||
1955 | return -EAGAIN; | ||
1956 | |||
1957 | if (!adapter->hw.mac.autoneg) | ||
1958 | return -EINVAL; | ||
1959 | |||
1960 | e1000e_reinit_locked(adapter); | ||
1961 | |||
1962 | return 0; | ||
1963 | } | ||
1964 | |||
1965 | static void e1000_get_ethtool_stats(struct net_device *netdev, | ||
1966 | struct ethtool_stats *stats, | ||
1967 | u64 *data) | ||
1968 | { | ||
1969 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1970 | struct rtnl_link_stats64 net_stats; | ||
1971 | int i; | ||
1972 | char *p = NULL; | ||
1973 | |||
1974 | e1000e_get_stats64(netdev, &net_stats); | ||
1975 | for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { | ||
1976 | switch (e1000_gstrings_stats[i].type) { | ||
1977 | case NETDEV_STATS: | ||
1978 | p = (char *) &net_stats + | ||
1979 | e1000_gstrings_stats[i].stat_offset; | ||
1980 | break; | ||
1981 | case E1000_STATS: | ||
1982 | p = (char *) adapter + | ||
1983 | e1000_gstrings_stats[i].stat_offset; | ||
1984 | break; | ||
1985 | default: | ||
1986 | data[i] = 0; | ||
1987 | continue; | ||
1988 | } | ||
1989 | |||
1990 | data[i] = (e1000_gstrings_stats[i].sizeof_stat == | ||
1991 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; | ||
1992 | } | ||
1993 | } | ||
1994 | |||
1995 | static void e1000_get_strings(struct net_device *netdev, u32 stringset, | ||
1996 | u8 *data) | ||
1997 | { | ||
1998 | u8 *p = data; | ||
1999 | int i; | ||
2000 | |||
2001 | switch (stringset) { | ||
2002 | case ETH_SS_TEST: | ||
2003 | memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); | ||
2004 | break; | ||
2005 | case ETH_SS_STATS: | ||
2006 | for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { | ||
2007 | memcpy(p, e1000_gstrings_stats[i].stat_string, | ||
2008 | ETH_GSTRING_LEN); | ||
2009 | p += ETH_GSTRING_LEN; | ||
2010 | } | ||
2011 | break; | ||
2012 | } | ||
2013 | } | ||
2014 | |||
2015 | static int e1000e_set_flags(struct net_device *netdev, u32 data) | ||
2016 | { | ||
2017 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
2018 | bool need_reset = false; | ||
2019 | int rc; | ||
2020 | |||
2021 | need_reset = (data & ETH_FLAG_RXVLAN) != | ||
2022 | (netdev->features & NETIF_F_HW_VLAN_RX); | ||
2023 | |||
2024 | rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN | | ||
2025 | ETH_FLAG_TXVLAN); | ||
2026 | |||
2027 | if (rc) | ||
2028 | return rc; | ||
2029 | |||
2030 | if (need_reset) { | ||
2031 | if (netif_running(netdev)) | ||
2032 | e1000e_reinit_locked(adapter); | ||
2033 | else | ||
2034 | e1000e_reset(adapter); | ||
2035 | } | ||
2036 | |||
2037 | return 0; | ||
2038 | } | ||
2039 | |||
2040 | static const struct ethtool_ops e1000_ethtool_ops = { | ||
2041 | .get_settings = e1000_get_settings, | ||
2042 | .set_settings = e1000_set_settings, | ||
2043 | .get_drvinfo = e1000_get_drvinfo, | ||
2044 | .get_regs_len = e1000_get_regs_len, | ||
2045 | .get_regs = e1000_get_regs, | ||
2046 | .get_wol = e1000_get_wol, | ||
2047 | .set_wol = e1000_set_wol, | ||
2048 | .get_msglevel = e1000_get_msglevel, | ||
2049 | .set_msglevel = e1000_set_msglevel, | ||
2050 | .nway_reset = e1000_nway_reset, | ||
2051 | .get_link = ethtool_op_get_link, | ||
2052 | .get_eeprom_len = e1000_get_eeprom_len, | ||
2053 | .get_eeprom = e1000_get_eeprom, | ||
2054 | .set_eeprom = e1000_set_eeprom, | ||
2055 | .get_ringparam = e1000_get_ringparam, | ||
2056 | .set_ringparam = e1000_set_ringparam, | ||
2057 | .get_pauseparam = e1000_get_pauseparam, | ||
2058 | .set_pauseparam = e1000_set_pauseparam, | ||
2059 | .get_rx_csum = e1000_get_rx_csum, | ||
2060 | .set_rx_csum = e1000_set_rx_csum, | ||
2061 | .get_tx_csum = e1000_get_tx_csum, | ||
2062 | .set_tx_csum = e1000_set_tx_csum, | ||
2063 | .get_sg = ethtool_op_get_sg, | ||
2064 | .set_sg = ethtool_op_set_sg, | ||
2065 | .get_tso = ethtool_op_get_tso, | ||
2066 | .set_tso = e1000_set_tso, | ||
2067 | .self_test = e1000_diag_test, | ||
2068 | .get_strings = e1000_get_strings, | ||
2069 | .set_phys_id = e1000_set_phys_id, | ||
2070 | .get_ethtool_stats = e1000_get_ethtool_stats, | ||
2071 | .get_sset_count = e1000e_get_sset_count, | ||
2072 | .get_coalesce = e1000_get_coalesce, | ||
2073 | .set_coalesce = e1000_set_coalesce, | ||
2074 | .get_flags = ethtool_op_get_flags, | ||
2075 | .set_flags = e1000e_set_flags, | ||
2076 | }; | ||
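/*
 * Editor's illustrative note, not part of the original driver: from user
 * space these callbacks are reached through the ethtool utility, e.g.
 * (the interface name eth0 is only a placeholder):
 *
 *	ethtool -C eth0 rx-usecs 100	-> e1000_set_coalesce()
 *	ethtool -p eth0 5		-> e1000_set_phys_id(), blink for 5 s
 *	ethtool -t eth0 offline		-> e1000_diag_test()
 */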
2077 | |||
2078 | void e1000e_set_ethtool_ops(struct net_device *netdev) | ||
2079 | { | ||
2080 | SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops); | ||
2081 | } | ||
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h new file mode 100644 index 000000000000..29670397079b --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/hw.h | |||
@@ -0,0 +1,984 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/1000 Linux driver | ||
4 | Copyright(c) 1999 - 2011 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #ifndef _E1000_HW_H_ | ||
30 | #define _E1000_HW_H_ | ||
31 | |||
32 | #include <linux/types.h> | ||
33 | |||
34 | struct e1000_hw; | ||
35 | struct e1000_adapter; | ||
36 | |||
37 | #include "defines.h" | ||
38 | |||
39 | #define er32(reg) __er32(hw, E1000_##reg) | ||
40 | #define ew32(reg,val) __ew32(hw, E1000_##reg, (val)) | ||
41 | #define e1e_flush() er32(STATUS) | ||
42 | |||
43 | #define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \ | ||
44 | (writel((value), ((a)->hw_addr + reg + ((offset) << 2)))) | ||
45 | |||
46 | #define E1000_READ_REG_ARRAY(a, reg, offset) \ | ||
47 | (readl((a)->hw_addr + reg + ((offset) << 2))) | ||
48 | |||
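/*
 * Editor's illustrative expansion, not part of the original header: the
 * accessors above rely on a 'struct e1000_hw *hw' being in scope at the
 * call site ('i' and 'mta_entry' below are placeholders):
 *
 *	ew32(ITR, 0);
 *	  -> __ew32(hw, E1000_ITR, 0)
 *	E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, mta_entry);
 *	  -> writel(mta_entry, hw->hw_addr + E1000_MTA + (i << 2))
 */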
49 | enum e1e_registers { | ||
50 | E1000_CTRL = 0x00000, /* Device Control - RW */ | ||
51 | E1000_STATUS = 0x00008, /* Device Status - RO */ | ||
52 | E1000_EECD = 0x00010, /* EEPROM/Flash Control - RW */ | ||
53 | E1000_EERD = 0x00014, /* EEPROM Read - RW */ | ||
54 | E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */ | ||
55 | E1000_FLA = 0x0001C, /* Flash Access - RW */ | ||
56 | E1000_MDIC = 0x00020, /* MDI Control - RW */ | ||
57 | E1000_SCTL = 0x00024, /* SerDes Control - RW */ | ||
58 | E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ | ||
59 | E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ | ||
60 | E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */ | ||
61 | E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ | ||
62 | E1000_FCT = 0x00030, /* Flow Control Type - RW */ | ||
63 | E1000_VET = 0x00038, /* VLAN Ether Type - RW */ | ||
64 | E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */ | ||
65 | E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */ | ||
66 | E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */ | ||
67 | E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */ | ||
68 | E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */ | ||
69 | E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */ | ||
70 | E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */ | ||
71 | E1000_IVAR = 0x000E4, /* Interrupt Vector Allocation - RW */ | ||
72 | E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */ | ||
73 | #define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2)) | ||
74 | E1000_RCTL = 0x00100, /* Rx Control - RW */ | ||
75 | E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */ | ||
76 | E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */ | ||
77 | E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */ | ||
78 | E1000_TCTL = 0x00400, /* Tx Control - RW */ | ||
79 | E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */ | ||
80 | E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */ | ||
81 | E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */ | ||
82 | E1000_LEDCTL = 0x00E00, /* LED Control - RW */ | ||
83 | E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ | ||
84 | E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ | ||
85 | E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */ | ||
86 | #define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ | ||
87 | E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ | ||
88 | E1000_PBS = 0x01008, /* Packet Buffer Size */ | ||
89 | E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ | ||
90 | E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */ | ||
91 | E1000_FLOP = 0x0103C, /* FLASH Opcode Register */ | ||
92 | E1000_PBA_ECC = 0x01100, /* PBA ECC Register */ | ||
93 | E1000_ERT = 0x02008, /* Early Rx Threshold - RW */ | ||
94 | E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ | ||
95 | E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ | ||
96 | E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ | ||
97 | E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */ | ||
98 | E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */ | ||
99 | E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */ | ||
100 | E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */ | ||
101 | E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */ | ||
102 | E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ | ||
103 | E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ | ||
104 | #define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) | ||
105 | E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */ | ||
106 | |||
107 | /* Convenience macros | ||
108 | * | ||
109 | * Note: "_n" is the queue number of the register to be written to. | ||
110 | * | ||
111 | * Example usage: | ||
112 | * E1000_RDBAL_REG(current_rx_queue) | ||
113 | * | ||
114 | */ | ||
115 | #define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) | ||
116 | E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ | ||
117 | E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */ | ||
118 | E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */ | ||
119 | E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */ | ||
120 | E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */ | ||
121 | E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */ | ||
122 | E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */ | ||
123 | E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */ | ||
124 | #define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8)) | ||
125 | E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */ | ||
126 | E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */ | ||
127 | #define E1000_TARC(_n) (E1000_TARC_BASE + (_n << 8)) | ||
128 | E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */ | ||
129 | E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */ | ||
130 | E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */ | ||
131 | E1000_RXERRC = 0x0400C, /* Receive Error Count - R/clr */ | ||
132 | E1000_MPC = 0x04010, /* Missed Packet Count - R/clr */ | ||
133 | E1000_SCC = 0x04014, /* Single Collision Count - R/clr */ | ||
134 | E1000_ECOL = 0x04018, /* Excessive Collision Count - R/clr */ | ||
135 | E1000_MCC = 0x0401C, /* Multiple Collision Count - R/clr */ | ||
136 | E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */ | ||
137 | E1000_COLC = 0x04028, /* Collision Count - R/clr */ | ||
138 | E1000_DC = 0x04030, /* Defer Count - R/clr */ | ||
139 | E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */ | ||
140 | E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */ | ||
141 | E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */ | ||
142 | E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */ | ||
143 | E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */ | ||
144 | E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */ | ||
145 | E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */ | ||
146 | E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */ | ||
147 | E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */ | ||
148 | E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */ | ||
149 | E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */ | ||
150 | E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */ | ||
151 | E1000_PRC511 = 0x04068, /* Packets Rx (256-511 bytes) - R/clr */ | ||
152 | E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */ | ||
153 | E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */ | ||
154 | E1000_GPRC = 0x04074, /* Good Packets Rx Count - R/clr */ | ||
155 | E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */ | ||
156 | E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */ | ||
157 | E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */ | ||
158 | E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */ | ||
159 | E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */ | ||
160 | E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */ | ||
161 | E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */ | ||
162 | E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */ | ||
163 | E1000_RUC = 0x040A4, /* Rx Undersize Count - R/clr */ | ||
164 | E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */ | ||
165 | E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */ | ||
166 | E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */ | ||
167 | E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */ | ||
168 | E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */ | ||
169 | E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */ | ||
170 | E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */ | ||
171 | E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */ | ||
172 | E1000_TOTL = 0x040C8, /* Total Octets Tx Low - R/clr */ | ||
173 | E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */ | ||
174 | E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */ | ||
175 | E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */ | ||
176 | E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */ | ||
177 | E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */ | ||
178 | E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */ | ||
179 | E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */ | ||
180 | E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */ | ||
181 | E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */ | ||
182 | E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */ | ||
183 | E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */ | ||
184 | E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */ | ||
185 | E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */ | ||
186 | E1000_IAC = 0x04100, /* Interrupt Assertion Count */ | ||
187 | E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */ | ||
188 | E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */ | ||
189 | E1000_ICTXPTC = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */ | ||
190 | E1000_ICTXATC = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */ | ||
191 | E1000_ICTXQEC = 0x04118, /* Irq Cause Tx Queue Empty Count */ | ||
192 | E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */ | ||
193 | E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ | ||
194 | E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ | ||
195 | E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */ | ||
196 | E1000_RFCTL = 0x05008, /* Receive Filter Control */ | ||
197 | E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ | ||
198 | E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */ | ||
199 | #define E1000_RAL(_n) (E1000_RAL_BASE + ((_n) * 8)) | ||
200 | #define E1000_RA (E1000_RAL(0)) | ||
201 | E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */ | ||
202 | #define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8)) | ||
203 | E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ | ||
204 | E1000_WUC = 0x05800, /* Wakeup Control - RW */ | ||
205 | E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ | ||
206 | E1000_WUS = 0x05810, /* Wakeup Status - RO */ | ||
207 | E1000_MANC = 0x05820, /* Management Control - RW */ | ||
208 | E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */ | ||
209 | E1000_HOST_IF = 0x08800, /* Host Interface */ | ||
210 | |||
211 | E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */ | ||
212 | E1000_MANC2H = 0x05860, /* Management Control To Host - RW */ | ||
213 | E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */ | ||
214 | #define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4)) | ||
215 | E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */ | ||
216 | E1000_GCR = 0x05B00, /* PCI-Ex Control */ | ||
217 | E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */ | ||
218 | E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ | ||
219 | E1000_SWSM = 0x05B50, /* SW Semaphore */ | ||
220 | E1000_FWSM = 0x05B54, /* FW Semaphore */ | ||
221 | E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */ | ||
222 | E1000_FFLT_DBG = 0x05F04, /* Debug Register */ | ||
223 | E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */ | ||
224 | #define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4)) | ||
225 | #define E1000_CRC_OFFSET E1000_PCH_RAICC_BASE | ||
226 | E1000_HICR = 0x08F00, /* Host Interface Control */ | ||
227 | }; | ||
228 | |||
229 | #define E1000_MAX_PHY_ADDR 4 | ||
230 | |||
231 | /* IGP01E1000 Specific Registers */ | ||
232 | #define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ | ||
233 | #define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ | ||
234 | #define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ | ||
235 | #define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ | ||
236 | #define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ | ||
237 | #define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ | ||
238 | #define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ | ||
239 | #define IGP_PAGE_SHIFT 5 | ||
240 | #define PHY_REG_MASK 0x1F | ||
241 | |||
242 | #define BM_WUC_PAGE 800 | ||
243 | #define BM_WUC_ADDRESS_OPCODE 0x11 | ||
244 | #define BM_WUC_DATA_OPCODE 0x12 | ||
245 | #define BM_WUC_ENABLE_PAGE 769 | ||
246 | #define BM_WUC_ENABLE_REG 17 | ||
247 | #define BM_WUC_ENABLE_BIT (1 << 2) | ||
248 | #define BM_WUC_HOST_WU_BIT (1 << 4) | ||
249 | #define BM_WUC_ME_WU_BIT (1 << 5) | ||
250 | |||
251 | #define BM_WUC PHY_REG(BM_WUC_PAGE, 1) | ||
252 | #define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) | ||
253 | #define BM_WUS PHY_REG(BM_WUC_PAGE, 3) | ||
254 | |||
255 | #define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 | ||
256 | #define IGP01E1000_PHY_POLARITY_MASK 0x0078 | ||
257 | |||
258 | #define IGP01E1000_PSCR_AUTO_MDIX 0x1000 | ||
259 | #define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ | ||
260 | |||
261 | #define IGP01E1000_PSCFR_SMART_SPEED 0x0080 | ||
262 | |||
263 | #define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ | ||
264 | #define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ | ||
265 | #define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ | ||
266 | |||
267 | #define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 | ||
268 | |||
269 | #define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 | ||
270 | #define IGP01E1000_PSSR_MDIX 0x0800 | ||
271 | #define IGP01E1000_PSSR_SPEED_MASK 0xC000 | ||
272 | #define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 | ||
273 | |||
274 | #define IGP02E1000_PHY_CHANNEL_NUM 4 | ||
275 | #define IGP02E1000_PHY_AGC_A 0x11B1 | ||
276 | #define IGP02E1000_PHY_AGC_B 0x12B1 | ||
277 | #define IGP02E1000_PHY_AGC_C 0x14B1 | ||
278 | #define IGP02E1000_PHY_AGC_D 0x18B1 | ||
279 | |||
280 | #define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ | ||
281 | #define IGP02E1000_AGC_LENGTH_MASK 0x7F | ||
282 | #define IGP02E1000_AGC_RANGE 15 | ||
283 | |||
284 | /* manage.c */ | ||
285 | #define E1000_VFTA_ENTRY_SHIFT 5 | ||
286 | #define E1000_VFTA_ENTRY_MASK 0x7F | ||
287 | #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F | ||
288 | |||
289 | #define E1000_HICR_EN 0x01 /* Enable bit - RO */ | ||
290 | /* Driver sets this bit when done to put command in RAM */ | ||
291 | #define E1000_HICR_C 0x02 | ||
292 | #define E1000_HICR_FW_RESET_ENABLE 0x40 | ||
293 | #define E1000_HICR_FW_RESET 0x80 | ||
294 | |||
295 | #define E1000_FWSM_MODE_MASK 0xE | ||
296 | #define E1000_FWSM_MODE_SHIFT 1 | ||
297 | |||
298 | #define E1000_MNG_IAMT_MODE 0x3 | ||
299 | #define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 | ||
300 | #define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 | ||
301 | #define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 | ||
302 | #define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 | ||
303 | #define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 | ||
304 | #define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 | ||
305 | |||
306 | /* nvm.c */ | ||
307 | #define E1000_STM_OPCODE 0xDB00 | ||
308 | |||
309 | #define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 | ||
310 | #define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 | ||
311 | #define E1000_KMRNCTRLSTA_REN 0x00200000 | ||
312 | #define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */ | ||
313 | #define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ | ||
314 | #define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ | ||
315 | #define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ | ||
316 | #define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ | ||
317 | #define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Near-end Loopback mode */ | ||
318 | #define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 | ||
319 | #define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 | ||
320 | #define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */ | ||
321 | |||
322 | #define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 | ||
323 | #define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ | ||
324 | #define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */ | ||
325 | #define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ | ||
326 | |||
327 | /* IFE PHY Extended Status Control */ | ||
328 | #define IFE_PESC_POLARITY_REVERSED 0x0100 | ||
329 | |||
330 | /* IFE PHY Special Control */ | ||
331 | #define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 | ||
332 | #define IFE_PSC_FORCE_POLARITY 0x0020 | ||
333 | |||
334 | /* IFE PHY Special Control and LED Control */ | ||
335 | #define IFE_PSCL_PROBE_MODE 0x0020 | ||
336 | #define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ | ||
337 | #define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ | ||
338 | |||
339 | /* IFE PHY MDIX Control */ | ||
340 | #define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ | ||
341 | #define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ | ||
342 | #define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */ | ||
343 | |||
344 | #define E1000_CABLE_LENGTH_UNDEFINED 0xFF | ||
345 | |||
346 | #define E1000_DEV_ID_82571EB_COPPER 0x105E | ||
347 | #define E1000_DEV_ID_82571EB_FIBER 0x105F | ||
348 | #define E1000_DEV_ID_82571EB_SERDES 0x1060 | ||
349 | #define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 | ||
350 | #define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 | ||
351 | #define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 | ||
352 | #define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC | ||
353 | #define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 | ||
354 | #define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA | ||
355 | #define E1000_DEV_ID_82572EI_COPPER 0x107D | ||
356 | #define E1000_DEV_ID_82572EI_FIBER 0x107E | ||
357 | #define E1000_DEV_ID_82572EI_SERDES 0x107F | ||
358 | #define E1000_DEV_ID_82572EI 0x10B9 | ||
359 | #define E1000_DEV_ID_82573E 0x108B | ||
360 | #define E1000_DEV_ID_82573E_IAMT 0x108C | ||
361 | #define E1000_DEV_ID_82573L 0x109A | ||
362 | #define E1000_DEV_ID_82574L 0x10D3 | ||
363 | #define E1000_DEV_ID_82574LA 0x10F6 | ||
364 | #define E1000_DEV_ID_82583V 0x150C | ||
365 | |||
366 | #define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 | ||
367 | #define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 | ||
368 | #define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA | ||
369 | #define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB | ||
370 | |||
371 | #define E1000_DEV_ID_ICH8_82567V_3 0x1501 | ||
372 | #define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 | ||
373 | #define E1000_DEV_ID_ICH8_IGP_AMT 0x104A | ||
374 | #define E1000_DEV_ID_ICH8_IGP_C 0x104B | ||
375 | #define E1000_DEV_ID_ICH8_IFE 0x104C | ||
376 | #define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 | ||
377 | #define E1000_DEV_ID_ICH8_IFE_G 0x10C5 | ||
378 | #define E1000_DEV_ID_ICH8_IGP_M 0x104D | ||
379 | #define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD | ||
380 | #define E1000_DEV_ID_ICH9_BM 0x10E5 | ||
381 | #define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 | ||
382 | #define E1000_DEV_ID_ICH9_IGP_M 0x10BF | ||
383 | #define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB | ||
384 | #define E1000_DEV_ID_ICH9_IGP_C 0x294C | ||
385 | #define E1000_DEV_ID_ICH9_IFE 0x10C0 | ||
386 | #define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 | ||
387 | #define E1000_DEV_ID_ICH9_IFE_G 0x10C2 | ||
388 | #define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC | ||
389 | #define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD | ||
390 | #define E1000_DEV_ID_ICH10_R_BM_V 0x10CE | ||
391 | #define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE | ||
392 | #define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF | ||
393 | #define E1000_DEV_ID_ICH10_D_BM_V 0x1525 | ||
394 | #define E1000_DEV_ID_PCH_M_HV_LM 0x10EA | ||
395 | #define E1000_DEV_ID_PCH_M_HV_LC 0x10EB | ||
396 | #define E1000_DEV_ID_PCH_D_HV_DM 0x10EF | ||
397 | #define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 | ||
398 | #define E1000_DEV_ID_PCH2_LV_LM 0x1502 | ||
399 | #define E1000_DEV_ID_PCH2_LV_V 0x1503 | ||
400 | |||
401 | #define E1000_REVISION_4 4 | ||
402 | |||
403 | #define E1000_FUNC_1 1 | ||
404 | |||
405 | #define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 | ||
406 | #define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 | ||
407 | |||
408 | enum e1000_mac_type { | ||
409 | e1000_82571, | ||
410 | e1000_82572, | ||
411 | e1000_82573, | ||
412 | e1000_82574, | ||
413 | e1000_82583, | ||
414 | e1000_80003es2lan, | ||
415 | e1000_ich8lan, | ||
416 | e1000_ich9lan, | ||
417 | e1000_ich10lan, | ||
418 | e1000_pchlan, | ||
419 | e1000_pch2lan, | ||
420 | }; | ||
421 | |||
422 | enum e1000_media_type { | ||
423 | e1000_media_type_unknown = 0, | ||
424 | e1000_media_type_copper = 1, | ||
425 | e1000_media_type_fiber = 2, | ||
426 | e1000_media_type_internal_serdes = 3, | ||
427 | e1000_num_media_types | ||
428 | }; | ||
429 | |||
430 | enum e1000_nvm_type { | ||
431 | e1000_nvm_unknown = 0, | ||
432 | e1000_nvm_none, | ||
433 | e1000_nvm_eeprom_spi, | ||
434 | e1000_nvm_flash_hw, | ||
435 | e1000_nvm_flash_sw | ||
436 | }; | ||
437 | |||
438 | enum e1000_nvm_override { | ||
439 | e1000_nvm_override_none = 0, | ||
440 | e1000_nvm_override_spi_small, | ||
441 | e1000_nvm_override_spi_large | ||
442 | }; | ||
443 | |||
444 | enum e1000_phy_type { | ||
445 | e1000_phy_unknown = 0, | ||
446 | e1000_phy_none, | ||
447 | e1000_phy_m88, | ||
448 | e1000_phy_igp, | ||
449 | e1000_phy_igp_2, | ||
450 | e1000_phy_gg82563, | ||
451 | e1000_phy_igp_3, | ||
452 | e1000_phy_ife, | ||
453 | e1000_phy_bm, | ||
454 | e1000_phy_82578, | ||
455 | e1000_phy_82577, | ||
456 | e1000_phy_82579, | ||
457 | }; | ||
458 | |||
459 | enum e1000_bus_width { | ||
460 | e1000_bus_width_unknown = 0, | ||
461 | e1000_bus_width_pcie_x1, | ||
462 | e1000_bus_width_pcie_x2, | ||
463 | e1000_bus_width_pcie_x4 = 4, | ||
464 | e1000_bus_width_32, | ||
465 | e1000_bus_width_64, | ||
466 | e1000_bus_width_reserved | ||
467 | }; | ||
468 | |||
469 | enum e1000_1000t_rx_status { | ||
470 | e1000_1000t_rx_status_not_ok = 0, | ||
471 | e1000_1000t_rx_status_ok, | ||
472 | e1000_1000t_rx_status_undefined = 0xFF | ||
473 | }; | ||
474 | |||
475 | enum e1000_rev_polarity { | ||
476 | e1000_rev_polarity_normal = 0, | ||
477 | e1000_rev_polarity_reversed, | ||
478 | e1000_rev_polarity_undefined = 0xFF | ||
479 | }; | ||
480 | |||
481 | enum e1000_fc_mode { | ||
482 | e1000_fc_none = 0, | ||
483 | e1000_fc_rx_pause, | ||
484 | e1000_fc_tx_pause, | ||
485 | e1000_fc_full, | ||
486 | e1000_fc_default = 0xFF | ||
487 | }; | ||
488 | |||
489 | enum e1000_ms_type { | ||
490 | e1000_ms_hw_default = 0, | ||
491 | e1000_ms_force_master, | ||
492 | e1000_ms_force_slave, | ||
493 | e1000_ms_auto | ||
494 | }; | ||
495 | |||
496 | enum e1000_smart_speed { | ||
497 | e1000_smart_speed_default = 0, | ||
498 | e1000_smart_speed_on, | ||
499 | e1000_smart_speed_off | ||
500 | }; | ||
501 | |||
502 | enum e1000_serdes_link_state { | ||
503 | e1000_serdes_link_down = 0, | ||
504 | e1000_serdes_link_autoneg_progress, | ||
505 | e1000_serdes_link_autoneg_complete, | ||
506 | e1000_serdes_link_forced_up | ||
507 | }; | ||
508 | |||
509 | /* Receive Descriptor */ | ||
510 | struct e1000_rx_desc { | ||
511 | __le64 buffer_addr; /* Address of the descriptor's data buffer */ | ||
512 | __le16 length; /* Length of data DMAed into data buffer */ | ||
513 | __le16 csum; /* Packet checksum */ | ||
514 | u8 status; /* Descriptor status */ | ||
515 | u8 errors; /* Descriptor Errors */ | ||
516 | __le16 special; | ||
517 | }; | ||
518 | |||
519 | /* Receive Descriptor - Extended */ | ||
520 | union e1000_rx_desc_extended { | ||
521 | struct { | ||
522 | __le64 buffer_addr; | ||
523 | __le64 reserved; | ||
524 | } read; | ||
525 | struct { | ||
526 | struct { | ||
527 | __le32 mrq; /* Multiple Rx Queues */ | ||
528 | union { | ||
529 | __le32 rss; /* RSS Hash */ | ||
530 | struct { | ||
531 | __le16 ip_id; /* IP id */ | ||
532 | __le16 csum; /* Packet Checksum */ | ||
533 | } csum_ip; | ||
534 | } hi_dword; | ||
535 | } lower; | ||
536 | struct { | ||
537 | __le32 status_error; /* ext status/error */ | ||
538 | __le16 length; | ||
539 | __le16 vlan; /* VLAN tag */ | ||
540 | } upper; | ||
541 | } wb; /* writeback */ | ||
542 | }; | ||
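/*
 * Editor's illustrative sketch, not part of the original header: consuming
 * the writeback half of the extended descriptor, assuming the
 * E1000_RXD_STAT_DD "descriptor done" bit from defines.h:
 *
 *	u32 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 *
 *	if (staterr & E1000_RXD_STAT_DD) {
 *		u16 len = le16_to_cpu(rx_desc->wb.upper.length);
 *		... hand len bytes from the matching buffer to the stack ...
 *	}
 */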
543 | |||
544 | #define MAX_PS_BUFFERS 4 | ||
545 | /* Receive Descriptor - Packet Split */ | ||
546 | union e1000_rx_desc_packet_split { | ||
547 | struct { | ||
548 | /* one buffer for protocol header(s), three data buffers */ | ||
549 | __le64 buffer_addr[MAX_PS_BUFFERS]; | ||
550 | } read; | ||
551 | struct { | ||
552 | struct { | ||
553 | __le32 mrq; /* Multiple Rx Queues */ | ||
554 | union { | ||
555 | __le32 rss; /* RSS Hash */ | ||
556 | struct { | ||
557 | __le16 ip_id; /* IP id */ | ||
558 | __le16 csum; /* Packet Checksum */ | ||
559 | } csum_ip; | ||
560 | } hi_dword; | ||
561 | } lower; | ||
562 | struct { | ||
563 | __le32 status_error; /* ext status/error */ | ||
564 | __le16 length0; /* length of buffer 0 */ | ||
565 | __le16 vlan; /* VLAN tag */ | ||
566 | } middle; | ||
567 | struct { | ||
568 | __le16 header_status; | ||
569 | __le16 length[3]; /* length of buffers 1-3 */ | ||
570 | } upper; | ||
571 | __le64 reserved; | ||
572 | } wb; /* writeback */ | ||
573 | }; | ||
574 | |||
575 | /* Transmit Descriptor */ | ||
576 | struct e1000_tx_desc { | ||
577 | __le64 buffer_addr; /* Address of the descriptor's data buffer */ | ||
578 | union { | ||
579 | __le32 data; | ||
580 | struct { | ||
581 | __le16 length; /* Data buffer length */ | ||
582 | u8 cso; /* Checksum offset */ | ||
583 | u8 cmd; /* Descriptor control */ | ||
584 | } flags; | ||
585 | } lower; | ||
586 | union { | ||
587 | __le32 data; | ||
588 | struct { | ||
589 | u8 status; /* Descriptor status */ | ||
590 | u8 css; /* Checksum start */ | ||
591 | __le16 special; | ||
592 | } fields; | ||
593 | } upper; | ||
594 | }; | ||
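/*
 * Editor's illustrative sketch, not part of the original header: filling a
 * legacy transmit descriptor, assuming the E1000_TXD_CMD_EOP/IFCS/RS
 * command bits from defines.h; dma_addr and skb are placeholders:
 *
 *	tx_desc->buffer_addr = cpu_to_le64(dma_addr);
 *	tx_desc->lower.data  = cpu_to_le32(E1000_TXD_CMD_EOP |
 *					   E1000_TXD_CMD_IFCS |
 *					   E1000_TXD_CMD_RS | skb->len);
 *	tx_desc->upper.data  = 0;
 */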
595 | |||
596 | /* Offload Context Descriptor */ | ||
597 | struct e1000_context_desc { | ||
598 | union { | ||
599 | __le32 ip_config; | ||
600 | struct { | ||
601 | u8 ipcss; /* IP checksum start */ | ||
602 | u8 ipcso; /* IP checksum offset */ | ||
603 | __le16 ipcse; /* IP checksum end */ | ||
604 | } ip_fields; | ||
605 | } lower_setup; | ||
606 | union { | ||
607 | __le32 tcp_config; | ||
608 | struct { | ||
609 | u8 tucss; /* TCP checksum start */ | ||
610 | u8 tucso; /* TCP checksum offset */ | ||
611 | __le16 tucse; /* TCP checksum end */ | ||
612 | } tcp_fields; | ||
613 | } upper_setup; | ||
614 | __le32 cmd_and_length; | ||
615 | union { | ||
616 | __le32 data; | ||
617 | struct { | ||
618 | u8 status; /* Descriptor status */ | ||
619 | u8 hdr_len; /* Header length */ | ||
620 | __le16 mss; /* Maximum segment size */ | ||
621 | } fields; | ||
622 | } tcp_seg_setup; | ||
623 | }; | ||
624 | |||
625 | /* Offload data descriptor */ | ||
626 | struct e1000_data_desc { | ||
627 | __le64 buffer_addr; /* Address of the descriptor's data buffer */ | ||
628 | union { | ||
629 | __le32 data; | ||
630 | struct { | ||
631 | __le16 length; /* Data buffer length */ | ||
632 | u8 typ_len_ext; | ||
633 | u8 cmd; | ||
634 | } flags; | ||
635 | } lower; | ||
636 | union { | ||
637 | __le32 data; | ||
638 | struct { | ||
639 | u8 status; /* Descriptor status */ | ||
640 | u8 popts; /* Packet Options */ | ||
641 | __le16 special; | ||
642 | } fields; | ||
643 | } upper; | ||
644 | }; | ||
645 | |||
646 | /* Statistics counters collected by the MAC */ | ||
647 | struct e1000_hw_stats { | ||
648 | u64 crcerrs; | ||
649 | u64 algnerrc; | ||
650 | u64 symerrs; | ||
651 | u64 rxerrc; | ||
652 | u64 mpc; | ||
653 | u64 scc; | ||
654 | u64 ecol; | ||
655 | u64 mcc; | ||
656 | u64 latecol; | ||
657 | u64 colc; | ||
658 | u64 dc; | ||
659 | u64 tncrs; | ||
660 | u64 sec; | ||
661 | u64 cexterr; | ||
662 | u64 rlec; | ||
663 | u64 xonrxc; | ||
664 | u64 xontxc; | ||
665 | u64 xoffrxc; | ||
666 | u64 xofftxc; | ||
667 | u64 fcruc; | ||
668 | u64 prc64; | ||
669 | u64 prc127; | ||
670 | u64 prc255; | ||
671 | u64 prc511; | ||
672 | u64 prc1023; | ||
673 | u64 prc1522; | ||
674 | u64 gprc; | ||
675 | u64 bprc; | ||
676 | u64 mprc; | ||
677 | u64 gptc; | ||
678 | u64 gorc; | ||
679 | u64 gotc; | ||
680 | u64 rnbc; | ||
681 | u64 ruc; | ||
682 | u64 rfc; | ||
683 | u64 roc; | ||
684 | u64 rjc; | ||
685 | u64 mgprc; | ||
686 | u64 mgpdc; | ||
687 | u64 mgptc; | ||
688 | u64 tor; | ||
689 | u64 tot; | ||
690 | u64 tpr; | ||
691 | u64 tpt; | ||
692 | u64 ptc64; | ||
693 | u64 ptc127; | ||
694 | u64 ptc255; | ||
695 | u64 ptc511; | ||
696 | u64 ptc1023; | ||
697 | u64 ptc1522; | ||
698 | u64 mptc; | ||
699 | u64 bptc; | ||
700 | u64 tsctc; | ||
701 | u64 tsctfc; | ||
702 | u64 iac; | ||
703 | u64 icrxptc; | ||
704 | u64 icrxatc; | ||
705 | u64 ictxptc; | ||
706 | u64 ictxatc; | ||
707 | u64 ictxqec; | ||
708 | u64 ictxqmtc; | ||
709 | u64 icrxdmtc; | ||
710 | u64 icrxoc; | ||
711 | }; | ||
712 | |||
713 | struct e1000_phy_stats { | ||
714 | u32 idle_errors; | ||
715 | u32 receive_errors; | ||
716 | }; | ||
717 | |||
718 | struct e1000_host_mng_dhcp_cookie { | ||
719 | u32 signature; | ||
720 | u8 status; | ||
721 | u8 reserved0; | ||
722 | u16 vlan_id; | ||
723 | u32 reserved1; | ||
724 | u16 reserved2; | ||
725 | u8 reserved3; | ||
726 | u8 checksum; | ||
727 | }; | ||
728 | |||
729 | /* Host Interface "Rev 1" */ | ||
730 | struct e1000_host_command_header { | ||
731 | u8 command_id; | ||
732 | u8 command_length; | ||
733 | u8 command_options; | ||
734 | u8 checksum; | ||
735 | }; | ||
736 | |||
737 | #define E1000_HI_MAX_DATA_LENGTH 252 | ||
738 | struct e1000_host_command_info { | ||
739 | struct e1000_host_command_header command_header; | ||
740 | u8 command_data[E1000_HI_MAX_DATA_LENGTH]; | ||
741 | }; | ||
742 | |||
743 | /* Host Interface "Rev 2" */ | ||
744 | struct e1000_host_mng_command_header { | ||
745 | u8 command_id; | ||
746 | u8 checksum; | ||
747 | u16 reserved1; | ||
748 | u16 reserved2; | ||
749 | u16 command_length; | ||
750 | }; | ||
751 | |||
752 | #define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 | ||
753 | struct e1000_host_mng_command_info { | ||
754 | struct e1000_host_mng_command_header command_header; | ||
755 | u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; | ||
756 | }; | ||
757 | |||
758 | /* Function pointers and static data for the MAC. */ | ||
759 | struct e1000_mac_operations { | ||
760 | s32 (*id_led_init)(struct e1000_hw *); | ||
761 | s32 (*blink_led)(struct e1000_hw *); | ||
762 | bool (*check_mng_mode)(struct e1000_hw *); | ||
763 | s32 (*check_for_link)(struct e1000_hw *); | ||
764 | s32 (*cleanup_led)(struct e1000_hw *); | ||
765 | void (*clear_hw_cntrs)(struct e1000_hw *); | ||
766 | void (*clear_vfta)(struct e1000_hw *); | ||
767 | s32 (*get_bus_info)(struct e1000_hw *); | ||
768 | void (*set_lan_id)(struct e1000_hw *); | ||
769 | s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); | ||
770 | s32 (*led_on)(struct e1000_hw *); | ||
771 | s32 (*led_off)(struct e1000_hw *); | ||
772 | void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); | ||
773 | s32 (*reset_hw)(struct e1000_hw *); | ||
774 | s32 (*init_hw)(struct e1000_hw *); | ||
775 | s32 (*setup_link)(struct e1000_hw *); | ||
776 | s32 (*setup_physical_interface)(struct e1000_hw *); | ||
777 | s32 (*setup_led)(struct e1000_hw *); | ||
778 | void (*write_vfta)(struct e1000_hw *, u32, u32); | ||
779 | s32 (*read_mac_addr)(struct e1000_hw *); | ||
780 | }; | ||
781 | |||
782 | /* | ||
783 | * When to use various PHY register access functions: | ||
784 | * | ||
785 | * Func Caller | ||
786 | * Function Does Does When to use | ||
787 | * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
788 | * X_reg L,P,A n/a for simple PHY reg accesses | ||
789 | * X_reg_locked P,A L for multiple accesses of different regs | ||
790 | * on different pages | ||
791 | * X_reg_page A L,P for multiple accesses of different regs | ||
792 | * on the same page | ||
793 | * | ||
794 | * Where X=[read|write], L=locking, P=sets page, A=register access | ||
795 | * | ||
796 | */ | ||
797 | struct e1000_phy_operations { | ||
798 | s32 (*acquire)(struct e1000_hw *); | ||
799 | s32 (*cfg_on_link_up)(struct e1000_hw *); | ||
800 | s32 (*check_polarity)(struct e1000_hw *); | ||
801 | s32 (*check_reset_block)(struct e1000_hw *); | ||
802 | s32 (*commit)(struct e1000_hw *); | ||
803 | s32 (*force_speed_duplex)(struct e1000_hw *); | ||
804 | s32 (*get_cfg_done)(struct e1000_hw *hw); | ||
805 | s32 (*get_cable_length)(struct e1000_hw *); | ||
806 | s32 (*get_info)(struct e1000_hw *); | ||
807 | s32 (*set_page)(struct e1000_hw *, u16); | ||
808 | s32 (*read_reg)(struct e1000_hw *, u32, u16 *); | ||
809 | s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); | ||
810 | s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); | ||
811 | void (*release)(struct e1000_hw *); | ||
812 | s32 (*reset)(struct e1000_hw *); | ||
813 | s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); | ||
814 | s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); | ||
815 | s32 (*write_reg)(struct e1000_hw *, u32, u16); | ||
816 | s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); | ||
817 | s32 (*write_reg_page)(struct e1000_hw *, u32, u16); | ||
818 | void (*power_up)(struct e1000_hw *); | ||
819 | void (*power_down)(struct e1000_hw *); | ||
820 | }; | ||
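/*
 * Editor's illustrative sketch, not part of the original header: the
 * "X_reg_page" row of the table above in practice - the caller takes the
 * lock (L) and selects the page (P), the op performs only the access (A).
 * page, reg1/reg2 and data1/data2 are placeholders; the s32 return values
 * should be checked in real code:
 *
 *	hw->phy.ops.acquire(hw);
 *	hw->phy.ops.set_page(hw, page);
 *	hw->phy.ops.read_reg_page(hw, reg1, &data1);
 *	hw->phy.ops.read_reg_page(hw, reg2, &data2);
 *	hw->phy.ops.release(hw);
 */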
821 | |||
822 | /* Function pointers for the NVM. */ | ||
823 | struct e1000_nvm_operations { | ||
824 | s32 (*acquire)(struct e1000_hw *); | ||
825 | s32 (*read)(struct e1000_hw *, u16, u16, u16 *); | ||
826 | void (*release)(struct e1000_hw *); | ||
827 | s32 (*update)(struct e1000_hw *); | ||
828 | s32 (*valid_led_default)(struct e1000_hw *, u16 *); | ||
829 | s32 (*validate)(struct e1000_hw *); | ||
830 | s32 (*write)(struct e1000_hw *, u16, u16, u16 *); | ||
831 | }; | ||
832 | |||
833 | struct e1000_mac_info { | ||
834 | struct e1000_mac_operations ops; | ||
835 | u8 addr[ETH_ALEN]; | ||
836 | u8 perm_addr[ETH_ALEN]; | ||
837 | |||
838 | enum e1000_mac_type type; | ||
839 | |||
840 | u32 collision_delta; | ||
841 | u32 ledctl_default; | ||
842 | u32 ledctl_mode1; | ||
843 | u32 ledctl_mode2; | ||
844 | u32 mc_filter_type; | ||
845 | u32 tx_packet_delta; | ||
846 | u32 txcw; | ||
847 | |||
848 | u16 current_ifs_val; | ||
849 | u16 ifs_max_val; | ||
850 | u16 ifs_min_val; | ||
851 | u16 ifs_ratio; | ||
852 | u16 ifs_step_size; | ||
853 | u16 mta_reg_count; | ||
854 | |||
855 | /* Maximum size of the MTA register table in all supported adapters */ | ||
856 | #define MAX_MTA_REG 128 | ||
857 | u32 mta_shadow[MAX_MTA_REG]; | ||
858 | u16 rar_entry_count; | ||
859 | |||
860 | u8 forced_speed_duplex; | ||
861 | |||
862 | bool adaptive_ifs; | ||
863 | bool has_fwsm; | ||
864 | bool arc_subsystem_valid; | ||
865 | bool autoneg; | ||
866 | bool autoneg_failed; | ||
867 | bool get_link_status; | ||
868 | bool in_ifs_mode; | ||
869 | bool serdes_has_link; | ||
870 | bool tx_pkt_filtering; | ||
871 | enum e1000_serdes_link_state serdes_link_state; | ||
872 | }; | ||
873 | |||
874 | struct e1000_phy_info { | ||
875 | struct e1000_phy_operations ops; | ||
876 | |||
877 | enum e1000_phy_type type; | ||
878 | |||
879 | enum e1000_1000t_rx_status local_rx; | ||
880 | enum e1000_1000t_rx_status remote_rx; | ||
881 | enum e1000_ms_type ms_type; | ||
882 | enum e1000_ms_type original_ms_type; | ||
883 | enum e1000_rev_polarity cable_polarity; | ||
884 | enum e1000_smart_speed smart_speed; | ||
885 | |||
886 | u32 addr; | ||
887 | u32 id; | ||
888 | u32 reset_delay_us; /* in usec */ | ||
889 | u32 revision; | ||
890 | |||
891 | enum e1000_media_type media_type; | ||
892 | |||
893 | u16 autoneg_advertised; | ||
894 | u16 autoneg_mask; | ||
895 | u16 cable_length; | ||
896 | u16 max_cable_length; | ||
897 | u16 min_cable_length; | ||
898 | |||
899 | u8 mdix; | ||
900 | |||
901 | bool disable_polarity_correction; | ||
902 | bool is_mdix; | ||
903 | bool polarity_correction; | ||
904 | bool speed_downgraded; | ||
905 | bool autoneg_wait_to_complete; | ||
906 | }; | ||
907 | |||
908 | struct e1000_nvm_info { | ||
909 | struct e1000_nvm_operations ops; | ||
910 | |||
911 | enum e1000_nvm_type type; | ||
912 | enum e1000_nvm_override override; | ||
913 | |||
914 | u32 flash_bank_size; | ||
915 | u32 flash_base_addr; | ||
916 | |||
917 | u16 word_size; | ||
918 | u16 delay_usec; | ||
919 | u16 address_bits; | ||
920 | u16 opcode_bits; | ||
921 | u16 page_size; | ||
922 | }; | ||
923 | |||
924 | struct e1000_bus_info { | ||
925 | enum e1000_bus_width width; | ||
926 | |||
927 | u16 func; | ||
928 | }; | ||
929 | |||
930 | struct e1000_fc_info { | ||
931 | u32 high_water; /* Flow control high-water mark */ | ||
932 | u32 low_water; /* Flow control low-water mark */ | ||
933 | u16 pause_time; /* Flow control pause timer */ | ||
934 | u16 refresh_time; /* Flow control refresh timer */ | ||
935 | bool send_xon; /* Flow control send XON */ | ||
936 | bool strict_ieee; /* Strict IEEE mode */ | ||
937 | enum e1000_fc_mode current_mode; /* FC mode in effect */ | ||
938 | enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ | ||
939 | }; | ||
940 | |||
941 | struct e1000_dev_spec_82571 { | ||
942 | bool laa_is_present; | ||
943 | u32 smb_counter; | ||
944 | }; | ||
945 | |||
946 | struct e1000_dev_spec_80003es2lan { | ||
947 | bool mdic_wa_enable; | ||
948 | }; | ||
949 | |||
950 | struct e1000_shadow_ram { | ||
951 | u16 value; | ||
952 | bool modified; | ||
953 | }; | ||
954 | |||
955 | #define E1000_ICH8_SHADOW_RAM_WORDS 2048 | ||
956 | |||
957 | struct e1000_dev_spec_ich8lan { | ||
958 | bool kmrn_lock_loss_workaround_enabled; | ||
959 | struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; | ||
960 | bool nvm_k1_enabled; | ||
961 | bool eee_disable; | ||
962 | }; | ||
963 | |||
964 | struct e1000_hw { | ||
965 | struct e1000_adapter *adapter; | ||
966 | |||
967 | u8 __iomem *hw_addr; | ||
968 | u8 __iomem *flash_address; | ||
969 | |||
970 | struct e1000_mac_info mac; | ||
971 | struct e1000_fc_info fc; | ||
972 | struct e1000_phy_info phy; | ||
973 | struct e1000_nvm_info nvm; | ||
974 | struct e1000_bus_info bus; | ||
975 | struct e1000_host_mng_dhcp_cookie mng_cookie; | ||
976 | |||
977 | union { | ||
978 | struct e1000_dev_spec_82571 e82571; | ||
979 | struct e1000_dev_spec_80003es2lan e80003es2lan; | ||
980 | struct e1000_dev_spec_ich8lan ich8lan; | ||
981 | } dev_spec; | ||
982 | }; | ||
983 | |||
984 | #endif | ||
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c new file mode 100644 index 000000000000..4e36978b8fd8 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c | |||
@@ -0,0 +1,4111 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/1000 Linux driver | ||
4 | Copyright(c) 1999 - 2011 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | /* | ||
30 | * 82562G 10/100 Network Connection | ||
31 | * 82562G-2 10/100 Network Connection | ||
32 | * 82562GT 10/100 Network Connection | ||
33 | * 82562GT-2 10/100 Network Connection | ||
34 | * 82562V 10/100 Network Connection | ||
35 | * 82562V-2 10/100 Network Connection | ||
36 | * 82566DC-2 Gigabit Network Connection | ||
37 | * 82566DC Gigabit Network Connection | ||
38 | * 82566DM-2 Gigabit Network Connection | ||
39 | * 82566DM Gigabit Network Connection | ||
40 | * 82566MC Gigabit Network Connection | ||
41 | * 82566MM Gigabit Network Connection | ||
42 | * 82567LM Gigabit Network Connection | ||
43 | * 82567LF Gigabit Network Connection | ||
44 | * 82567V Gigabit Network Connection | ||
45 | * 82567LM-2 Gigabit Network Connection | ||
46 | * 82567LF-2 Gigabit Network Connection | ||
47 | * 82567V-2 Gigabit Network Connection | ||
48 | * 82567LF-3 Gigabit Network Connection | ||
49 | * 82567LM-3 Gigabit Network Connection | ||
50 | * 82567LM-4 Gigabit Network Connection | ||
51 | * 82577LM Gigabit Network Connection | ||
52 | * 82577LC Gigabit Network Connection | ||
53 | * 82578DM Gigabit Network Connection | ||
54 | * 82578DC Gigabit Network Connection | ||
55 | * 82579LM Gigabit Network Connection | ||
56 | * 82579V Gigabit Network Connection | ||
57 | */ | ||
58 | |||
59 | #include "e1000.h" | ||
60 | |||
61 | #define ICH_FLASH_GFPREG 0x0000 | ||
62 | #define ICH_FLASH_HSFSTS 0x0004 | ||
63 | #define ICH_FLASH_HSFCTL 0x0006 | ||
64 | #define ICH_FLASH_FADDR 0x0008 | ||
65 | #define ICH_FLASH_FDATA0 0x0010 | ||
66 | #define ICH_FLASH_PR0 0x0074 | ||
67 | |||
68 | #define ICH_FLASH_READ_COMMAND_TIMEOUT 500 | ||
69 | #define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500 | ||
70 | #define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000 | ||
71 | #define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF | ||
72 | #define ICH_FLASH_CYCLE_REPEAT_COUNT 10 | ||
73 | |||
74 | #define ICH_CYCLE_READ 0 | ||
75 | #define ICH_CYCLE_WRITE 2 | ||
76 | #define ICH_CYCLE_ERASE 3 | ||
77 | |||
78 | #define FLASH_GFPREG_BASE_MASK 0x1FFF | ||
79 | #define FLASH_SECTOR_ADDR_SHIFT 12 | ||
80 | |||
81 | #define ICH_FLASH_SEG_SIZE_256 256 | ||
82 | #define ICH_FLASH_SEG_SIZE_4K 4096 | ||
83 | #define ICH_FLASH_SEG_SIZE_8K 8192 | ||
84 | #define ICH_FLASH_SEG_SIZE_64K 65536 | ||
85 | |||
86 | |||
87 | #define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ | ||
88 | /* FW established a valid mode */ | ||
89 | #define E1000_ICH_FWSM_FW_VALID 0x00008000 | ||
90 | |||
91 | #define E1000_ICH_MNG_IAMT_MODE 0x2 | ||
92 | |||
93 | #define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ | ||
94 | (ID_LED_DEF1_OFF2 << 8) | \ | ||
95 | (ID_LED_DEF1_ON2 << 4) | \ | ||
96 | (ID_LED_DEF1_DEF2)) | ||
97 | |||
98 | #define E1000_ICH_NVM_SIG_WORD 0x13 | ||
99 | #define E1000_ICH_NVM_SIG_MASK 0xC000 | ||
100 | #define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 | ||
101 | #define E1000_ICH_NVM_SIG_VALUE 0x80 | ||
102 | |||
103 | #define E1000_ICH8_LAN_INIT_TIMEOUT 1500 | ||
104 | |||
105 | #define E1000_FEXTNVM_SW_CONFIG 1 | ||
106 | #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ | ||
107 | |||
108 | #define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 | ||
109 | #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 | ||
110 | #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 | ||
111 | |||
112 | #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL | ||
113 | |||
114 | #define E1000_ICH_RAR_ENTRIES 7 | ||
115 | |||
116 | #define PHY_PAGE_SHIFT 5 | ||
117 | #define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ | ||
118 | ((reg) & MAX_PHY_REG_ADDRESS)) | ||
119 | #define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ | ||
120 | #define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ | ||
121 | |||
122 | #define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 | ||
123 | #define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 | ||
124 | #define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 | ||
125 | |||
126 | #define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ | ||
127 | |||
128 | #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */ | ||
129 | |||
130 | /* SMBus Address Phy Register */ | ||
131 | #define HV_SMB_ADDR PHY_REG(768, 26) | ||
132 | #define HV_SMB_ADDR_MASK 0x007F | ||
133 | #define HV_SMB_ADDR_PEC_EN 0x0200 | ||
134 | #define HV_SMB_ADDR_VALID 0x0080 | ||
135 | |||
136 | /* PHY Power Management Control */ | ||
137 | #define HV_PM_CTRL PHY_REG(770, 17) | ||
138 | |||
139 | /* PHY Low Power Idle Control */ | ||
140 | #define I82579_LPI_CTRL PHY_REG(772, 20) | ||
141 | #define I82579_LPI_CTRL_ENABLE_MASK 0x6000 | ||
142 | |||
143 | /* EMI Registers */ | ||
144 | #define I82579_EMI_ADDR 0x10 | ||
145 | #define I82579_EMI_DATA 0x11 | ||
146 | #define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ | ||
147 | |||
148 | /* Strapping Option Register - RO */ | ||
149 | #define E1000_STRAP 0x0000C | ||
150 | #define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 | ||
151 | #define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 | ||
152 | |||
153 | /* OEM Bits Phy Register */ | ||
154 | #define HV_OEM_BITS PHY_REG(768, 25) | ||
155 | #define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ | ||
156 | #define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ | ||
157 | #define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ | ||
158 | |||
159 | #define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ | ||
160 | #define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ | ||
161 | |||
162 | /* KMRN Mode Control */ | ||
163 | #define HV_KMRN_MODE_CTRL PHY_REG(769, 16) | ||
164 | #define HV_KMRN_MDIO_SLOW 0x0400 | ||
165 | |||
166 | /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ | ||
167 | /* Offset 04h HSFSTS */ | ||
168 | union ich8_hws_flash_status { | ||
169 | struct ich8_hsfsts { | ||
170 | u16 flcdone :1; /* bit 0 Flash Cycle Done */ | ||
171 | u16 flcerr :1; /* bit 1 Flash Cycle Error */ | ||
172 | u16 dael :1; /* bit 2 Direct Access error Log */ | ||
173 | u16 berasesz :2; /* bit 4:3 Sector Erase Size */ | ||
174 | u16 flcinprog :1; /* bit 5 flash cycle in Progress */ | ||
175 | u16 reserved1 :2; /* bit 7:6 Reserved */ | ||
176 | u16 reserved2 :6; /* bit 13:8 Reserved */ | ||
177 | u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */ | ||
178 | u16 flockdn :1; /* bit 15 Flash Config Lock-Down */ | ||
179 | } hsf_status; | ||
180 | u16 regval; | ||
181 | }; | ||
182 | |||
183 | /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */ | ||
184 | /* Offset 06h FLCTL */ | ||
185 | union ich8_hws_flash_ctrl { | ||
186 | struct ich8_hsflctl { | ||
187 | u16 flcgo :1; /* 0 Flash Cycle Go */ | ||
188 | u16 flcycle :2; /* 2:1 Flash Cycle */ | ||
189 | u16 reserved :5; /* 7:3 Reserved */ | ||
190 | u16 fldbcount :2; /* 9:8 Flash Data Byte Count */ | ||
191 | u16 flockdn :6; /* 15:10 Reserved */ | ||
192 | } hsf_ctrl; | ||
193 | u16 regval; | ||
194 | }; | ||
195 | |||
196 | /* ICH Flash Region Access Permissions */ | ||
197 | union ich8_hws_flash_regacc { | ||
198 | struct ich8_flracc { | ||
199 | u32 grra :8; /* 0:7 GbE region Read Access */ | ||
200 | u32 grwa :8; /* 8:15 GbE region Write Access */ | ||
201 | u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */ | ||
202 | u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */ | ||
203 | } hsf_flregacc; | ||
204 | u16 regval; | ||
205 | }; | ||
206 | |||
207 | /* ICH Flash Protected Region */ | ||
208 | union ich8_flash_protected_range { | ||
209 | struct ich8_pr { | ||
210 | u32 base:13; /* 0:12 Protected Range Base */ | ||
211 | u32 reserved1:2; /* 13:14 Reserved */ | ||
212 | u32 rpe:1; /* 15 Read Protection Enable */ | ||
213 | u32 limit:13; /* 16:28 Protected Range Limit */ | ||
214 | u32 reserved2:2; /* 29:30 Reserved */ | ||
215 | u32 wpe:1; /* 31 Write Protection Enable */ | ||
216 | } range; | ||
217 | u32 regval; | ||
218 | }; | ||
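/*
 * Editor's illustrative sketch, not part of the original file: the unions
 * above are used by loading the raw register value into .regval and then
 * inspecting individual bit-fields, e.g. with er16flash() defined further
 * below (cycle_done and cycle_err are placeholders):
 *
 *	union ich8_hws_flash_status hsfsts;
 *
 *	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
 *	cycle_done = hsfsts.hsf_status.flcdone;
 *	cycle_err  = hsfsts.hsf_status.flcerr;
 */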
219 | |||
220 | static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); | ||
221 | static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); | ||
222 | static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); | ||
223 | static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); | ||
224 | static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, | ||
225 | u32 offset, u8 byte); | ||
226 | static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, | ||
227 | u8 *data); | ||
228 | static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, | ||
229 | u16 *data); | ||
230 | static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | ||
231 | u8 size, u16 *data); | ||
232 | static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); | ||
233 | static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); | ||
234 | static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw); | ||
235 | static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); | ||
236 | static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); | ||
237 | static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); | ||
238 | static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw); | ||
239 | static s32 e1000_setup_led_pchlan(struct e1000_hw *hw); | ||
240 | static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); | ||
241 | static s32 e1000_led_on_pchlan(struct e1000_hw *hw); | ||
242 | static s32 e1000_led_off_pchlan(struct e1000_hw *hw); | ||
243 | static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); | ||
244 | static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); | ||
245 | static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); | ||
246 | static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); | ||
247 | static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); | ||
248 | static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); | ||
249 | static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); | ||
250 | static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); | ||
251 | static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); | ||
252 | |||
253 | static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) | ||
254 | { | ||
255 | return readw(hw->flash_address + reg); | ||
256 | } | ||
257 | |||
258 | static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg) | ||
259 | { | ||
260 | return readl(hw->flash_address + reg); | ||
261 | } | ||
262 | |||
263 | static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val) | ||
264 | { | ||
265 | writew(val, hw->flash_address + reg); | ||
266 | } | ||
267 | |||
268 | static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) | ||
269 | { | ||
270 | writel(val, hw->flash_address + reg); | ||
271 | } | ||
272 | |||
273 | #define er16flash(reg) __er16flash(hw, (reg)) | ||
274 | #define er32flash(reg) __er32flash(hw, (reg)) | ||
275 | #define ew16flash(reg,val) __ew16flash(hw, (reg), (val)) | ||
276 | #define ew32flash(reg,val) __ew32flash(hw, (reg), (val)) | ||
277 | |||
278 | static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw) | ||
279 | { | ||
280 | u32 ctrl; | ||
281 | |||
282 | ctrl = er32(CTRL); | ||
283 | ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; | ||
284 | ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; | ||
285 | ew32(CTRL, ctrl); | ||
286 | e1e_flush(); | ||
287 | udelay(10); | ||
288 | ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; | ||
289 | ew32(CTRL, ctrl); | ||
290 | } | ||
291 | |||
292 | /** | ||
293 | * e1000_init_phy_params_pchlan - Initialize PHY function pointers | ||
294 | * @hw: pointer to the HW structure | ||
295 | * | ||
296 | * Initialize family-specific PHY parameters and function pointers. | ||
297 | **/ | ||
298 | static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) | ||
299 | { | ||
300 | struct e1000_phy_info *phy = &hw->phy; | ||
301 | u32 fwsm; | ||
302 | s32 ret_val = 0; | ||
303 | |||
304 | phy->addr = 1; | ||
305 | phy->reset_delay_us = 100; | ||
306 | |||
307 | phy->ops.set_page = e1000_set_page_igp; | ||
308 | phy->ops.read_reg = e1000_read_phy_reg_hv; | ||
309 | phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; | ||
310 | phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; | ||
311 | phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; | ||
312 | phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; | ||
313 | phy->ops.write_reg = e1000_write_phy_reg_hv; | ||
314 | phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; | ||
315 | phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; | ||
316 | phy->ops.power_up = e1000_power_up_phy_copper; | ||
317 | phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; | ||
318 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | ||
319 | |||
320 | /* | ||
321 | * The MAC-PHY interconnect may still be in SMBus mode | ||
322 | * after Sx->S0. If the manageability engine (ME) is | ||
323 | * disabled, then toggle the LANPHYPC Value bit to force | ||
324 | * the interconnect to PCIe mode. | ||
325 | */ | ||
326 | fwsm = er32(FWSM); | ||
327 | if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) { | ||
328 | e1000_toggle_lanphypc_value_ich8lan(hw); | ||
329 | msleep(50); | ||
330 | |||
331 | /* | ||
332 | * Gate automatic PHY configuration by hardware on | ||
333 | * non-managed 82579 | ||
334 | */ | ||
335 | if (hw->mac.type == e1000_pch2lan) | ||
336 | e1000_gate_hw_phy_config_ich8lan(hw, true); | ||
337 | } | ||
338 | |||
339 | /* | ||
340 | * Reset the PHY before any access to it. Doing so ensures that | ||
341 | * the PHY is in a known good state before we read/write PHY registers. | ||
342 | * The generic reset is sufficient here, because we haven't determined | ||
343 | * the PHY type yet. | ||
344 | */ | ||
345 | ret_val = e1000e_phy_hw_reset_generic(hw); | ||
346 | if (ret_val) | ||
347 | goto out; | ||
348 | |||
349 | /* Ungate automatic PHY configuration on non-managed 82579 */ | ||
350 | if ((hw->mac.type == e1000_pch2lan) && | ||
351 | !(fwsm & E1000_ICH_FWSM_FW_VALID)) { | ||
352 | usleep_range(10000, 20000); | ||
353 | e1000_gate_hw_phy_config_ich8lan(hw, false); | ||
354 | } | ||
355 | |||
356 | phy->id = e1000_phy_unknown; | ||
357 | switch (hw->mac.type) { | ||
358 | default: | ||
359 | ret_val = e1000e_get_phy_id(hw); | ||
360 | if (ret_val) | ||
361 | goto out; | ||
362 | if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) | ||
363 | break; | ||
364 | /* fall-through */ | ||
365 | case e1000_pch2lan: | ||
366 | /* | ||
367 | * In case the PHY needs to be in mdio slow mode, | ||
368 | * set slow mode and try to get the PHY id again. | ||
369 | */ | ||
370 | ret_val = e1000_set_mdio_slow_mode_hv(hw); | ||
371 | if (ret_val) | ||
372 | goto out; | ||
373 | ret_val = e1000e_get_phy_id(hw); | ||
374 | if (ret_val) | ||
375 | goto out; | ||
376 | break; | ||
377 | } | ||
378 | phy->type = e1000e_get_phy_type_from_id(phy->id); | ||
379 | |||
380 | switch (phy->type) { | ||
381 | case e1000_phy_82577: | ||
382 | case e1000_phy_82579: | ||
383 | phy->ops.check_polarity = e1000_check_polarity_82577; | ||
384 | phy->ops.force_speed_duplex = | ||
385 | e1000_phy_force_speed_duplex_82577; | ||
386 | phy->ops.get_cable_length = e1000_get_cable_length_82577; | ||
387 | phy->ops.get_info = e1000_get_phy_info_82577; | ||
388 | phy->ops.commit = e1000e_phy_sw_reset; | ||
389 | break; | ||
390 | case e1000_phy_82578: | ||
391 | phy->ops.check_polarity = e1000_check_polarity_m88; | ||
392 | phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; | ||
393 | phy->ops.get_cable_length = e1000e_get_cable_length_m88; | ||
394 | phy->ops.get_info = e1000e_get_phy_info_m88; | ||
395 | break; | ||
396 | default: | ||
397 | ret_val = -E1000_ERR_PHY; | ||
398 | break; | ||
399 | } | ||
400 | |||
401 | out: | ||
402 | return ret_val; | ||
403 | } | ||
404 | |||
405 | /** | ||
406 | * e1000_init_phy_params_ich8lan - Initialize PHY function pointers | ||
407 | * @hw: pointer to the HW structure | ||
408 | * | ||
409 | * Initialize family-specific PHY parameters and function pointers. | ||
410 | **/ | ||
411 | static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) | ||
412 | { | ||
413 | struct e1000_phy_info *phy = &hw->phy; | ||
414 | s32 ret_val; | ||
415 | u16 i = 0; | ||
416 | |||
417 | phy->addr = 1; | ||
418 | phy->reset_delay_us = 100; | ||
419 | |||
420 | phy->ops.power_up = e1000_power_up_phy_copper; | ||
421 | phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; | ||
422 | |||
423 | /* | ||
424 | * We may need to do this twice - once for IGP and if that fails, | ||
425 | * we'll set BM func pointers and try again | ||
426 | */ | ||
427 | ret_val = e1000e_determine_phy_address(hw); | ||
428 | if (ret_val) { | ||
429 | phy->ops.write_reg = e1000e_write_phy_reg_bm; | ||
430 | phy->ops.read_reg = e1000e_read_phy_reg_bm; | ||
431 | ret_val = e1000e_determine_phy_address(hw); | ||
432 | if (ret_val) { | ||
433 | e_dbg("Cannot determine PHY addr. Erroring out\n"); | ||
434 | return ret_val; | ||
435 | } | ||
436 | } | ||
437 | |||
438 | phy->id = 0; | ||
439 | while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && | ||
440 | (i++ < 100)) { | ||
441 | usleep_range(1000, 2000); | ||
442 | ret_val = e1000e_get_phy_id(hw); | ||
443 | if (ret_val) | ||
444 | return ret_val; | ||
445 | } | ||
446 | |||
447 | /* Verify phy id */ | ||
448 | switch (phy->id) { | ||
449 | case IGP03E1000_E_PHY_ID: | ||
450 | phy->type = e1000_phy_igp_3; | ||
451 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | ||
452 | phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked; | ||
453 | phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked; | ||
454 | phy->ops.get_info = e1000e_get_phy_info_igp; | ||
455 | phy->ops.check_polarity = e1000_check_polarity_igp; | ||
456 | phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp; | ||
457 | break; | ||
458 | case IFE_E_PHY_ID: | ||
459 | case IFE_PLUS_E_PHY_ID: | ||
460 | case IFE_C_E_PHY_ID: | ||
461 | phy->type = e1000_phy_ife; | ||
462 | phy->autoneg_mask = E1000_ALL_NOT_GIG; | ||
463 | phy->ops.get_info = e1000_get_phy_info_ife; | ||
464 | phy->ops.check_polarity = e1000_check_polarity_ife; | ||
465 | phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; | ||
466 | break; | ||
467 | case BME1000_E_PHY_ID: | ||
468 | phy->type = e1000_phy_bm; | ||
469 | phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; | ||
470 | phy->ops.read_reg = e1000e_read_phy_reg_bm; | ||
471 | phy->ops.write_reg = e1000e_write_phy_reg_bm; | ||
472 | phy->ops.commit = e1000e_phy_sw_reset; | ||
473 | phy->ops.get_info = e1000e_get_phy_info_m88; | ||
474 | phy->ops.check_polarity = e1000_check_polarity_m88; | ||
475 | phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; | ||
476 | break; | ||
477 | default: | ||
478 | return -E1000_ERR_PHY; | ||
479 | break; | ||
480 | } | ||
481 | |||
482 | return 0; | ||
483 | } | ||
484 | |||
485 | /** | ||
486 | * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers | ||
487 | * @hw: pointer to the HW structure | ||
488 | * | ||
489 | * Initialize family-specific NVM parameters and function | ||
490 | * pointers. | ||
491 | **/ | ||
492 | static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) | ||
493 | { | ||
494 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
495 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | ||
496 | u32 gfpreg, sector_base_addr, sector_end_addr; | ||
497 | u16 i; | ||
498 | |||
499 | /* Can't read flash registers if the register set isn't mapped. */ | ||
500 | if (!hw->flash_address) { | ||
501 | e_dbg("ERROR: Flash registers not mapped\n"); | ||
502 | return -E1000_ERR_CONFIG; | ||
503 | } | ||
504 | |||
505 | nvm->type = e1000_nvm_flash_sw; | ||
506 | |||
507 | gfpreg = er32flash(ICH_FLASH_GFPREG); | ||
508 | |||
509 | /* | ||
510 | * sector_X_addr is a "sector"-aligned address (4096 bytes) | ||
511 | * Add 1 to sector_end_addr since this sector is included in | ||
512 | * the overall size. | ||
513 | */ | ||
514 | sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; | ||
515 | sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; | ||
516 | |||
517 | /* flash_base_addr is byte-aligned */ | ||
518 | nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; | ||
519 | |||
520 | /* | ||
521 | * find total size of the NVM, then cut in half since the total | ||
522 | * size represents two separate NVM banks. | ||
523 | */ | ||
524 | nvm->flash_bank_size = (sector_end_addr - sector_base_addr) | ||
525 | << FLASH_SECTOR_ADDR_SHIFT; | ||
526 | nvm->flash_bank_size /= 2; | ||
527 | /* Adjust to word count */ | ||
528 | nvm->flash_bank_size /= sizeof(u16); | ||
529 | |||
530 | nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; | ||
531 | |||
532 | /* Clear shadow ram */ | ||
533 | for (i = 0; i < nvm->word_size; i++) { | ||
534 | dev_spec->shadow_ram[i].modified = false; | ||
535 | dev_spec->shadow_ram[i].value = 0xFFFF; | ||
536 | } | ||
537 | |||
538 | return 0; | ||
539 | } | ||
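As a quick sanity check of the sizing math in e1000_init_nvm_params_ich8lan() above, the standalone sketch below walks a hypothetical GFPREG value through the same steps: extract base and limit sectors, shift to a byte address, halve for the two banks, and convert to 16-bit words. The mask and shift constants are assumptions (13-bit sector fields, 4096-byte sectors, as the comments suggest), not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define GFPREG_BASE_MASK   0x1FFFu      /* assumed 13-bit sector field */
#define SECTOR_ADDR_SHIFT  12           /* assumed 4096-byte sectors */

int main(void)
{
        uint32_t gfpreg = 0x001F0010;   /* hypothetical GFPREG: base 0x10, limit 0x1F */
        uint32_t sector_base = gfpreg & GFPREG_BASE_MASK;
        uint32_t sector_end  = ((gfpreg >> 16) & GFPREG_BASE_MASK) + 1;

        uint32_t flash_base_addr = sector_base << SECTOR_ADDR_SHIFT;
        uint32_t flash_bank_size = (sector_end - sector_base) << SECTOR_ADDR_SHIFT;

        flash_bank_size /= 2;                   /* region holds two NVM banks */
        flash_bank_size /= sizeof(uint16_t);    /* bytes -> 16-bit words */

        printf("flash base 0x%x, bank size %u words\n",
               (unsigned)flash_base_addr, (unsigned)flash_bank_size);
        return 0;
}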
540 | |||
541 | /** | ||
542 | * e1000_init_mac_params_ich8lan - Initialize MAC function pointers | ||
543 | * @adapter: pointer to the adapter structure | ||
544 | * | ||
545 | * Initialize family-specific MAC parameters and function | ||
546 | * pointers. | ||
547 | **/ | ||
548 | static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) | ||
549 | { | ||
550 | struct e1000_hw *hw = &adapter->hw; | ||
551 | struct e1000_mac_info *mac = &hw->mac; | ||
552 | |||
553 | /* Set media type function pointer */ | ||
554 | hw->phy.media_type = e1000_media_type_copper; | ||
555 | |||
556 | /* Set mta register count */ | ||
557 | mac->mta_reg_count = 32; | ||
558 | /* Set rar entry count */ | ||
559 | mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; | ||
560 | if (mac->type == e1000_ich8lan) | ||
561 | mac->rar_entry_count--; | ||
562 | /* FWSM register */ | ||
563 | mac->has_fwsm = true; | ||
564 | /* ARC subsystem not supported */ | ||
565 | mac->arc_subsystem_valid = false; | ||
566 | /* Adaptive IFS supported */ | ||
567 | mac->adaptive_ifs = true; | ||
568 | |||
569 | /* LED operations */ | ||
570 | switch (mac->type) { | ||
571 | case e1000_ich8lan: | ||
572 | case e1000_ich9lan: | ||
573 | case e1000_ich10lan: | ||
574 | /* check management mode */ | ||
575 | mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; | ||
576 | /* ID LED init */ | ||
577 | mac->ops.id_led_init = e1000e_id_led_init; | ||
578 | /* blink LED */ | ||
579 | mac->ops.blink_led = e1000e_blink_led_generic; | ||
580 | /* setup LED */ | ||
581 | mac->ops.setup_led = e1000e_setup_led_generic; | ||
582 | /* cleanup LED */ | ||
583 | mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; | ||
584 | /* turn on/off LED */ | ||
585 | mac->ops.led_on = e1000_led_on_ich8lan; | ||
586 | mac->ops.led_off = e1000_led_off_ich8lan; | ||
587 | break; | ||
588 | case e1000_pchlan: | ||
589 | case e1000_pch2lan: | ||
590 | /* check management mode */ | ||
591 | mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; | ||
592 | /* ID LED init */ | ||
593 | mac->ops.id_led_init = e1000_id_led_init_pchlan; | ||
594 | /* setup LED */ | ||
595 | mac->ops.setup_led = e1000_setup_led_pchlan; | ||
596 | /* cleanup LED */ | ||
597 | mac->ops.cleanup_led = e1000_cleanup_led_pchlan; | ||
598 | /* turn on/off LED */ | ||
599 | mac->ops.led_on = e1000_led_on_pchlan; | ||
600 | mac->ops.led_off = e1000_led_off_pchlan; | ||
601 | break; | ||
602 | default: | ||
603 | break; | ||
604 | } | ||
605 | |||
606 | /* Enable PCS Lock-loss workaround for ICH8 */ | ||
607 | if (mac->type == e1000_ich8lan) | ||
608 | e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); | ||
609 | |||
610 | /* Gate automatic PHY configuration by hardware on managed 82579 */ | ||
611 | if ((mac->type == e1000_pch2lan) && | ||
612 | (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) | ||
613 | e1000_gate_hw_phy_config_ich8lan(hw, true); | ||
614 | |||
615 | return 0; | ||
616 | } | ||
617 | |||
618 | /** | ||
619 | * e1000_set_eee_pchlan - Enable/disable EEE support | ||
620 | * @hw: pointer to the HW structure | ||
621 | * | ||
622 | * Enable/disable EEE based on setting in dev_spec structure. The bits in | ||
623 | * the LPI Control register will remain set only if/when link is up. | ||
624 | **/ | ||
625 | static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) | ||
626 | { | ||
627 | s32 ret_val = 0; | ||
628 | u16 phy_reg; | ||
629 | |||
630 | if (hw->phy.type != e1000_phy_82579) | ||
631 | goto out; | ||
632 | |||
633 | ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); | ||
634 | if (ret_val) | ||
635 | goto out; | ||
636 | |||
637 | if (hw->dev_spec.ich8lan.eee_disable) | ||
638 | phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK; | ||
639 | else | ||
640 | phy_reg |= I82579_LPI_CTRL_ENABLE_MASK; | ||
641 | |||
642 | ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); | ||
643 | out: | ||
644 | return ret_val; | ||
645 | } | ||
646 | |||
647 | /** | ||
648 | * e1000_check_for_copper_link_ich8lan - Check for link (Copper) | ||
649 | * @hw: pointer to the HW structure | ||
650 | * | ||
651 | * Checks to see if the link status of the hardware has changed. If a | ||
652 | * change in link status has been detected, then we read the PHY registers | ||
653 | * to get the current speed/duplex if link exists. | ||
654 | **/ | ||
655 | static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | ||
656 | { | ||
657 | struct e1000_mac_info *mac = &hw->mac; | ||
658 | s32 ret_val; | ||
659 | bool link; | ||
660 | |||
661 | /* | ||
662 | * We only want to go out to the PHY registers to see if Auto-Neg | ||
663 | * has completed and/or if our link status has changed. The | ||
664 | * get_link_status flag is set upon receiving a Link Status | ||
665 | * Change or Rx Sequence Error interrupt. | ||
666 | */ | ||
667 | if (!mac->get_link_status) { | ||
668 | ret_val = 0; | ||
669 | goto out; | ||
670 | } | ||
671 | |||
672 | /* | ||
673 | * First we want to see if the MII Status Register reports | ||
674 | * link. If so, then we want to get the current speed/duplex | ||
675 | * of the PHY. | ||
676 | */ | ||
677 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | ||
678 | if (ret_val) | ||
679 | goto out; | ||
680 | |||
681 | if (hw->mac.type == e1000_pchlan) { | ||
682 | ret_val = e1000_k1_gig_workaround_hv(hw, link); | ||
683 | if (ret_val) | ||
684 | goto out; | ||
685 | } | ||
686 | |||
687 | if (!link) | ||
688 | goto out; /* No link detected */ | ||
689 | |||
690 | mac->get_link_status = false; | ||
691 | |||
692 | if (hw->phy.type == e1000_phy_82578) { | ||
693 | ret_val = e1000_link_stall_workaround_hv(hw); | ||
694 | if (ret_val) | ||
695 | goto out; | ||
696 | } | ||
697 | |||
698 | if (hw->mac.type == e1000_pch2lan) { | ||
699 | ret_val = e1000_k1_workaround_lv(hw); | ||
700 | if (ret_val) | ||
701 | goto out; | ||
702 | } | ||
703 | |||
704 | /* | ||
705 | * Check if there was DownShift; it must be checked | ||
706 | * immediately after link-up | ||
707 | */ | ||
708 | e1000e_check_downshift(hw); | ||
709 | |||
710 | /* Enable/Disable EEE after link up */ | ||
711 | ret_val = e1000_set_eee_pchlan(hw); | ||
712 | if (ret_val) | ||
713 | goto out; | ||
714 | |||
715 | /* | ||
716 | * If we are forcing speed/duplex, then we simply return since | ||
717 | * we have already determined whether we have link or not. | ||
718 | */ | ||
719 | if (!mac->autoneg) { | ||
720 | ret_val = -E1000_ERR_CONFIG; | ||
721 | goto out; | ||
722 | } | ||
723 | |||
724 | /* | ||
725 | * Auto-Neg is enabled. Auto Speed Detection takes care | ||
726 | * of MAC speed/duplex configuration. So we only need to | ||
727 | * configure Collision Distance in the MAC. | ||
728 | */ | ||
729 | e1000e_config_collision_dist(hw); | ||
730 | |||
731 | /* | ||
732 | * Configure Flow Control now that Auto-Neg has completed. | ||
733 | * First, we need to restore the desired flow control | ||
734 | * settings because we may have had to re-autoneg with a | ||
735 | * different link partner. | ||
736 | */ | ||
737 | ret_val = e1000e_config_fc_after_link_up(hw); | ||
738 | if (ret_val) | ||
739 | e_dbg("Error configuring flow control\n"); | ||
740 | |||
741 | out: | ||
742 | return ret_val; | ||
743 | } | ||
744 | |||
745 | static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) | ||
746 | { | ||
747 | struct e1000_hw *hw = &adapter->hw; | ||
748 | s32 rc; | ||
749 | |||
750 | rc = e1000_init_mac_params_ich8lan(adapter); | ||
751 | if (rc) | ||
752 | return rc; | ||
753 | |||
754 | rc = e1000_init_nvm_params_ich8lan(hw); | ||
755 | if (rc) | ||
756 | return rc; | ||
757 | |||
758 | switch (hw->mac.type) { | ||
759 | case e1000_ich8lan: | ||
760 | case e1000_ich9lan: | ||
761 | case e1000_ich10lan: | ||
762 | rc = e1000_init_phy_params_ich8lan(hw); | ||
763 | break; | ||
764 | case e1000_pchlan: | ||
765 | case e1000_pch2lan: | ||
766 | rc = e1000_init_phy_params_pchlan(hw); | ||
767 | break; | ||
768 | default: | ||
769 | break; | ||
770 | } | ||
771 | if (rc) | ||
772 | return rc; | ||
773 | |||
774 | /* | ||
775 | * Disable Jumbo Frame support on parts with Intel 10/100 PHY or | ||
776 | * on parts with MACsec enabled in NVM (reflected in CTRL_EXT). | ||
777 | */ | ||
778 | if ((adapter->hw.phy.type == e1000_phy_ife) || | ||
779 | ((adapter->hw.mac.type >= e1000_pch2lan) && | ||
780 | (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) { | ||
781 | adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; | ||
782 | adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN; | ||
783 | |||
784 | hw->mac.ops.blink_led = NULL; | ||
785 | } | ||
786 | |||
787 | if ((adapter->hw.mac.type == e1000_ich8lan) && | ||
788 | (adapter->hw.phy.type == e1000_phy_igp_3)) | ||
789 | adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; | ||
790 | |||
791 | /* Disable EEE by default until IEEE802.3az spec is finalized */ | ||
792 | if (adapter->flags2 & FLAG2_HAS_EEE) | ||
793 | adapter->hw.dev_spec.ich8lan.eee_disable = true; | ||
794 | |||
795 | return 0; | ||
796 | } | ||
797 | |||
798 | static DEFINE_MUTEX(nvm_mutex); | ||
799 | |||
800 | /** | ||
801 | * e1000_acquire_nvm_ich8lan - Acquire NVM mutex | ||
802 | * @hw: pointer to the HW structure | ||
803 | * | ||
804 | * Acquires the mutex for performing NVM operations. | ||
805 | **/ | ||
806 | static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) | ||
807 | { | ||
808 | mutex_lock(&nvm_mutex); | ||
809 | |||
810 | return 0; | ||
811 | } | ||
812 | |||
813 | /** | ||
814 | * e1000_release_nvm_ich8lan - Release NVM mutex | ||
815 | * @hw: pointer to the HW structure | ||
816 | * | ||
817 | * Releases the mutex used while performing NVM operations. | ||
818 | **/ | ||
819 | static void e1000_release_nvm_ich8lan(struct e1000_hw *hw) | ||
820 | { | ||
821 | mutex_unlock(&nvm_mutex); | ||
822 | } | ||
823 | |||
824 | static DEFINE_MUTEX(swflag_mutex); | ||
825 | |||
826 | /** | ||
827 | * e1000_acquire_swflag_ich8lan - Acquire software control flag | ||
828 | * @hw: pointer to the HW structure | ||
829 | * | ||
830 | * Acquires the software control flag for performing PHY and select | ||
831 | * MAC CSR accesses. | ||
832 | **/ | ||
833 | static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | ||
834 | { | ||
835 | u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; | ||
836 | s32 ret_val = 0; | ||
837 | |||
838 | mutex_lock(&swflag_mutex); | ||
839 | |||
840 | while (timeout) { | ||
841 | extcnf_ctrl = er32(EXTCNF_CTRL); | ||
842 | if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) | ||
843 | break; | ||
844 | |||
845 | mdelay(1); | ||
846 | timeout--; | ||
847 | } | ||
848 | |||
849 | if (!timeout) { | ||
850 | e_dbg("SW/FW/HW has locked the resource for too long.\n"); | ||
851 | ret_val = -E1000_ERR_CONFIG; | ||
852 | goto out; | ||
853 | } | ||
854 | |||
855 | timeout = SW_FLAG_TIMEOUT; | ||
856 | |||
857 | extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; | ||
858 | ew32(EXTCNF_CTRL, extcnf_ctrl); | ||
859 | |||
860 | while (timeout) { | ||
861 | extcnf_ctrl = er32(EXTCNF_CTRL); | ||
862 | if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) | ||
863 | break; | ||
864 | |||
865 | mdelay(1); | ||
866 | timeout--; | ||
867 | } | ||
868 | |||
869 | if (!timeout) { | ||
870 | e_dbg("Failed to acquire the semaphore.\n"); | ||
871 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; | ||
872 | ew32(EXTCNF_CTRL, extcnf_ctrl); | ||
873 | ret_val = -E1000_ERR_CONFIG; | ||
874 | goto out; | ||
875 | } | ||
876 | |||
877 | out: | ||
878 | if (ret_val) | ||
879 | mutex_unlock(&swflag_mutex); | ||
880 | |||
881 | return ret_val; | ||
882 | } | ||
883 | |||
884 | /** | ||
885 | * e1000_release_swflag_ich8lan - Release software control flag | ||
886 | * @hw: pointer to the HW structure | ||
887 | * | ||
888 | * Releases the software control flag for performing PHY and select | ||
889 | * MAC CSR accesses. | ||
890 | **/ | ||
891 | static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) | ||
892 | { | ||
893 | u32 extcnf_ctrl; | ||
894 | |||
895 | extcnf_ctrl = er32(EXTCNF_CTRL); | ||
896 | |||
897 | if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { | ||
898 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; | ||
899 | ew32(EXTCNF_CTRL, extcnf_ctrl); | ||
900 | } else { | ||
901 | e_dbg("Semaphore unexpectedly released by sw/fw/hw\n"); | ||
902 | } | ||
903 | |||
904 | mutex_unlock(&swflag_mutex); | ||
905 | } | ||
906 | |||
907 | /** | ||
908 | * e1000_check_mng_mode_ich8lan - Checks management mode | ||
909 | * @hw: pointer to the HW structure | ||
910 | * | ||
911 | * This checks if the adapter has any manageability enabled. | ||
912 | * This is a function pointer entry point only called by read/write | ||
913 | * routines for the PHY and NVM parts. | ||
914 | **/ | ||
915 | static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) | ||
916 | { | ||
917 | u32 fwsm; | ||
918 | |||
919 | fwsm = er32(FWSM); | ||
920 | return (fwsm & E1000_ICH_FWSM_FW_VALID) && | ||
921 | ((fwsm & E1000_FWSM_MODE_MASK) == | ||
922 | (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); | ||
923 | } | ||
924 | |||
925 | /** | ||
926 | * e1000_check_mng_mode_pchlan - Checks management mode | ||
927 | * @hw: pointer to the HW structure | ||
928 | * | ||
929 | * This checks if the adapter has iAMT enabled. | ||
930 | * This is a function pointer entry point only called by read/write | ||
931 | * routines for the PHY and NVM parts. | ||
932 | **/ | ||
933 | static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) | ||
934 | { | ||
935 | u32 fwsm; | ||
936 | |||
937 | fwsm = er32(FWSM); | ||
938 | return (fwsm & E1000_ICH_FWSM_FW_VALID) && | ||
939 | (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); | ||
940 | } | ||
941 | |||
942 | /** | ||
943 | * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked | ||
944 | * @hw: pointer to the HW structure | ||
945 | * | ||
946 | * Checks if firmware is blocking the reset of the PHY. | ||
947 | * This is a function pointer entry point only called by | ||
948 | * reset routines. | ||
949 | **/ | ||
950 | static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) | ||
951 | { | ||
952 | u32 fwsm; | ||
953 | |||
954 | fwsm = er32(FWSM); | ||
955 | |||
956 | return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET; | ||
957 | } | ||
958 | |||
959 | /** | ||
960 | * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states | ||
961 | * @hw: pointer to the HW structure | ||
962 | * | ||
963 | * Assumes semaphore already acquired. | ||
964 | * | ||
965 | **/ | ||
966 | static s32 e1000_write_smbus_addr(struct e1000_hw *hw) | ||
967 | { | ||
968 | u16 phy_data; | ||
969 | u32 strap = er32(STRAP); | ||
970 | s32 ret_val = 0; | ||
971 | |||
972 | strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; | ||
973 | |||
974 | ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); | ||
975 | if (ret_val) | ||
976 | goto out; | ||
977 | |||
978 | phy_data &= ~HV_SMB_ADDR_MASK; | ||
979 | phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); | ||
980 | phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; | ||
981 | ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); | ||
982 | |||
983 | out: | ||
984 | return ret_val; | ||
985 | } | ||
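The read-modify-write in e1000_write_smbus_addr() above boils down to: mask the SMBus address out of the strap register, clear the old address field in the PHY register image, insert the new one, and mark it valid. A standalone sketch of that sequence, using placeholder masks, shifts, and sample values rather than the driver's real definitions:

#include <stdint.h>
#include <stdio.h>

#define STRAP_SMB_ADDR_MASK   0x00FE0000u       /* placeholder strap field */
#define STRAP_SMB_ADDR_SHIFT  17
#define SMB_ADDR_MASK         0x007Fu           /* placeholder PHY field */
#define SMB_ADDR_PEC_EN       0x0200u
#define SMB_ADDR_VALID        0x0080u

int main(void)
{
        uint32_t strap = 0x00C40000;    /* hypothetical STRAP contents */
        uint16_t phy_data = 0x0011;     /* hypothetical PHY register contents */

        strap &= STRAP_SMB_ADDR_MASK;

        phy_data &= ~SMB_ADDR_MASK;                             /* drop old address */
        phy_data |= (uint16_t)(strap >> STRAP_SMB_ADDR_SHIFT);  /* insert new one */
        phy_data |= SMB_ADDR_PEC_EN | SMB_ADDR_VALID;           /* mark it usable */

        printf("new PHY register image: 0x%04x\n", (unsigned)phy_data);
        return 0;
}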
986 | |||
987 | /** | ||
988 | * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration | ||
989 | * @hw: pointer to the HW structure | ||
990 | * | ||
991 | * SW should configure the LCD from the NVM extended configuration region | ||
992 | * as a workaround for certain parts. | ||
993 | **/ | ||
994 | static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) | ||
995 | { | ||
996 | struct e1000_phy_info *phy = &hw->phy; | ||
997 | u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; | ||
998 | s32 ret_val = 0; | ||
999 | u16 word_addr, reg_data, reg_addr, phy_page = 0; | ||
1000 | |||
1001 | /* | ||
1002 | * Initialize the PHY from the NVM on ICH platforms. This | ||
1003 | * is needed due to an issue where the NVM configuration is | ||
1004 | * not properly autoloaded after power transitions. | ||
1005 | * Therefore, after each PHY reset, we will load the | ||
1006 | * configuration data out of the NVM manually. | ||
1007 | */ | ||
1008 | switch (hw->mac.type) { | ||
1009 | case e1000_ich8lan: | ||
1010 | if (phy->type != e1000_phy_igp_3) | ||
1011 | return ret_val; | ||
1012 | |||
1013 | if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) || | ||
1014 | (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) { | ||
1015 | sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; | ||
1016 | break; | ||
1017 | } | ||
1018 | /* Fall-thru */ | ||
1019 | case e1000_pchlan: | ||
1020 | case e1000_pch2lan: | ||
1021 | sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; | ||
1022 | break; | ||
1023 | default: | ||
1024 | return ret_val; | ||
1025 | } | ||
1026 | |||
1027 | ret_val = hw->phy.ops.acquire(hw); | ||
1028 | if (ret_val) | ||
1029 | return ret_val; | ||
1030 | |||
1031 | data = er32(FEXTNVM); | ||
1032 | if (!(data & sw_cfg_mask)) | ||
1033 | goto out; | ||
1034 | |||
1035 | /* | ||
1036 | * Make sure HW does not configure LCD from PHY | ||
1037 | * extended configuration before SW configuration | ||
1038 | */ | ||
1039 | data = er32(EXTCNF_CTRL); | ||
1040 | if (!(hw->mac.type == e1000_pch2lan)) { | ||
1041 | if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) | ||
1042 | goto out; | ||
1043 | } | ||
1044 | |||
1045 | cnf_size = er32(EXTCNF_SIZE); | ||
1046 | cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; | ||
1047 | cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; | ||
1048 | if (!cnf_size) | ||
1049 | goto out; | ||
1050 | |||
1051 | cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; | ||
1052 | cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; | ||
1053 | |||
1054 | if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && | ||
1055 | (hw->mac.type == e1000_pchlan)) || | ||
1056 | (hw->mac.type == e1000_pch2lan)) { | ||
1057 | /* | ||
1058 | * HW configures the SMBus address and LEDs when the | ||
1059 | * OEM and LCD Write Enable bits are set in the NVM. | ||
1060 | * When both NVM bits are cleared, SW will configure | ||
1061 | * them instead. | ||
1062 | */ | ||
1063 | ret_val = e1000_write_smbus_addr(hw); | ||
1064 | if (ret_val) | ||
1065 | goto out; | ||
1066 | |||
1067 | data = er32(LEDCTL); | ||
1068 | ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, | ||
1069 | (u16)data); | ||
1070 | if (ret_val) | ||
1071 | goto out; | ||
1072 | } | ||
1073 | |||
1074 | /* Configure LCD from extended configuration region. */ | ||
1075 | |||
1076 | /* cnf_base_addr is in DWORD */ | ||
1077 | word_addr = (u16)(cnf_base_addr << 1); | ||
1078 | |||
1079 | for (i = 0; i < cnf_size; i++) { | ||
1080 | ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, | ||
1081 | ®_data); | ||
1082 | if (ret_val) | ||
1083 | goto out; | ||
1084 | |||
1085 | ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), | ||
1086 | 1, ®_addr); | ||
1087 | if (ret_val) | ||
1088 | goto out; | ||
1089 | |||
1090 | /* Save off the PHY page for future writes. */ | ||
1091 | if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { | ||
1092 | phy_page = reg_data; | ||
1093 | continue; | ||
1094 | } | ||
1095 | |||
1096 | reg_addr &= PHY_REG_MASK; | ||
1097 | reg_addr |= phy_page; | ||
1098 | |||
1099 | ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, | ||
1100 | reg_data); | ||
1101 | if (ret_val) | ||
1102 | goto out; | ||
1103 | } | ||
1104 | |||
1105 | out: | ||
1106 | hw->phy.ops.release(hw); | ||
1107 | return ret_val; | ||
1108 | } | ||
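The loop in e1000_sw_lcd_config_ich8lan() above treats the extended configuration region as a list of (data, address) word pairs, where a page-select entry changes the page applied to subsequent register writes. A standalone sketch of that parsing, with made-up constants and a tiny in-memory table standing in for the NVM words:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SELECT_REG  0x1Fu          /* placeholder page-select address */
#define PHY_REG_MASK     0x1Fu          /* placeholder low-bits register mask */

int main(void)
{
        /* (data, address) word pairs as they would sit in the NVM region */
        static const uint16_t cfg[] = {
                0x0020, PAGE_SELECT_REG,        /* switch to page 0x0020 */
                0x1234, 0x0010,                 /* write 0x1234 to reg 0x10 on that page */
                0xBEEF, 0x0011,
        };
        uint16_t phy_page = 0;
        unsigned int i;

        for (i = 0; i < sizeof(cfg) / sizeof(cfg[0]); i += 2) {
                uint16_t reg_data = cfg[i];
                uint16_t reg_addr = cfg[i + 1];

                if (reg_addr == PAGE_SELECT_REG) {
                        phy_page = reg_data;    /* remember page for later writes */
                        continue;
                }

                reg_addr &= PHY_REG_MASK;
                reg_addr |= phy_page;
                printf("write 0x%04x to PHY reg 0x%04x\n",
                       (unsigned)reg_data, (unsigned)reg_addr);
        }
        return 0;
}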
1109 | |||
1110 | /** | ||
1111 | * e1000_k1_gig_workaround_hv - K1 Si workaround | ||
1112 | * @hw: pointer to the HW structure | ||
1113 | * @link: link up bool flag | ||
1114 | * | ||
1115 | * If K1 is enabled for 1Gbps, the MAC might stall when transitioning | ||
1116 | * from a lower speed. This workaround disables K1 whenever link is at 1Gig. | ||
1117 | * If link is down, the function will restore the default K1 setting located | ||
1118 | * in the NVM. | ||
1119 | **/ | ||
1120 | static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) | ||
1121 | { | ||
1122 | s32 ret_val = 0; | ||
1123 | u16 status_reg = 0; | ||
1124 | bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; | ||
1125 | |||
1126 | if (hw->mac.type != e1000_pchlan) | ||
1127 | goto out; | ||
1128 | |||
1129 | /* Wrap the whole flow with the sw flag */ | ||
1130 | ret_val = hw->phy.ops.acquire(hw); | ||
1131 | if (ret_val) | ||
1132 | goto out; | ||
1133 | |||
1134 | /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ | ||
1135 | if (link) { | ||
1136 | if (hw->phy.type == e1000_phy_82578) { | ||
1137 | ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, | ||
1138 | &status_reg); | ||
1139 | if (ret_val) | ||
1140 | goto release; | ||
1141 | |||
1142 | status_reg &= BM_CS_STATUS_LINK_UP | | ||
1143 | BM_CS_STATUS_RESOLVED | | ||
1144 | BM_CS_STATUS_SPEED_MASK; | ||
1145 | |||
1146 | if (status_reg == (BM_CS_STATUS_LINK_UP | | ||
1147 | BM_CS_STATUS_RESOLVED | | ||
1148 | BM_CS_STATUS_SPEED_1000)) | ||
1149 | k1_enable = false; | ||
1150 | } | ||
1151 | |||
1152 | if (hw->phy.type == e1000_phy_82577) { | ||
1153 | ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, | ||
1154 | &status_reg); | ||
1155 | if (ret_val) | ||
1156 | goto release; | ||
1157 | |||
1158 | status_reg &= HV_M_STATUS_LINK_UP | | ||
1159 | HV_M_STATUS_AUTONEG_COMPLETE | | ||
1160 | HV_M_STATUS_SPEED_MASK; | ||
1161 | |||
1162 | if (status_reg == (HV_M_STATUS_LINK_UP | | ||
1163 | HV_M_STATUS_AUTONEG_COMPLETE | | ||
1164 | HV_M_STATUS_SPEED_1000)) | ||
1165 | k1_enable = false; | ||
1166 | } | ||
1167 | |||
1168 | /* Link stall fix for link up */ | ||
1169 | ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), | ||
1170 | 0x0100); | ||
1171 | if (ret_val) | ||
1172 | goto release; | ||
1173 | |||
1174 | } else { | ||
1175 | /* Link stall fix for link down */ | ||
1176 | ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), | ||
1177 | 0x4100); | ||
1178 | if (ret_val) | ||
1179 | goto release; | ||
1180 | } | ||
1181 | |||
1182 | ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); | ||
1183 | |||
1184 | release: | ||
1185 | hw->phy.ops.release(hw); | ||
1186 | out: | ||
1187 | return ret_val; | ||
1188 | } | ||
1189 | |||
1190 | /** | ||
1191 | * e1000_configure_k1_ich8lan - Configure K1 power state | ||
1192 | * @hw: pointer to the HW structure | ||
1193 | * @k1_enable: K1 state to configure | ||
1194 | * | ||
1195 | * Configure the K1 power state based on the provided parameter. | ||
1196 | * Assumes semaphore already acquired. | ||
1197 | * | ||
1198 | * Success returns 0, Failure returns -E1000_ERR_PHY (-2) | ||
1199 | **/ | ||
1200 | s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) | ||
1201 | { | ||
1202 | s32 ret_val = 0; | ||
1203 | u32 ctrl_reg = 0; | ||
1204 | u32 ctrl_ext = 0; | ||
1205 | u32 reg = 0; | ||
1206 | u16 kmrn_reg = 0; | ||
1207 | |||
1208 | ret_val = e1000e_read_kmrn_reg_locked(hw, | ||
1209 | E1000_KMRNCTRLSTA_K1_CONFIG, | ||
1210 | &kmrn_reg); | ||
1211 | if (ret_val) | ||
1212 | goto out; | ||
1213 | |||
1214 | if (k1_enable) | ||
1215 | kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; | ||
1216 | else | ||
1217 | kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; | ||
1218 | |||
1219 | ret_val = e1000e_write_kmrn_reg_locked(hw, | ||
1220 | E1000_KMRNCTRLSTA_K1_CONFIG, | ||
1221 | kmrn_reg); | ||
1222 | if (ret_val) | ||
1223 | goto out; | ||
1224 | |||
1225 | udelay(20); | ||
1226 | ctrl_ext = er32(CTRL_EXT); | ||
1227 | ctrl_reg = er32(CTRL); | ||
1228 | |||
1229 | reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); | ||
1230 | reg |= E1000_CTRL_FRCSPD; | ||
1231 | ew32(CTRL, reg); | ||
1232 | |||
1233 | ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); | ||
1234 | e1e_flush(); | ||
1235 | udelay(20); | ||
1236 | ew32(CTRL, ctrl_reg); | ||
1237 | ew32(CTRL_EXT, ctrl_ext); | ||
1238 | e1e_flush(); | ||
1239 | udelay(20); | ||
1240 | |||
1241 | out: | ||
1242 | return ret_val; | ||
1243 | } | ||
1244 | |||
1245 | /** | ||
1246 | * e1000_oem_bits_config_ich8lan - SW-based OEM bits configuration | ||
1247 | * @hw: pointer to the HW structure | ||
1248 | * @d0_state: true if the device is entering D0, false if entering D3 | ||
1249 | * | ||
1250 | * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are | ||
1251 | * collectively called OEM bits. The OEM Write Enable bit and SW Config bit | ||
1252 | * in NVM determine whether HW should configure LPLU and Gbe Disable. | ||
1253 | **/ | ||
1254 | static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) | ||
1255 | { | ||
1256 | s32 ret_val = 0; | ||
1257 | u32 mac_reg; | ||
1258 | u16 oem_reg; | ||
1259 | |||
1260 | if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan)) | ||
1261 | return ret_val; | ||
1262 | |||
1263 | ret_val = hw->phy.ops.acquire(hw); | ||
1264 | if (ret_val) | ||
1265 | return ret_val; | ||
1266 | |||
1267 | if (!(hw->mac.type == e1000_pch2lan)) { | ||
1268 | mac_reg = er32(EXTCNF_CTRL); | ||
1269 | if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) | ||
1270 | goto out; | ||
1271 | } | ||
1272 | |||
1273 | mac_reg = er32(FEXTNVM); | ||
1274 | if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) | ||
1275 | goto out; | ||
1276 | |||
1277 | mac_reg = er32(PHY_CTRL); | ||
1278 | |||
1279 | ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); | ||
1280 | if (ret_val) | ||
1281 | goto out; | ||
1282 | |||
1283 | oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); | ||
1284 | |||
1285 | if (d0_state) { | ||
1286 | if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) | ||
1287 | oem_reg |= HV_OEM_BITS_GBE_DIS; | ||
1288 | |||
1289 | if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) | ||
1290 | oem_reg |= HV_OEM_BITS_LPLU; | ||
1291 | } else { | ||
1292 | if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE) | ||
1293 | oem_reg |= HV_OEM_BITS_GBE_DIS; | ||
1294 | |||
1295 | if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU) | ||
1296 | oem_reg |= HV_OEM_BITS_LPLU; | ||
1297 | } | ||
1298 | /* Restart auto-neg to activate the bits */ | ||
1299 | if (!e1000_check_reset_block(hw)) | ||
1300 | oem_reg |= HV_OEM_BITS_RESTART_AN; | ||
1301 | ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); | ||
1302 | |||
1303 | out: | ||
1304 | hw->phy.ops.release(hw); | ||
1305 | |||
1306 | return ret_val; | ||
1307 | } | ||
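For the D0/D3 branch in e1000_oem_bits_config_ich8lan() above, the mapping is: selected PHY_CTRL flags in the MAC decide which OEM bits (Gbe disable, LPLU) get set in the PHY register image, and the restart-autoneg bit is added so the change takes effect (the driver only adds it when a PHY reset is not blocked). A standalone sketch of that mapping with placeholder bit values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAC_GBE_DISABLE         0x0001u /* placeholder PHY_CTRL bits */
#define MAC_D0A_LPLU            0x0002u
#define MAC_NOND0A_GBE_DISABLE  0x0004u
#define MAC_NOND0A_LPLU         0x0008u

#define OEM_GBE_DIS     0x0040u         /* placeholder OEM bits */
#define OEM_LPLU        0x0004u
#define OEM_RESTART_AN  0x0400u

static uint16_t map_oem_bits(uint32_t mac_reg, uint16_t oem_reg, bool d0_state)
{
        oem_reg &= ~(OEM_GBE_DIS | OEM_LPLU);

        if (d0_state) {
                if (mac_reg & MAC_GBE_DISABLE)
                        oem_reg |= OEM_GBE_DIS;
                if (mac_reg & MAC_D0A_LPLU)
                        oem_reg |= OEM_LPLU;
        } else {
                if (mac_reg & MAC_NOND0A_GBE_DISABLE)
                        oem_reg |= OEM_GBE_DIS;
                if (mac_reg & MAC_NOND0A_LPLU)
                        oem_reg |= OEM_LPLU;
        }

        /* the driver only restarts auto-neg when a PHY reset is not blocked */
        return oem_reg | OEM_RESTART_AN;
}

int main(void)
{
        printf("D0 image: 0x%04x\n", (unsigned)map_oem_bits(MAC_D0A_LPLU, 0, true));
        printf("D3 image: 0x%04x\n",
               (unsigned)map_oem_bits(MAC_NOND0A_GBE_DISABLE, 0, false));
        return 0;
}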
1308 | |||
1309 | |||
1310 | /** | ||
1311 | * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode | ||
1312 | * @hw: pointer to the HW structure | ||
1313 | **/ | ||
1314 | static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) | ||
1315 | { | ||
1316 | s32 ret_val; | ||
1317 | u16 data; | ||
1318 | |||
1319 | ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data); | ||
1320 | if (ret_val) | ||
1321 | return ret_val; | ||
1322 | |||
1323 | data |= HV_KMRN_MDIO_SLOW; | ||
1324 | |||
1325 | ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data); | ||
1326 | |||
1327 | return ret_val; | ||
1328 | } | ||
1329 | |||
1330 | /** | ||
1331 | * e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be | ||
1332 | * done after every PHY reset. | ||
1333 | **/ | ||
1334 | static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) | ||
1335 | { | ||
1336 | s32 ret_val = 0; | ||
1337 | u16 phy_data; | ||
1338 | |||
1339 | if (hw->mac.type != e1000_pchlan) | ||
1340 | return ret_val; | ||
1341 | |||
1342 | /* Set MDIO slow mode before any other MDIO access */ | ||
1343 | if (hw->phy.type == e1000_phy_82577) { | ||
1344 | ret_val = e1000_set_mdio_slow_mode_hv(hw); | ||
1345 | if (ret_val) | ||
1346 | goto out; | ||
1347 | } | ||
1348 | |||
1349 | if (((hw->phy.type == e1000_phy_82577) && | ||
1350 | ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || | ||
1351 | ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { | ||
1352 | /* Disable generation of early preamble */ | ||
1353 | ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431); | ||
1354 | if (ret_val) | ||
1355 | return ret_val; | ||
1356 | |||
1357 | /* Preamble tuning for SSC */ | ||
1358 | ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204); | ||
1359 | if (ret_val) | ||
1360 | return ret_val; | ||
1361 | } | ||
1362 | |||
1363 | if (hw->phy.type == e1000_phy_82578) { | ||
1364 | /* | ||
1365 | * Return registers to default by doing a soft reset then | ||
1366 | * writing 0x3140 to the control register. | ||
1367 | */ | ||
1368 | if (hw->phy.revision < 2) { | ||
1369 | e1000e_phy_sw_reset(hw); | ||
1370 | ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140); | ||
1371 | } | ||
1372 | } | ||
1373 | |||
1374 | /* Select page 0 */ | ||
1375 | ret_val = hw->phy.ops.acquire(hw); | ||
1376 | if (ret_val) | ||
1377 | return ret_val; | ||
1378 | |||
1379 | hw->phy.addr = 1; | ||
1380 | ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); | ||
1381 | hw->phy.ops.release(hw); | ||
1382 | if (ret_val) | ||
1383 | goto out; | ||
1384 | |||
1385 | /* | ||
1386 | * Configure the K1 Si workaround during PHY reset, assuming there is | ||
1387 | * link, so that it disables K1 if link is at 1Gbps. | ||
1388 | */ | ||
1389 | ret_val = e1000_k1_gig_workaround_hv(hw, true); | ||
1390 | if (ret_val) | ||
1391 | goto out; | ||
1392 | |||
1393 | /* Workaround for link disconnects on a busy hub in half duplex */ | ||
1394 | ret_val = hw->phy.ops.acquire(hw); | ||
1395 | if (ret_val) | ||
1396 | goto out; | ||
1397 | ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); | ||
1398 | if (ret_val) | ||
1399 | goto release; | ||
1400 | ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, | ||
1401 | phy_data & 0x00FF); | ||
1402 | release: | ||
1403 | hw->phy.ops.release(hw); | ||
1404 | out: | ||
1405 | return ret_val; | ||
1406 | } | ||
1407 | |||
1408 | /** | ||
1409 | * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY | ||
1410 | * @hw: pointer to the HW structure | ||
1411 | **/ | ||
1412 | void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) | ||
1413 | { | ||
1414 | u32 mac_reg; | ||
1415 | u16 i, phy_reg = 0; | ||
1416 | s32 ret_val; | ||
1417 | |||
1418 | ret_val = hw->phy.ops.acquire(hw); | ||
1419 | if (ret_val) | ||
1420 | return; | ||
1421 | ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); | ||
1422 | if (ret_val) | ||
1423 | goto release; | ||
1424 | |||
1425 | /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */ | ||
1426 | for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { | ||
1427 | mac_reg = er32(RAL(i)); | ||
1428 | hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), | ||
1429 | (u16)(mac_reg & 0xFFFF)); | ||
1430 | hw->phy.ops.write_reg_page(hw, BM_RAR_M(i), | ||
1431 | (u16)((mac_reg >> 16) & 0xFFFF)); | ||
1432 | |||
1433 | mac_reg = er32(RAH(i)); | ||
1434 | hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), | ||
1435 | (u16)(mac_reg & 0xFFFF)); | ||
1436 | hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), | ||
1437 | (u16)((mac_reg & E1000_RAH_AV) | ||
1438 | >> 16)); | ||
1439 | } | ||
1440 | |||
1441 | e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); | ||
1442 | |||
1443 | release: | ||
1444 | hw->phy.ops.release(hw); | ||
1445 | } | ||
1446 | |||
1447 | /** | ||
1448 | * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation | ||
1449 | * with 82579 PHY | ||
1450 | * @hw: pointer to the HW structure | ||
1451 | * @enable: flag to enable/disable workaround when enabling/disabling jumbos | ||
1452 | **/ | ||
1453 | s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) | ||
1454 | { | ||
1455 | s32 ret_val = 0; | ||
1456 | u16 phy_reg, data; | ||
1457 | u32 mac_reg; | ||
1458 | u16 i; | ||
1459 | |||
1460 | if (hw->mac.type != e1000_pch2lan) | ||
1461 | goto out; | ||
1462 | |||
1463 | /* disable Rx path while enabling/disabling workaround */ | ||
1464 | e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); | ||
1465 | ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14)); | ||
1466 | if (ret_val) | ||
1467 | goto out; | ||
1468 | |||
1469 | if (enable) { | ||
1470 | /* | ||
1471 | * Write Rx addresses (rar_entry_count for RAL/H, +4 for | ||
1472 | * SHRAL/H) and initial CRC values to the MAC | ||
1473 | */ | ||
1474 | for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { | ||
1475 | u8 mac_addr[ETH_ALEN] = {0}; | ||
1476 | u32 addr_high, addr_low; | ||
1477 | |||
1478 | addr_high = er32(RAH(i)); | ||
1479 | if (!(addr_high & E1000_RAH_AV)) | ||
1480 | continue; | ||
1481 | addr_low = er32(RAL(i)); | ||
1482 | mac_addr[0] = (addr_low & 0xFF); | ||
1483 | mac_addr[1] = ((addr_low >> 8) & 0xFF); | ||
1484 | mac_addr[2] = ((addr_low >> 16) & 0xFF); | ||
1485 | mac_addr[3] = ((addr_low >> 24) & 0xFF); | ||
1486 | mac_addr[4] = (addr_high & 0xFF); | ||
1487 | mac_addr[5] = ((addr_high >> 8) & 0xFF); | ||
1488 | |||
1489 | ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr)); | ||
1490 | } | ||
1491 | |||
1492 | /* Write Rx addresses to the PHY */ | ||
1493 | e1000_copy_rx_addrs_to_phy_ich8lan(hw); | ||
1494 | |||
1495 | /* Enable jumbo frame workaround in the MAC */ | ||
1496 | mac_reg = er32(FFLT_DBG); | ||
1497 | mac_reg &= ~(1 << 14); | ||
1498 | mac_reg |= (7 << 15); | ||
1499 | ew32(FFLT_DBG, mac_reg); | ||
1500 | |||
1501 | mac_reg = er32(RCTL); | ||
1502 | mac_reg |= E1000_RCTL_SECRC; | ||
1503 | ew32(RCTL, mac_reg); | ||
1504 | |||
1505 | ret_val = e1000e_read_kmrn_reg(hw, | ||
1506 | E1000_KMRNCTRLSTA_CTRL_OFFSET, | ||
1507 | &data); | ||
1508 | if (ret_val) | ||
1509 | goto out; | ||
1510 | ret_val = e1000e_write_kmrn_reg(hw, | ||
1511 | E1000_KMRNCTRLSTA_CTRL_OFFSET, | ||
1512 | data | (1 << 0)); | ||
1513 | if (ret_val) | ||
1514 | goto out; | ||
1515 | ret_val = e1000e_read_kmrn_reg(hw, | ||
1516 | E1000_KMRNCTRLSTA_HD_CTRL, | ||
1517 | &data); | ||
1518 | if (ret_val) | ||
1519 | goto out; | ||
1520 | data &= ~(0xF << 8); | ||
1521 | data |= (0xB << 8); | ||
1522 | ret_val = e1000e_write_kmrn_reg(hw, | ||
1523 | E1000_KMRNCTRLSTA_HD_CTRL, | ||
1524 | data); | ||
1525 | if (ret_val) | ||
1526 | goto out; | ||
1527 | |||
1528 | /* Enable jumbo frame workaround in the PHY */ | ||
1529 | e1e_rphy(hw, PHY_REG(769, 23), &data); | ||
1530 | data &= ~(0x7F << 5); | ||
1531 | data |= (0x37 << 5); | ||
1532 | ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); | ||
1533 | if (ret_val) | ||
1534 | goto out; | ||
1535 | e1e_rphy(hw, PHY_REG(769, 16), &data); | ||
1536 | data &= ~(1 << 13); | ||
1537 | ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); | ||
1538 | if (ret_val) | ||
1539 | goto out; | ||
1540 | e1e_rphy(hw, PHY_REG(776, 20), &data); | ||
1541 | data &= ~(0x3FF << 2); | ||
1542 | data |= (0x1A << 2); | ||
1543 | ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); | ||
1544 | if (ret_val) | ||
1545 | goto out; | ||
1546 | ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xFE00); | ||
1547 | if (ret_val) | ||
1548 | goto out; | ||
1549 | e1e_rphy(hw, HV_PM_CTRL, &data); | ||
1550 | ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10)); | ||
1551 | if (ret_val) | ||
1552 | goto out; | ||
1553 | } else { | ||
1554 | /* Write MAC register values back to h/w defaults */ | ||
1555 | mac_reg = er32(FFLT_DBG); | ||
1556 | mac_reg &= ~(0xF << 14); | ||
1557 | ew32(FFLT_DBG, mac_reg); | ||
1558 | |||
1559 | mac_reg = er32(RCTL); | ||
1560 | mac_reg &= ~E1000_RCTL_SECRC; | ||
1561 | ew32(RCTL, mac_reg); | ||
1562 | |||
1563 | ret_val = e1000e_read_kmrn_reg(hw, | ||
1564 | E1000_KMRNCTRLSTA_CTRL_OFFSET, | ||
1565 | &data); | ||
1566 | if (ret_val) | ||
1567 | goto out; | ||
1568 | ret_val = e1000e_write_kmrn_reg(hw, | ||
1569 | E1000_KMRNCTRLSTA_CTRL_OFFSET, | ||
1570 | data & ~(1 << 0)); | ||
1571 | if (ret_val) | ||
1572 | goto out; | ||
1573 | ret_val = e1000e_read_kmrn_reg(hw, | ||
1574 | E1000_KMRNCTRLSTA_HD_CTRL, | ||
1575 | &data); | ||
1576 | if (ret_val) | ||
1577 | goto out; | ||
1578 | data &= ~(0xF << 8); | ||
1579 | data |= (0xB << 8); | ||
1580 | ret_val = e1000e_write_kmrn_reg(hw, | ||
1581 | E1000_KMRNCTRLSTA_HD_CTRL, | ||
1582 | data); | ||
1583 | if (ret_val) | ||
1584 | goto out; | ||
1585 | |||
1586 | /* Write PHY register values back to h/w defaults */ | ||
1587 | e1e_rphy(hw, PHY_REG(769, 23), &data); | ||
1588 | data &= ~(0x7F << 5); | ||
1589 | ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); | ||
1590 | if (ret_val) | ||
1591 | goto out; | ||
1592 | e1e_rphy(hw, PHY_REG(769, 16), &data); | ||
1593 | data |= (1 << 13); | ||
1594 | ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); | ||
1595 | if (ret_val) | ||
1596 | goto out; | ||
1597 | e1e_rphy(hw, PHY_REG(776, 20), &data); | ||
1598 | data &= ~(0x3FF << 2); | ||
1599 | data |= (0x8 << 2); | ||
1600 | ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); | ||
1601 | if (ret_val) | ||
1602 | goto out; | ||
1603 | ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00); | ||
1604 | if (ret_val) | ||
1605 | goto out; | ||
1606 | e1e_rphy(hw, HV_PM_CTRL, &data); | ||
1607 | ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10)); | ||
1608 | if (ret_val) | ||
1609 | goto out; | ||
1610 | } | ||
1611 | |||
1612 | /* re-enable Rx path after enabling/disabling workaround */ | ||
1613 | ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); | ||
1614 | |||
1615 | out: | ||
1616 | return ret_val; | ||
1617 | } | ||
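The address-copy loop near the top of e1000_lv_jumbo_workaround_ich8lan() above rebuilds each 6-byte MAC address from the RAL/RAH register pair before its CRC is written: the low register carries the first four bytes, the low half of the high register the last two. A standalone byte-extraction sketch with invented register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t addr_low  = 0x563412AA;        /* hypothetical RAL contents */
        uint32_t addr_high = 0x0000BC9A;        /* hypothetical RAH contents (low 16 bits) */
        uint8_t mac[6];
        int i;

        mac[0] = addr_low & 0xFF;
        mac[1] = (addr_low >> 8) & 0xFF;
        mac[2] = (addr_low >> 16) & 0xFF;
        mac[3] = (addr_low >> 24) & 0xFF;
        mac[4] = addr_high & 0xFF;
        mac[5] = (addr_high >> 8) & 0xFF;

        for (i = 0; i < 6; i++)
                printf("%02x%c", (unsigned)mac[i], i == 5 ? '\n' : ':');
        return 0;
}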
1618 | |||
1619 | /** | ||
1620 | * e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be | ||
1621 | * done after every PHY reset. | ||
1622 | **/ | ||
1623 | static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) | ||
1624 | { | ||
1625 | s32 ret_val = 0; | ||
1626 | |||
1627 | if (hw->mac.type != e1000_pch2lan) | ||
1628 | goto out; | ||
1629 | |||
1630 | /* Set MDIO slow mode before any other MDIO access */ | ||
1631 | ret_val = e1000_set_mdio_slow_mode_hv(hw); | ||
1632 | |||
1633 | out: | ||
1634 | return ret_val; | ||
1635 | } | ||
1636 | |||
1637 | /** | ||
1638 | * e1000_k1_workaround_lv - K1 Si workaround | ||
1639 | * @hw: pointer to the HW structure | ||
1640 | * | ||
1641 | * Workaround to set the K1 beacon duration for 82579 parts | ||
1642 | **/ | ||
1643 | static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) | ||
1644 | { | ||
1645 | s32 ret_val = 0; | ||
1646 | u16 status_reg = 0; | ||
1647 | u32 mac_reg; | ||
1648 | |||
1649 | if (hw->mac.type != e1000_pch2lan) | ||
1650 | goto out; | ||
1651 | |||
1652 | /* Set K1 beacon duration based on whether speed is 1Gbps or not */ | ||
1653 | ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); | ||
1654 | if (ret_val) | ||
1655 | goto out; | ||
1656 | |||
1657 | if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) | ||
1658 | == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { | ||
1659 | mac_reg = er32(FEXTNVM4); | ||
1660 | mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; | ||
1661 | |||
1662 | if (status_reg & HV_M_STATUS_SPEED_1000) | ||
1663 | mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; | ||
1664 | else | ||
1665 | mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; | ||
1666 | |||
1667 | ew32(FEXTNVM4, mac_reg); | ||
1668 | } | ||
1669 | |||
1670 | out: | ||
1671 | return ret_val; | ||
1672 | } | ||
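The beacon-duration choice in e1000_k1_workaround_lv() above only happens once link is up and auto-negotiation has completed: 8 usec at gigabit, 16 usec otherwise. A standalone sketch of that decision with placeholder status bits standing in for the HV_M_STATUS flags:

#include <stdint.h>
#include <stdio.h>

#define STAT_LINK_UP      0x0001u       /* placeholder status bits */
#define STAT_AN_COMPLETE  0x0002u
#define STAT_SPEED_1000   0x0004u

static const char *beacon_duration(uint16_t status)
{
        if ((status & (STAT_LINK_UP | STAT_AN_COMPLETE)) !=
            (STAT_LINK_UP | STAT_AN_COMPLETE))
                return "unchanged";     /* no resolved link, leave the register alone */

        return (status & STAT_SPEED_1000) ? "8 usec" : "16 usec";
}

int main(void)
{
        printf("%s\n", beacon_duration(STAT_LINK_UP | STAT_AN_COMPLETE |
                                       STAT_SPEED_1000));               /* 8 usec */
        printf("%s\n", beacon_duration(STAT_LINK_UP | STAT_AN_COMPLETE)); /* 16 usec */
        printf("%s\n", beacon_duration(STAT_LINK_UP));                  /* unchanged */
        return 0;
}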
1673 | |||
1674 | /** | ||
1675 | * e1000_gate_hw_phy_config_ich8lan - Gate/ungate automatic PHY config via hardware | ||
1676 | * @hw: pointer to the HW structure | ||
1677 | * @gate: boolean set to true to gate, false to ungate | ||
1678 | * | ||
1679 | * Gate/ungate the automatic PHY configuration via hardware; perform | ||
1680 | * the configuration via software instead. | ||
1681 | **/ | ||
1682 | static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) | ||
1683 | { | ||
1684 | u32 extcnf_ctrl; | ||
1685 | |||
1686 | if (hw->mac.type != e1000_pch2lan) | ||
1687 | return; | ||
1688 | |||
1689 | extcnf_ctrl = er32(EXTCNF_CTRL); | ||
1690 | |||
1691 | if (gate) | ||
1692 | extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; | ||
1693 | else | ||
1694 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; | ||
1695 | |||
1696 | ew32(EXTCNF_CTRL, extcnf_ctrl); | ||
1697 | return; | ||
1698 | } | ||
1699 | |||
1700 | /** | ||
1701 | * e1000_lan_init_done_ich8lan - Check for PHY config completion | ||
1702 | * @hw: pointer to the HW structure | ||
1703 | * | ||
1704 | * Check the appropriate indication that the MAC has finished configuring the | ||
1705 | * PHY after a software reset. | ||
1706 | **/ | ||
1707 | static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) | ||
1708 | { | ||
1709 | u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; | ||
1710 | |||
1711 | /* Wait for basic configuration to complete before proceeding */ | ||
1712 | do { | ||
1713 | data = er32(STATUS); | ||
1714 | data &= E1000_STATUS_LAN_INIT_DONE; | ||
1715 | udelay(100); | ||
1716 | } while ((!data) && --loop); | ||
1717 | |||
1718 | /* | ||
1719 | * If basic configuration is incomplete before the above loop | ||
1720 | * count reaches 0, loading the configuration from NVM will | ||
1721 | * leave the PHY in a bad state possibly resulting in no link. | ||
1722 | */ | ||
1723 | if (loop == 0) | ||
1724 | e_dbg("LAN_INIT_DONE not set, increase timeout\n"); | ||
1725 | |||
1726 | /* Clear the Init Done bit for the next init event */ | ||
1727 | data = er32(STATUS); | ||
1728 | data &= ~E1000_STATUS_LAN_INIT_DONE; | ||
1729 | ew32(STATUS, data); | ||
1730 | } | ||
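The wait loop in e1000_lan_init_done_ich8lan() above is a bounded poll: read the status, delay, and give up with a debug message when the loop count runs out. A standalone sketch of the same pattern, with a stub function standing in for the status-register read:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define POLL_LIMIT 1000

/* stand-in for reading a status register; pretends init finishes quickly */
static bool init_done(int iteration)
{
        return iteration > 3;
}

int main(void)
{
        int loop = POLL_LIMIT;
        bool done = false;

        do {
                done = init_done(POLL_LIMIT - loop);
                usleep(100);            /* 100 usec between polls */
        } while (!done && --loop);

        if (!done)
                fprintf(stderr, "init-done never asserted, increase timeout\n");
        else
                printf("ready after %d polls\n", POLL_LIMIT - loop + 1);
        return 0;
}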
1731 | |||
1732 | /** | ||
1733 | * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset | ||
1734 | * @hw: pointer to the HW structure | ||
1735 | **/ | ||
1736 | static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) | ||
1737 | { | ||
1738 | s32 ret_val = 0; | ||
1739 | u16 reg; | ||
1740 | |||
1741 | if (e1000_check_reset_block(hw)) | ||
1742 | goto out; | ||
1743 | |||
1744 | /* Allow time for h/w to get to quiescent state after reset */ | ||
1745 | usleep_range(10000, 20000); | ||
1746 | |||
1747 | /* Perform any necessary post-reset workarounds */ | ||
1748 | switch (hw->mac.type) { | ||
1749 | case e1000_pchlan: | ||
1750 | ret_val = e1000_hv_phy_workarounds_ich8lan(hw); | ||
1751 | if (ret_val) | ||
1752 | goto out; | ||
1753 | break; | ||
1754 | case e1000_pch2lan: | ||
1755 | ret_val = e1000_lv_phy_workarounds_ich8lan(hw); | ||
1756 | if (ret_val) | ||
1757 | goto out; | ||
1758 | break; | ||
1759 | default: | ||
1760 | break; | ||
1761 | } | ||
1762 | |||
1763 | /* Clear the host wakeup bit after lcd reset */ | ||
1764 | if (hw->mac.type >= e1000_pchlan) { | ||
1765 | e1e_rphy(hw, BM_PORT_GEN_CFG, ®); | ||
1766 | reg &= ~BM_WUC_HOST_WU_BIT; | ||
1767 | e1e_wphy(hw, BM_PORT_GEN_CFG, reg); | ||
1768 | } | ||
1769 | |||
1770 | /* Configure the LCD with the extended configuration region in NVM */ | ||
1771 | ret_val = e1000_sw_lcd_config_ich8lan(hw); | ||
1772 | if (ret_val) | ||
1773 | goto out; | ||
1774 | |||
1775 | /* Configure the LCD with the OEM bits in NVM */ | ||
1776 | ret_val = e1000_oem_bits_config_ich8lan(hw, true); | ||
1777 | |||
1778 | if (hw->mac.type == e1000_pch2lan) { | ||
1779 | /* Ungate automatic PHY configuration on non-managed 82579 */ | ||
1780 | if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { | ||
1781 | usleep_range(10000, 20000); | ||
1782 | e1000_gate_hw_phy_config_ich8lan(hw, false); | ||
1783 | } | ||
1784 | |||
1785 | /* Set EEE LPI Update Timer to 200usec */ | ||
1786 | ret_val = hw->phy.ops.acquire(hw); | ||
1787 | if (ret_val) | ||
1788 | goto out; | ||
1789 | ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, | ||
1790 | I82579_LPI_UPDATE_TIMER); | ||
1791 | if (ret_val) | ||
1792 | goto release; | ||
1793 | ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, | ||
1794 | 0x1387); | ||
1795 | release: | ||
1796 | hw->phy.ops.release(hw); | ||
1797 | } | ||
1798 | |||
1799 | out: | ||
1800 | return ret_val; | ||
1801 | } | ||
1802 | |||
1803 | /** | ||
1804 | * e1000_phy_hw_reset_ich8lan - Performs a PHY reset | ||
1805 | * @hw: pointer to the HW structure | ||
1806 | * | ||
1807 | * Resets the PHY | ||
1808 | * This is a function pointer entry point called by drivers | ||
1809 | * or other shared routines. | ||
1810 | **/ | ||
1811 | static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) | ||
1812 | { | ||
1813 | s32 ret_val = 0; | ||
1814 | |||
1815 | /* Gate automatic PHY configuration by hardware on non-managed 82579 */ | ||
1816 | if ((hw->mac.type == e1000_pch2lan) && | ||
1817 | !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) | ||
1818 | e1000_gate_hw_phy_config_ich8lan(hw, true); | ||
1819 | |||
1820 | ret_val = e1000e_phy_hw_reset_generic(hw); | ||
1821 | if (ret_val) | ||
1822 | goto out; | ||
1823 | |||
1824 | ret_val = e1000_post_phy_reset_ich8lan(hw); | ||
1825 | |||
1826 | out: | ||
1827 | return ret_val; | ||
1828 | } | ||
1829 | |||
1830 | /** | ||
1831 | * e1000_set_lplu_state_pchlan - Set Low Power Link Up state | ||
1832 | * @hw: pointer to the HW structure | ||
1833 | * @active: true to enable LPLU, false to disable | ||
1834 | * | ||
1835 | * Sets the LPLU state according to the active flag. For PCH, if OEM write | ||
1836 | * bit is disabled in the NVM, writing the LPLU bits in the MAC will not set | ||
1837 | * the PHY speed. This function will manually set the LPLU bit and restart | ||
1838 | * auto-neg as hw would do. D3 and D0 LPLU will call the same function | ||
1839 | * since it configures the same bit. | ||
1840 | **/ | ||
1841 | static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) | ||
1842 | { | ||
1843 | s32 ret_val = 0; | ||
1844 | u16 oem_reg; | ||
1845 | |||
1846 | ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); | ||
1847 | if (ret_val) | ||
1848 | goto out; | ||
1849 | |||
1850 | if (active) | ||
1851 | oem_reg |= HV_OEM_BITS_LPLU; | ||
1852 | else | ||
1853 | oem_reg &= ~HV_OEM_BITS_LPLU; | ||
1854 | |||
1855 | oem_reg |= HV_OEM_BITS_RESTART_AN; | ||
1856 | ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg); | ||
1857 | |||
1858 | out: | ||
1859 | return ret_val; | ||
1860 | } | ||
1861 | |||
1862 | /** | ||
1863 | * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state | ||
1864 | * @hw: pointer to the HW structure | ||
1865 | * @active: true to enable LPLU, false to disable | ||
1866 | * | ||
1867 | * Sets the LPLU D0 state according to the active flag. When | ||
1868 | * activating LPLU this function also disables smart speed | ||
1869 | * and vice versa. LPLU will not be activated unless the | ||
1870 | * device autonegotiation advertisement meets standards of | ||
1871 | * either 10 or 10/100 or 10/100/1000 at all duplexes. | ||
1872 | * This is a function pointer entry point only called by | ||
1873 | * PHY setup routines. | ||
1874 | **/ | ||
1875 | static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) | ||
1876 | { | ||
1877 | struct e1000_phy_info *phy = &hw->phy; | ||
1878 | u32 phy_ctrl; | ||
1879 | s32 ret_val = 0; | ||
1880 | u16 data; | ||
1881 | |||
1882 | if (phy->type == e1000_phy_ife) | ||
1883 | return ret_val; | ||
1884 | |||
1885 | phy_ctrl = er32(PHY_CTRL); | ||
1886 | |||
1887 | if (active) { | ||
1888 | phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; | ||
1889 | ew32(PHY_CTRL, phy_ctrl); | ||
1890 | |||
1891 | if (phy->type != e1000_phy_igp_3) | ||
1892 | return 0; | ||
1893 | |||
1894 | /* | ||
1895 | * Call gig speed drop workaround on LPLU before accessing | ||
1896 | * any PHY registers | ||
1897 | */ | ||
1898 | if (hw->mac.type == e1000_ich8lan) | ||
1899 | e1000e_gig_downshift_workaround_ich8lan(hw); | ||
1900 | |||
1901 | /* When LPLU is enabled, we should disable SmartSpeed */ | ||
1902 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); | ||
| if (ret_val) | ||
| return ret_val; | ||
1903 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | ||
1904 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); | ||
1905 | if (ret_val) | ||
1906 | return ret_val; | ||
1907 | } else { | ||
1908 | phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; | ||
1909 | ew32(PHY_CTRL, phy_ctrl); | ||
1910 | |||
1911 | if (phy->type != e1000_phy_igp_3) | ||
1912 | return 0; | ||
1913 | |||
1914 | /* | ||
1915 | * LPLU and SmartSpeed are mutually exclusive. LPLU is used | ||
1916 | * during Dx states where the power conservation is most | ||
1917 | * important. During driver activity we should enable | ||
1918 | * SmartSpeed, so performance is maintained. | ||
1919 | */ | ||
1920 | if (phy->smart_speed == e1000_smart_speed_on) { | ||
1921 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
1922 | &data); | ||
1923 | if (ret_val) | ||
1924 | return ret_val; | ||
1925 | |||
1926 | data |= IGP01E1000_PSCFR_SMART_SPEED; | ||
1927 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
1928 | data); | ||
1929 | if (ret_val) | ||
1930 | return ret_val; | ||
1931 | } else if (phy->smart_speed == e1000_smart_speed_off) { | ||
1932 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
1933 | &data); | ||
1934 | if (ret_val) | ||
1935 | return ret_val; | ||
1936 | |||
1937 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | ||
1938 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
1939 | data); | ||
1940 | if (ret_val) | ||
1941 | return ret_val; | ||
1942 | } | ||
1943 | } | ||
1944 | |||
1945 | return 0; | ||
1946 | } | ||
1947 | |||
1948 | /** | ||
1949 | * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state | ||
1950 | * @hw: pointer to the HW structure | ||
1951 | * @active: true to enable LPLU, false to disable | ||
1952 | * | ||
1953 | * Sets the LPLU D3 state according to the active flag. When | ||
1954 | * activating LPLU this function also disables smart speed | ||
1955 | * and vice versa. LPLU will not be activated unless the | ||
1956 | * device autonegotiation advertisement meets standards of | ||
1957 | * either 10 or 10/100 or 10/100/1000 at all duplexes. | ||
1958 | * This is a function pointer entry point only called by | ||
1959 | * PHY setup routines. | ||
1960 | **/ | ||
1961 | static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) | ||
1962 | { | ||
1963 | struct e1000_phy_info *phy = &hw->phy; | ||
1964 | u32 phy_ctrl; | ||
1965 | s32 ret_val; | ||
1966 | u16 data; | ||
1967 | |||
1968 | phy_ctrl = er32(PHY_CTRL); | ||
1969 | |||
1970 | if (!active) { | ||
1971 | phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; | ||
1972 | ew32(PHY_CTRL, phy_ctrl); | ||
1973 | |||
1974 | if (phy->type != e1000_phy_igp_3) | ||
1975 | return 0; | ||
1976 | |||
1977 | /* | ||
1978 | * LPLU and SmartSpeed are mutually exclusive. LPLU is used | ||
1979 | * during Dx states where the power conservation is most | ||
1980 | * important. During driver activity we should enable | ||
1981 | * SmartSpeed, so performance is maintained. | ||
1982 | */ | ||
1983 | if (phy->smart_speed == e1000_smart_speed_on) { | ||
1984 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
1985 | &data); | ||
1986 | if (ret_val) | ||
1987 | return ret_val; | ||
1988 | |||
1989 | data |= IGP01E1000_PSCFR_SMART_SPEED; | ||
1990 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
1991 | data); | ||
1992 | if (ret_val) | ||
1993 | return ret_val; | ||
1994 | } else if (phy->smart_speed == e1000_smart_speed_off) { | ||
1995 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
1996 | &data); | ||
1997 | if (ret_val) | ||
1998 | return ret_val; | ||
1999 | |||
2000 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | ||
2001 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
2002 | data); | ||
2003 | if (ret_val) | ||
2004 | return ret_val; | ||
2005 | } | ||
2006 | } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || | ||
2007 | (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || | ||
2008 | (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { | ||
2009 | phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; | ||
2010 | ew32(PHY_CTRL, phy_ctrl); | ||
2011 | |||
2012 | if (phy->type != e1000_phy_igp_3) | ||
2013 | return 0; | ||
2014 | |||
2015 | /* | ||
2016 | * Call gig speed drop workaround on LPLU before accessing | ||
2017 | * any PHY registers | ||
2018 | */ | ||
2019 | if (hw->mac.type == e1000_ich8lan) | ||
2020 | e1000e_gig_downshift_workaround_ich8lan(hw); | ||
2021 | |||
2022 | /* When LPLU is enabled, we should disable SmartSpeed */ | ||
2023 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); | ||
2024 | if (ret_val) | ||
2025 | return ret_val; | ||
2026 | |||
2027 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | ||
2028 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); | ||
2029 | } | ||
2030 | |||
2031 | return 0; | ||
2032 | } | ||
2033 | |||
2034 | /** | ||
2035 | * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 | ||
2036 | * @hw: pointer to the HW structure | ||
2037 | * @bank: pointer to the variable that returns the active bank | ||
2038 | * | ||
2039 | * Reads signature byte from the NVM using the flash access registers. | ||
2040 | * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. | ||
2041 | **/ | ||
2042 | static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) | ||
2043 | { | ||
2044 | u32 eecd; | ||
2045 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
2046 | u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); | ||
2047 | u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; | ||
2048 | u8 sig_byte = 0; | ||
2049 | s32 ret_val = 0; | ||
2050 | |||
2051 | switch (hw->mac.type) { | ||
2052 | case e1000_ich8lan: | ||
2053 | case e1000_ich9lan: | ||
2054 | eecd = er32(EECD); | ||
2055 | if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == | ||
2056 | E1000_EECD_SEC1VAL_VALID_MASK) { | ||
2057 | if (eecd & E1000_EECD_SEC1VAL) | ||
2058 | *bank = 1; | ||
2059 | else | ||
2060 | *bank = 0; | ||
2061 | |||
2062 | return 0; | ||
2063 | } | ||
2064 | e_dbg("Unable to determine valid NVM bank via EEC - " | ||
2065 | "reading flash signature\n"); | ||
2066 | /* fall-thru */ | ||
2067 | default: | ||
2068 | /* set bank to 0 in case flash read fails */ | ||
2069 | *bank = 0; | ||
2070 | |||
2071 | /* Check bank 0 */ | ||
2072 | ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, | ||
2073 | &sig_byte); | ||
2074 | if (ret_val) | ||
2075 | return ret_val; | ||
2076 | if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == | ||
2077 | E1000_ICH_NVM_SIG_VALUE) { | ||
2078 | *bank = 0; | ||
2079 | return 0; | ||
2080 | } | ||
2081 | |||
2082 | /* Check bank 1 */ | ||
2083 | ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + | ||
2084 | bank1_offset, | ||
2085 | &sig_byte); | ||
2086 | if (ret_val) | ||
2087 | return ret_val; | ||
2088 | if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == | ||
2089 | E1000_ICH_NVM_SIG_VALUE) { | ||
2090 | *bank = 1; | ||
2091 | return 0; | ||
2092 | } | ||
2093 | |||
2094 | e_dbg("ERROR: No valid NVM bank present\n"); | ||
2095 | return -E1000_ERR_NVM; | ||
2096 | } | ||
2097 | |||
2098 | return 0; | ||
2099 | } | ||
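/*
 * Worked example (editorial note, assuming E1000_ICH_NVM_SIG_WORD is word
 * 0x13 as the header comment states): the signature byte checked above is
 * the high byte of that word, at byte offset 0x13 * 2 + 1 = 0x27 into the
 * bank.  Bits 7:6 of the byte are NVM word 0x13 bits 15:14, so masking with
 * E1000_ICH_NVM_VALID_SIG_MASK and comparing against E1000_ICH_NVM_SIG_VALUE
 * tests for the 10b pattern that marks the bank valid.
 */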
2100 | |||
2101 | /** | ||
2102 | * e1000_read_nvm_ich8lan - Read word(s) from the NVM | ||
2103 | * @hw: pointer to the HW structure | ||
2104 | * @offset: The offset (in bytes) of the word(s) to read. | ||
2105 | * @words: Size of data to read in words | ||
2106 | * @data: Pointer to the word(s) to read at offset. | ||
2107 | * | ||
2108 | * Reads a word(s) from the NVM using the flash access registers. | ||
2109 | **/ | ||
2110 | static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | ||
2111 | u16 *data) | ||
2112 | { | ||
2113 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
2114 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | ||
2115 | u32 act_offset; | ||
2116 | s32 ret_val = 0; | ||
2117 | u32 bank = 0; | ||
2118 | u16 i, word; | ||
2119 | |||
2120 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || | ||
2121 | (words == 0)) { | ||
2122 | e_dbg("nvm parameter(s) out of bounds\n"); | ||
2123 | ret_val = -E1000_ERR_NVM; | ||
2124 | goto out; | ||
2125 | } | ||
2126 | |||
2127 | nvm->ops.acquire(hw); | ||
2128 | |||
2129 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); | ||
2130 | if (ret_val) { | ||
2131 | e_dbg("Could not detect valid bank, assuming bank 0\n"); | ||
2132 | bank = 0; | ||
2133 | } | ||
2134 | |||
2135 | act_offset = (bank) ? nvm->flash_bank_size : 0; | ||
2136 | act_offset += offset; | ||
2137 | |||
2138 | ret_val = 0; | ||
2139 | for (i = 0; i < words; i++) { | ||
2140 | if (dev_spec->shadow_ram[offset+i].modified) { | ||
2141 | data[i] = dev_spec->shadow_ram[offset+i].value; | ||
2142 | } else { | ||
2143 | ret_val = e1000_read_flash_word_ich8lan(hw, | ||
2144 | act_offset + i, | ||
2145 | &word); | ||
2146 | if (ret_val) | ||
2147 | break; | ||
2148 | data[i] = word; | ||
2149 | } | ||
2150 | } | ||
2151 | |||
2152 | nvm->ops.release(hw); | ||
2153 | |||
2154 | out: | ||
2155 | if (ret_val) | ||
2156 | e_dbg("NVM read error: %d\n", ret_val); | ||
2157 | |||
2158 | return ret_val; | ||
2159 | } | ||
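/*
 * Usage sketch (editorial note): callers normally reach this routine through
 * the generic NVM ops wrapper rather than calling it directly, e.g.:
 *
 *	u16 led_cfg;
 *	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, &led_cfg);
 *
 * as e1000_valid_led_default_ich8lan() does further below; any word with an
 * uncommitted write is returned from the shadow RAM copy instead of flash.
 */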
2160 | |||
2161 | /** | ||
2162 | * e1000_flash_cycle_init_ich8lan - Initialize flash | ||
2163 | * @hw: pointer to the HW structure | ||
2164 | * | ||
2165 | * This function does initial flash setup so that a new read/write/erase cycle | ||
2166 | * can be started. | ||
2167 | **/ | ||
2168 | static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | ||
2169 | { | ||
2170 | union ich8_hws_flash_status hsfsts; | ||
2171 | s32 ret_val = -E1000_ERR_NVM; | ||
2172 | |||
2173 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | ||
2174 | |||
2175 | /* Check if the flash descriptor is valid */ | ||
2176 | if (hsfsts.hsf_status.fldesvalid == 0) { | ||
2177 | e_dbg("Flash descriptor invalid. " | ||
2178 | "SW Sequencing must be used.\n"); | ||
2179 | return -E1000_ERR_NVM; | ||
2180 | } | ||
2181 | |||
2182 | /* Clear FCERR and DAEL in hw status by writing 1 */ | ||
2183 | hsfsts.hsf_status.flcerr = 1; | ||
2184 | hsfsts.hsf_status.dael = 1; | ||
2185 | |||
2186 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | ||
2187 | |||
2188 | /* | ||
2189 | * Either there should be a hardware SPI cycle-in-progress | ||
2190 | * bit to check against before starting a new cycle, or | ||
2191 | * the FDONE bit should be set by hardware after reset so | ||
2192 | * that it can then be used as an indication of whether | ||
2193 | * a cycle is in progress or has been | ||
2194 | * completed. | ||
2195 | */ | ||
2196 | |||
2197 | if (hsfsts.hsf_status.flcinprog == 0) { | ||
2198 | /* | ||
2199 | * There is no cycle running at present, | ||
2200 | * so we can start a cycle. | ||
2201 | * Begin by setting Flash Cycle Done. | ||
2202 | */ | ||
2203 | hsfsts.hsf_status.flcdone = 1; | ||
2204 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | ||
2205 | ret_val = 0; | ||
2206 | } else { | ||
2207 | s32 i = 0; | ||
2208 | |||
2209 | /* | ||
2210 | * Otherwise poll for sometime so the current | ||
2211 | * cycle has a chance to end before giving up. | ||
2212 | */ | ||
2213 | for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { | ||
2214 | hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS); | ||
2215 | if (hsfsts.hsf_status.flcinprog == 0) { | ||
2216 | ret_val = 0; | ||
2217 | break; | ||
2218 | } | ||
2219 | udelay(1); | ||
2220 | } | ||
2221 | if (ret_val == 0) { | ||
2222 | /* | ||
2223 | * Successful in waiting for previous cycle to timeout, | ||
2224 | * now set the Flash Cycle Done. | ||
2225 | */ | ||
2226 | hsfsts.hsf_status.flcdone = 1; | ||
2227 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | ||
2228 | } else { | ||
2229 | e_dbg("Flash controller busy, cannot get access\n"); | ||
2230 | } | ||
2231 | } | ||
2232 | |||
2233 | return ret_val; | ||
2234 | } | ||
2235 | |||
2236 | /** | ||
2237 | * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) | ||
2238 | * @hw: pointer to the HW structure | ||
2239 | * @timeout: maximum time to wait for completion | ||
2240 | * | ||
2241 | * This function starts a flash cycle and waits for its completion. | ||
2242 | **/ | ||
2243 | static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) | ||
2244 | { | ||
2245 | union ich8_hws_flash_ctrl hsflctl; | ||
2246 | union ich8_hws_flash_status hsfsts; | ||
2247 | s32 ret_val = -E1000_ERR_NVM; | ||
2248 | u32 i = 0; | ||
2249 | |||
2250 | /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ | ||
2251 | hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); | ||
2252 | hsflctl.hsf_ctrl.flcgo = 1; | ||
2253 | ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); | ||
2254 | |||
2255 | /* wait till FDONE bit is set to 1 */ | ||
2256 | do { | ||
2257 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | ||
2258 | if (hsfsts.hsf_status.flcdone == 1) | ||
2259 | break; | ||
2260 | udelay(1); | ||
2261 | } while (i++ < timeout); | ||
2262 | |||
2263 | if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) | ||
2264 | return 0; | ||
2265 | |||
2266 | return ret_val; | ||
2267 | } | ||
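/*
 * Editorial summary of the software-sequencing handshake used by the flash
 * helpers below (derived from the code itself):
 *
 *   1. e1000_flash_cycle_init_ich8lan() clears FCERR/DAEL, waits out any
 *      cycle already in progress and sets FDONE.
 *   2. The caller programs FLDBCOUNT/FLCYCLE in HSFCTL, the linear address
 *      in FADDR and, for writes, the data in FDATA0.
 *   3. e1000_flash_cycle_ich8lan() sets FLCGO and polls FDONE for up to the
 *      command timeout; read data is then picked up from FDATA0.
 */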
2268 | |||
2269 | /** | ||
2270 | * e1000_read_flash_word_ich8lan - Read word from flash | ||
2271 | * @hw: pointer to the HW structure | ||
2272 | * @offset: offset to data location | ||
2273 | * @data: pointer to the location for storing the data | ||
2274 | * | ||
2275 | * Reads the flash word at offset into data. Offset is converted | ||
2276 | * to bytes before read. | ||
2277 | **/ | ||
2278 | static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, | ||
2279 | u16 *data) | ||
2280 | { | ||
2281 | /* Must convert offset into bytes. */ | ||
2282 | offset <<= 1; | ||
2283 | |||
2284 | return e1000_read_flash_data_ich8lan(hw, offset, 2, data); | ||
2285 | } | ||
2286 | |||
2287 | /** | ||
2288 | * e1000_read_flash_byte_ich8lan - Read byte from flash | ||
2289 | * @hw: pointer to the HW structure | ||
2290 | * @offset: The offset of the byte to read. | ||
2291 | * @data: Pointer to a byte to store the value read. | ||
2292 | * | ||
2293 | * Reads a single byte from the NVM using the flash access registers. | ||
2294 | **/ | ||
2295 | static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, | ||
2296 | u8 *data) | ||
2297 | { | ||
2298 | s32 ret_val; | ||
2299 | u16 word = 0; | ||
2300 | |||
2301 | ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); | ||
2302 | if (ret_val) | ||
2303 | return ret_val; | ||
2304 | |||
2305 | *data = (u8)word; | ||
2306 | |||
2307 | return 0; | ||
2308 | } | ||
2309 | |||
2310 | /** | ||
2311 | * e1000_read_flash_data_ich8lan - Read byte or word from NVM | ||
2312 | * @hw: pointer to the HW structure | ||
2313 | * @offset: The offset (in bytes) of the byte or word to read. | ||
2314 | * @size: Size of data to read, 1=byte 2=word | ||
2315 | * @data: Pointer to the word to store the value read. | ||
2316 | * | ||
2317 | * Reads a byte or word from the NVM using the flash access registers. | ||
2318 | **/ | ||
2319 | static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | ||
2320 | u8 size, u16 *data) | ||
2321 | { | ||
2322 | union ich8_hws_flash_status hsfsts; | ||
2323 | union ich8_hws_flash_ctrl hsflctl; | ||
2324 | u32 flash_linear_addr; | ||
2325 | u32 flash_data = 0; | ||
2326 | s32 ret_val = -E1000_ERR_NVM; | ||
2327 | u8 count = 0; | ||
2328 | |||
2329 | if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) | ||
2330 | return -E1000_ERR_NVM; | ||
2331 | |||
2332 | flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + | ||
2333 | hw->nvm.flash_base_addr; | ||
2334 | |||
2335 | do { | ||
2336 | udelay(1); | ||
2337 | /* Steps */ | ||
2338 | ret_val = e1000_flash_cycle_init_ich8lan(hw); | ||
2339 | if (ret_val != 0) | ||
2340 | break; | ||
2341 | |||
2342 | hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); | ||
2343 | /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ | ||
2344 | hsflctl.hsf_ctrl.fldbcount = size - 1; | ||
2345 | hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; | ||
2346 | ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); | ||
2347 | |||
2348 | ew32flash(ICH_FLASH_FADDR, flash_linear_addr); | ||
2349 | |||
2350 | ret_val = e1000_flash_cycle_ich8lan(hw, | ||
2351 | ICH_FLASH_READ_COMMAND_TIMEOUT); | ||
2352 | |||
2353 | /* | ||
2354 | * Check if FCERR is set to 1; if it is, clear it and | ||
2355 | * try the whole sequence a few more times, otherwise | ||
2356 | * read in (shift in) the Flash Data0, least | ||
2357 | * significant byte first. | ||
2358 | */ | ||
2359 | if (ret_val == 0) { | ||
2360 | flash_data = er32flash(ICH_FLASH_FDATA0); | ||
2361 | if (size == 1) | ||
2362 | *data = (u8)(flash_data & 0x000000FF); | ||
2363 | else if (size == 2) | ||
2364 | *data = (u16)(flash_data & 0x0000FFFF); | ||
2365 | break; | ||
2366 | } else { | ||
2367 | /* | ||
2368 | * If we've gotten here, then things are probably | ||
2369 | * completely hosed, but if the error condition is | ||
2370 | * detected, it won't hurt to give it another try... | ||
2371 | * ICH_FLASH_CYCLE_REPEAT_COUNT times. | ||
2372 | */ | ||
2373 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | ||
2374 | if (hsfsts.hsf_status.flcerr == 1) { | ||
2375 | /* Repeat for some time before giving up. */ | ||
2376 | continue; | ||
2377 | } else if (hsfsts.hsf_status.flcdone == 0) { | ||
2378 | e_dbg("Timeout error - flash cycle " | ||
2379 | "did not complete.\n"); | ||
2380 | break; | ||
2381 | } | ||
2382 | } | ||
2383 | } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); | ||
2384 | |||
2385 | return ret_val; | ||
2386 | } | ||
2387 | |||
2388 | /** | ||
2389 | * e1000_write_nvm_ich8lan - Write word(s) to the NVM | ||
2390 | * @hw: pointer to the HW structure | ||
2391 | * @offset: The offset (in bytes) of the word(s) to write. | ||
2392 | * @words: Size of data to write in words | ||
2393 | * @data: Pointer to the word(s) to write at offset. | ||
2394 | * | ||
2395 | * Writes a byte or word to the NVM using the flash access registers. | ||
2396 | **/ | ||
2397 | static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | ||
2398 | u16 *data) | ||
2399 | { | ||
2400 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
2401 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | ||
2402 | u16 i; | ||
2403 | |||
2404 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || | ||
2405 | (words == 0)) { | ||
2406 | e_dbg("nvm parameter(s) out of bounds\n"); | ||
2407 | return -E1000_ERR_NVM; | ||
2408 | } | ||
2409 | |||
2410 | nvm->ops.acquire(hw); | ||
2411 | |||
2412 | for (i = 0; i < words; i++) { | ||
2413 | dev_spec->shadow_ram[offset+i].modified = true; | ||
2414 | dev_spec->shadow_ram[offset+i].value = data[i]; | ||
2415 | } | ||
2416 | |||
2417 | nvm->ops.release(hw); | ||
2418 | |||
2419 | return 0; | ||
2420 | } | ||
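/*
 * Usage sketch (editorial note): writes land only in the shadow RAM until
 * the checksum update commits them to flash, so a caller that needs the
 * change to persist follows the pattern used by
 * e1000_validate_nvm_checksum_ich8lan() below:
 *
 *	ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
 *	...
 *	ret_val = e1000e_update_nvm_checksum(hw);
 */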
2421 | |||
2422 | /** | ||
2423 | * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM | ||
2424 | * @hw: pointer to the HW structure | ||
2425 | * | ||
2426 | * The NVM checksum is updated by calling the generic update_nvm_checksum, | ||
2427 | * which writes the checksum to the shadow ram. The changes in the shadow | ||
2428 | * ram are then committed to the EEPROM by processing each bank at a time | ||
2429 | * checking for the modified bit and writing only the pending changes. | ||
2430 | * After a successful commit, the shadow ram is cleared and is ready for | ||
2431 | * future writes. | ||
2432 | **/ | ||
2433 | static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | ||
2434 | { | ||
2435 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
2436 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | ||
2437 | u32 i, act_offset, new_bank_offset, old_bank_offset, bank; | ||
2438 | s32 ret_val; | ||
2439 | u16 data; | ||
2440 | |||
2441 | ret_val = e1000e_update_nvm_checksum_generic(hw); | ||
2442 | if (ret_val) | ||
2443 | goto out; | ||
2444 | |||
2445 | if (nvm->type != e1000_nvm_flash_sw) | ||
2446 | goto out; | ||
2447 | |||
2448 | nvm->ops.acquire(hw); | ||
2449 | |||
2450 | /* | ||
2451 | * We're writing to the opposite bank so if we're on bank 1, | ||
2452 | * write to bank 0 etc. We also need to erase the segment that | ||
2453 | * is going to be written | ||
2454 | */ | ||
2455 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); | ||
2456 | if (ret_val) { | ||
2457 | e_dbg("Could not detect valid bank, assuming bank 0\n"); | ||
2458 | bank = 0; | ||
2459 | } | ||
2460 | |||
2461 | if (bank == 0) { | ||
2462 | new_bank_offset = nvm->flash_bank_size; | ||
2463 | old_bank_offset = 0; | ||
2464 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); | ||
2465 | if (ret_val) | ||
2466 | goto release; | ||
2467 | } else { | ||
2468 | old_bank_offset = nvm->flash_bank_size; | ||
2469 | new_bank_offset = 0; | ||
2470 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); | ||
2471 | if (ret_val) | ||
2472 | goto release; | ||
2473 | } | ||
2474 | |||
2475 | for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { | ||
2476 | /* | ||
2477 | * Determine whether to write the value stored | ||
2478 | * in the other NVM bank or a modified value stored | ||
2479 | * in the shadow RAM | ||
2480 | */ | ||
2481 | if (dev_spec->shadow_ram[i].modified) { | ||
2482 | data = dev_spec->shadow_ram[i].value; | ||
2483 | } else { | ||
2484 | ret_val = e1000_read_flash_word_ich8lan(hw, i + | ||
2485 | old_bank_offset, | ||
2486 | &data); | ||
2487 | if (ret_val) | ||
2488 | break; | ||
2489 | } | ||
2490 | |||
2491 | /* | ||
2492 | * If the word is 0x13, then make sure the signature bits | ||
2493 | * (15:14) are 11b until the commit has completed. | ||
2494 | * This will allow us to write 10b which indicates the | ||
2495 | * signature is valid. We want to do this after the write | ||
2496 | * has completed so that we don't mark the segment valid | ||
2497 | * while the write is still in progress | ||
2498 | */ | ||
2499 | if (i == E1000_ICH_NVM_SIG_WORD) | ||
2500 | data |= E1000_ICH_NVM_SIG_MASK; | ||
2501 | |||
2502 | /* Convert offset to bytes. */ | ||
2503 | act_offset = (i + new_bank_offset) << 1; | ||
2504 | |||
2505 | udelay(100); | ||
2506 | /* Write the bytes to the new bank. */ | ||
2507 | ret_val = e1000_retry_write_flash_byte_ich8lan(hw, | ||
2508 | act_offset, | ||
2509 | (u8)data); | ||
2510 | if (ret_val) | ||
2511 | break; | ||
2512 | |||
2513 | udelay(100); | ||
2514 | ret_val = e1000_retry_write_flash_byte_ich8lan(hw, | ||
2515 | act_offset + 1, | ||
2516 | (u8)(data >> 8)); | ||
2517 | if (ret_val) | ||
2518 | break; | ||
2519 | } | ||
2520 | |||
2521 | /* | ||
2522 | * Don't bother writing the segment valid bits if sector | ||
2523 | * programming failed. | ||
2524 | */ | ||
2525 | if (ret_val) { | ||
2526 | /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ | ||
2527 | e_dbg("Flash commit failed.\n"); | ||
2528 | goto release; | ||
2529 | } | ||
2530 | |||
2531 | /* | ||
2532 | * Finally validate the new segment by setting bits 15:14 | ||
2533 | * to 10b in word 0x13. This can be done without an | ||
2534 | * erase as well, since these bits start out as 11b | ||
2535 | * and we only need to change bit 14 to 0. | ||
2536 | */ | ||
2537 | act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; | ||
2538 | ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); | ||
2539 | if (ret_val) | ||
2540 | goto release; | ||
2541 | |||
2542 | data &= 0xBFFF; | ||
2543 | ret_val = e1000_retry_write_flash_byte_ich8lan(hw, | ||
2544 | act_offset * 2 + 1, | ||
2545 | (u8)(data >> 8)); | ||
2546 | if (ret_val) | ||
2547 | goto release; | ||
2548 | |||
2549 | /* | ||
2550 | * And invalidate the previously valid segment by setting | ||
2551 | * its signature word (0x13) high_byte to 0b. This can be | ||
2552 | * done without an erase because flash erase sets all bits | ||
2553 | * to 1's. We can write 1's to 0's without an erase | ||
2554 | */ | ||
2555 | act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; | ||
2556 | ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); | ||
2557 | if (ret_val) | ||
2558 | goto release; | ||
2559 | |||
2560 | /* Great! Everything worked, we can now clear the cached entries. */ | ||
2561 | for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { | ||
2562 | dev_spec->shadow_ram[i].modified = false; | ||
2563 | dev_spec->shadow_ram[i].value = 0xFFFF; | ||
2564 | } | ||
2565 | |||
2566 | release: | ||
2567 | nvm->ops.release(hw); | ||
2568 | |||
2569 | /* | ||
2570 | * Reload the EEPROM, or else modifications will not appear | ||
2571 | * until after the next adapter reset. | ||
2572 | */ | ||
2573 | if (!ret_val) { | ||
2574 | e1000e_reload_nvm(hw); | ||
2575 | usleep_range(10000, 20000); | ||
2576 | } | ||
2577 | |||
2578 | out: | ||
2579 | if (ret_val) | ||
2580 | e_dbg("NVM update error: %d\n", ret_val); | ||
2581 | |||
2582 | return ret_val; | ||
2583 | } | ||
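/*
 * Worked example of the signature handling above (editorial note): an erased
 * bank reads back all 1's, so word 0x13 bits 15:14 start out as 11b.  The
 * "data &= 0xBFFF" step clears bit 14, turning 11b into the valid 10b
 * signature without an erase; writing 0x00 over the old bank's signature
 * high byte then turns its 10b into 00b, invalidating it, again without an
 * erase since only 1 -> 0 transitions are required.
 */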
2584 | |||
2585 | /** | ||
2586 | * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum | ||
2587 | * @hw: pointer to the HW structure | ||
2588 | * | ||
2589 | * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. | ||
2590 | * If the bit is 0, the EEPROM has been modified but the checksum was not | ||
2591 | * calculated, in which case we need to calculate the checksum and set bit 6. | ||
2592 | **/ | ||
2593 | static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) | ||
2594 | { | ||
2595 | s32 ret_val; | ||
2596 | u16 data; | ||
2597 | |||
2598 | /* | ||
2599 | * Read 0x19 and check bit 6. If this bit is 0, the checksum | ||
2600 | * needs to be fixed. This bit is an indication that the NVM | ||
2601 | * was prepared by OEM software and did not calculate the | ||
2602 | * checksum...a likely scenario. | ||
2603 | */ | ||
2604 | ret_val = e1000_read_nvm(hw, 0x19, 1, &data); | ||
2605 | if (ret_val) | ||
2606 | return ret_val; | ||
2607 | |||
2608 | if ((data & 0x40) == 0) { | ||
2609 | data |= 0x40; | ||
2610 | ret_val = e1000_write_nvm(hw, 0x19, 1, &data); | ||
2611 | if (ret_val) | ||
2612 | return ret_val; | ||
2613 | ret_val = e1000e_update_nvm_checksum(hw); | ||
2614 | if (ret_val) | ||
2615 | return ret_val; | ||
2616 | } | ||
2617 | |||
2618 | return e1000e_validate_nvm_checksum_generic(hw); | ||
2619 | } | ||
2620 | |||
2621 | /** | ||
2622 | * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only | ||
2623 | * @hw: pointer to the HW structure | ||
2624 | * | ||
2625 | * To prevent malicious write/erase of the NVM, set it to be read-only | ||
2626 | * so that the hardware ignores all write/erase cycles of the NVM via | ||
2627 | * the flash control registers. The shadow-ram copy of the NVM will | ||
2628 | * still be updated, however any updates to this copy will not stick | ||
2629 | * across driver reloads. | ||
2630 | **/ | ||
2631 | void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) | ||
2632 | { | ||
2633 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
2634 | union ich8_flash_protected_range pr0; | ||
2635 | union ich8_hws_flash_status hsfsts; | ||
2636 | u32 gfpreg; | ||
2637 | |||
2638 | nvm->ops.acquire(hw); | ||
2639 | |||
2640 | gfpreg = er32flash(ICH_FLASH_GFPREG); | ||
2641 | |||
2642 | /* Write-protect GbE Sector of NVM */ | ||
2643 | pr0.regval = er32flash(ICH_FLASH_PR0); | ||
2644 | pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK; | ||
2645 | pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK); | ||
2646 | pr0.range.wpe = true; | ||
2647 | ew32flash(ICH_FLASH_PR0, pr0.regval); | ||
2648 | |||
2649 | /* | ||
2650 | * Lock down a subset of GbE Flash Control Registers, e.g. | ||
2651 | * PR0 to prevent the write-protection from being lifted. | ||
2652 | * Once FLOCKDN is set, the registers protected by it cannot | ||
2653 | * be written until FLOCKDN is cleared by a hardware reset. | ||
2654 | */ | ||
2655 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | ||
2656 | hsfsts.hsf_status.flockdn = true; | ||
2657 | ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); | ||
2658 | |||
2659 | nvm->ops.release(hw); | ||
2660 | } | ||
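/*
 * Editorial note on the register usage above: GFPREG carries the GbE flash
 * region with the base field in the low bits and the limit field starting
 * at bit 16, which is why the limit is taken from (gfpreg >> 16) with the
 * same mask.  Copying that range into PR0 with WPE set makes the flash
 * controller ignore write/erase cycles to the GbE sector, and FLOCKDN keeps
 * PR0 itself from being rewritten until the next hardware reset.
 */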
2661 | |||
2662 | /** | ||
2663 | * e1000_write_flash_data_ich8lan - Writes bytes to the NVM | ||
2664 | * @hw: pointer to the HW structure | ||
2665 | * @offset: The offset (in bytes) of the byte/word to write. | ||
2666 | * @size: Size of data to write, 1=byte 2=word | ||
2667 | * @data: The byte(s) to write to the NVM. | ||
2668 | * | ||
2669 | * Writes one/two bytes to the NVM using the flash access registers. | ||
2670 | **/ | ||
2671 | static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | ||
2672 | u8 size, u16 data) | ||
2673 | { | ||
2674 | union ich8_hws_flash_status hsfsts; | ||
2675 | union ich8_hws_flash_ctrl hsflctl; | ||
2676 | u32 flash_linear_addr; | ||
2677 | u32 flash_data = 0; | ||
2678 | s32 ret_val; | ||
2679 | u8 count = 0; | ||
2680 | |||
2681 | if (size < 1 || size > 2 || data > size * 0xff || | ||
2682 | offset > ICH_FLASH_LINEAR_ADDR_MASK) | ||
2683 | return -E1000_ERR_NVM; | ||
2684 | |||
2685 | flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + | ||
2686 | hw->nvm.flash_base_addr; | ||
2687 | |||
2688 | do { | ||
2689 | udelay(1); | ||
2690 | /* Steps */ | ||
2691 | ret_val = e1000_flash_cycle_init_ich8lan(hw); | ||
2692 | if (ret_val) | ||
2693 | break; | ||
2694 | |||
2695 | hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); | ||
2696 | /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ | ||
2697 | hsflctl.hsf_ctrl.fldbcount = size - 1; | ||
2698 | hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; | ||
2699 | ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); | ||
2700 | |||
2701 | ew32flash(ICH_FLASH_FADDR, flash_linear_addr); | ||
2702 | |||
2703 | if (size == 1) | ||
2704 | flash_data = (u32)data & 0x00FF; | ||
2705 | else | ||
2706 | flash_data = (u32)data; | ||
2707 | |||
2708 | ew32flash(ICH_FLASH_FDATA0, flash_data); | ||
2709 | |||
2710 | /* | ||
2711 | * Check if FCERR is set to 1; if it is, clear it and | ||
2712 | * try the whole sequence a few more times, else done | ||
2713 | */ | ||
2714 | ret_val = e1000_flash_cycle_ich8lan(hw, | ||
2715 | ICH_FLASH_WRITE_COMMAND_TIMEOUT); | ||
2716 | if (!ret_val) | ||
2717 | break; | ||
2718 | |||
2719 | /* | ||
2720 | * If we're here, then things are most likely | ||
2721 | * completely hosed, but if the error condition | ||
2722 | * is detected, it won't hurt to give it another | ||
2723 | * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. | ||
2724 | */ | ||
2725 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | ||
2726 | if (hsfsts.hsf_status.flcerr == 1) | ||
2727 | /* Repeat for some time before giving up. */ | ||
2728 | continue; | ||
2729 | if (hsfsts.hsf_status.flcdone == 0) { | ||
2730 | e_dbg("Timeout error - flash cycle " | ||
2731 | "did not complete."); | ||
2732 | break; | ||
2733 | } | ||
2734 | } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); | ||
2735 | |||
2736 | return ret_val; | ||
2737 | } | ||
2738 | |||
2739 | /** | ||
2740 | * e1000_write_flash_byte_ich8lan - Write a single byte to NVM | ||
2741 | * @hw: pointer to the HW structure | ||
2742 | * @offset: The index of the byte to write. | ||
2743 | * @data: The byte to write to the NVM. | ||
2744 | * | ||
2745 | * Writes a single byte to the NVM using the flash access registers. | ||
2746 | **/ | ||
2747 | static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, | ||
2748 | u8 data) | ||
2749 | { | ||
2750 | u16 word = (u16)data; | ||
2751 | |||
2752 | return e1000_write_flash_data_ich8lan(hw, offset, 1, word); | ||
2753 | } | ||
2754 | |||
2755 | /** | ||
2756 | * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM | ||
2757 | * @hw: pointer to the HW structure | ||
2758 | * @offset: The offset of the byte to write. | ||
2759 | * @byte: The byte to write to the NVM. | ||
2760 | * | ||
2761 | * Writes a single byte to the NVM using the flash access registers. | ||
2762 | * Goes through a retry algorithm before giving up. | ||
2763 | **/ | ||
2764 | static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, | ||
2765 | u32 offset, u8 byte) | ||
2766 | { | ||
2767 | s32 ret_val; | ||
2768 | u16 program_retries; | ||
2769 | |||
2770 | ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); | ||
2771 | if (!ret_val) | ||
2772 | return ret_val; | ||
2773 | |||
2774 | for (program_retries = 0; program_retries < 100; program_retries++) { | ||
2775 | e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); | ||
2776 | udelay(100); | ||
2777 | ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); | ||
2778 | if (!ret_val) | ||
2779 | break; | ||
2780 | } | ||
2781 | if (program_retries == 100) | ||
2782 | return -E1000_ERR_NVM; | ||
2783 | |||
2784 | return 0; | ||
2785 | } | ||
2786 | |||
2787 | /** | ||
2788 | * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM | ||
2789 | * @hw: pointer to the HW structure | ||
2790 | * @bank: 0 for first bank, 1 for second bank, etc. | ||
2791 | * | ||
2792 | * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. | ||
2793 | * Bank N is located at 4096 * N + flash_reg_addr. | ||
2794 | **/ | ||
2795 | static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) | ||
2796 | { | ||
2797 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
2798 | union ich8_hws_flash_status hsfsts; | ||
2799 | union ich8_hws_flash_ctrl hsflctl; | ||
2800 | u32 flash_linear_addr; | ||
2801 | /* bank size is in 16bit words - adjust to bytes */ | ||
2802 | u32 flash_bank_size = nvm->flash_bank_size * 2; | ||
2803 | s32 ret_val; | ||
2804 | s32 count = 0; | ||
2805 | s32 j, iteration, sector_size; | ||
2806 | |||
2807 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | ||
2808 | |||
2809 | /* | ||
2810 | * Determine HW Sector size: Read BERASE bits of hw flash status | ||
2811 | * register | ||
2812 | * 00: The Hw sector is 256 bytes, hence we need to erase 16 | ||
2813 | * consecutive sectors. The start index for the nth Hw sector | ||
2814 | * can be calculated as = bank * 4096 + n * 256 | ||
2815 | * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. | ||
2816 | * The start index for the nth Hw sector can be calculated | ||
2817 | * as = bank * 4096 | ||
2818 | * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 | ||
2819 | * (ich9 only, otherwise error condition) | ||
2820 | * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 | ||
2821 | */ | ||
2822 | switch (hsfsts.hsf_status.berasesz) { | ||
2823 | case 0: | ||
2824 | /* Hw sector size 256 */ | ||
2825 | sector_size = ICH_FLASH_SEG_SIZE_256; | ||
2826 | iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; | ||
2827 | break; | ||
2828 | case 1: | ||
2829 | sector_size = ICH_FLASH_SEG_SIZE_4K; | ||
2830 | iteration = 1; | ||
2831 | break; | ||
2832 | case 2: | ||
2833 | sector_size = ICH_FLASH_SEG_SIZE_8K; | ||
2834 | iteration = 1; | ||
2835 | break; | ||
2836 | case 3: | ||
2837 | sector_size = ICH_FLASH_SEG_SIZE_64K; | ||
2838 | iteration = 1; | ||
2839 | break; | ||
2840 | default: | ||
2841 | return -E1000_ERR_NVM; | ||
2842 | } | ||
2843 | |||
2844 | /* Start with the base address, then add the sector offset. */ | ||
2845 | flash_linear_addr = hw->nvm.flash_base_addr; | ||
2846 | flash_linear_addr += (bank) ? flash_bank_size : 0; | ||
2847 | |||
2848 | for (j = 0; j < iteration; j++) { | ||
2849 | do { | ||
2850 | /* Steps */ | ||
2851 | ret_val = e1000_flash_cycle_init_ich8lan(hw); | ||
2852 | if (ret_val) | ||
2853 | return ret_val; | ||
2854 | |||
2855 | /* | ||
2856 | * Write a value 11 (block Erase) in Flash | ||
2857 | * Cycle field in hw flash control | ||
2858 | */ | ||
2859 | hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); | ||
2860 | hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; | ||
2861 | ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); | ||
2862 | |||
2863 | /* | ||
2864 | * Write the last 24 bits of an index within the | ||
2865 | * block into Flash Linear address field in Flash | ||
2866 | * Address. | ||
2867 | */ | ||
2868 | flash_linear_addr += (j * sector_size); | ||
2869 | ew32flash(ICH_FLASH_FADDR, flash_linear_addr); | ||
2870 | |||
2871 | ret_val = e1000_flash_cycle_ich8lan(hw, | ||
2872 | ICH_FLASH_ERASE_COMMAND_TIMEOUT); | ||
2873 | if (ret_val == 0) | ||
2874 | break; | ||
2875 | |||
2876 | /* | ||
2877 | * Check if FCERR is set to 1. If 1, | ||
2878 | * clear it and try the whole sequence | ||
2879 | * a few more times, else done | ||
2880 | */ | ||
2881 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | ||
2882 | if (hsfsts.hsf_status.flcerr == 1) | ||
2883 | /* repeat for some time before giving up */ | ||
2884 | continue; | ||
2885 | else if (hsfsts.hsf_status.flcdone == 0) | ||
2886 | return ret_val; | ||
2887 | } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); | ||
2888 | } | ||
2889 | |||
2890 | return 0; | ||
2891 | } | ||
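/*
 * Worked example (editorial note, assuming the 4k bank described in the
 * function header): with BERASE = 00b the hardware erase unit is 256 bytes,
 * so the loop issues 4096 / 256 = 16 erase cycles per bank; for the 4K, 8K
 * and 64K erase sizes the switch above sets iteration = 1 and a single
 * cycle covers the bank.
 */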
2892 | |||
2893 | /** | ||
2894 | * e1000_valid_led_default_ich8lan - Set the default LED settings | ||
2895 | * @hw: pointer to the HW structure | ||
2896 | * @data: Pointer to the LED settings | ||
2897 | * | ||
2898 | * Reads the LED default settings from the NVM to data. If the NVM LED | ||
2899 | * setting is all 0's or F's, set the LED default to a valid LED default | ||
2900 | * setting. | ||
2901 | **/ | ||
2902 | static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) | ||
2903 | { | ||
2904 | s32 ret_val; | ||
2905 | |||
2906 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); | ||
2907 | if (ret_val) { | ||
2908 | e_dbg("NVM Read Error\n"); | ||
2909 | return ret_val; | ||
2910 | } | ||
2911 | |||
2912 | if (*data == ID_LED_RESERVED_0000 || | ||
2913 | *data == ID_LED_RESERVED_FFFF) | ||
2914 | *data = ID_LED_DEFAULT_ICH8LAN; | ||
2915 | |||
2916 | return 0; | ||
2917 | } | ||
2918 | |||
2919 | /** | ||
2920 | * e1000_id_led_init_pchlan - store LED configurations | ||
2921 | * @hw: pointer to the HW structure | ||
2922 | * | ||
2923 | * PCH does not control LEDs via the LEDCTL register, rather it uses | ||
2924 | * the PHY LED configuration register. | ||
2925 | * | ||
2926 | * PCH also does not have an "always on" or "always off" mode which | ||
2927 | * complicates the ID feature. Instead of using the "on" mode to indicate | ||
2928 | * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init()), | ||
2929 | * use "link_up" mode. The LEDs will still ID on request if there is no | ||
2930 | * link based on logic in e1000_led_[on|off]_pchlan(). | ||
2931 | **/ | ||
2932 | static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw) | ||
2933 | { | ||
2934 | struct e1000_mac_info *mac = &hw->mac; | ||
2935 | s32 ret_val; | ||
2936 | const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP; | ||
2937 | const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT; | ||
2938 | u16 data, i, temp, shift; | ||
2939 | |||
2940 | /* Get default ID LED modes */ | ||
2941 | ret_val = hw->nvm.ops.valid_led_default(hw, &data); | ||
2942 | if (ret_val) | ||
2943 | goto out; | ||
2944 | |||
2945 | mac->ledctl_default = er32(LEDCTL); | ||
2946 | mac->ledctl_mode1 = mac->ledctl_default; | ||
2947 | mac->ledctl_mode2 = mac->ledctl_default; | ||
2948 | |||
2949 | for (i = 0; i < 4; i++) { | ||
2950 | temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK; | ||
2951 | shift = (i * 5); | ||
2952 | switch (temp) { | ||
2953 | case ID_LED_ON1_DEF2: | ||
2954 | case ID_LED_ON1_ON2: | ||
2955 | case ID_LED_ON1_OFF2: | ||
2956 | mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); | ||
2957 | mac->ledctl_mode1 |= (ledctl_on << shift); | ||
2958 | break; | ||
2959 | case ID_LED_OFF1_DEF2: | ||
2960 | case ID_LED_OFF1_ON2: | ||
2961 | case ID_LED_OFF1_OFF2: | ||
2962 | mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); | ||
2963 | mac->ledctl_mode1 |= (ledctl_off << shift); | ||
2964 | break; | ||
2965 | default: | ||
2966 | /* Do nothing */ | ||
2967 | break; | ||
2968 | } | ||
2969 | switch (temp) { | ||
2970 | case ID_LED_DEF1_ON2: | ||
2971 | case ID_LED_ON1_ON2: | ||
2972 | case ID_LED_OFF1_ON2: | ||
2973 | mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); | ||
2974 | mac->ledctl_mode2 |= (ledctl_on << shift); | ||
2975 | break; | ||
2976 | case ID_LED_DEF1_OFF2: | ||
2977 | case ID_LED_ON1_OFF2: | ||
2978 | case ID_LED_OFF1_OFF2: | ||
2979 | mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); | ||
2980 | mac->ledctl_mode2 |= (ledctl_off << shift); | ||
2981 | break; | ||
2982 | default: | ||
2983 | /* Do nothing */ | ||
2984 | break; | ||
2985 | } | ||
2986 | } | ||
2987 | |||
2988 | out: | ||
2989 | return ret_val; | ||
2990 | } | ||
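/*
 * Worked example of the decode loop above (editorial note): the NVM ID LED
 * word packs one 4-bit mode per LED, so "(data >> (i << 2))" extracts the
 * nibble for LED i, while "shift = i * 5" positions the result for the PHY
 * LED configuration register, which uses 5-bit per-LED fields (hence
 * E1000_PHY_LED0_MASK << shift).
 */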
2991 | |||
2992 | /** | ||
2993 | * e1000_get_bus_info_ich8lan - Get/Set the bus type and width | ||
2994 | * @hw: pointer to the HW structure | ||
2995 | * | ||
2996 | * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability | ||
2997 | * register, so the bus width is hard coded. | ||
2998 | **/ | ||
2999 | static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) | ||
3000 | { | ||
3001 | struct e1000_bus_info *bus = &hw->bus; | ||
3002 | s32 ret_val; | ||
3003 | |||
3004 | ret_val = e1000e_get_bus_info_pcie(hw); | ||
3005 | |||
3006 | /* | ||
3007 | * ICH devices are "PCI Express"-ish. They have | ||
3008 | * a configuration space, but do not contain | ||
3009 | * PCI Express Capability registers, so bus width | ||
3010 | * must be hardcoded. | ||
3011 | */ | ||
3012 | if (bus->width == e1000_bus_width_unknown) | ||
3013 | bus->width = e1000_bus_width_pcie_x1; | ||
3014 | |||
3015 | return ret_val; | ||
3016 | } | ||
3017 | |||
3018 | /** | ||
3019 | * e1000_reset_hw_ich8lan - Reset the hardware | ||
3020 | * @hw: pointer to the HW structure | ||
3021 | * | ||
3022 | * Does a full reset of the hardware which includes a reset of the PHY and | ||
3023 | * MAC. | ||
3024 | **/ | ||
3025 | static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | ||
3026 | { | ||
3027 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | ||
3028 | u16 reg; | ||
3029 | u32 ctrl, kab; | ||
3030 | s32 ret_val; | ||
3031 | |||
3032 | /* | ||
3033 | * Prevent the PCI-E bus from sticking if there is no TLP connection | ||
3034 | * on the last TLP read/write transaction when MAC is reset. | ||
3035 | */ | ||
3036 | ret_val = e1000e_disable_pcie_master(hw); | ||
3037 | if (ret_val) | ||
3038 | e_dbg("PCI-E Master disable polling has failed.\n"); | ||
3039 | |||
3040 | e_dbg("Masking off all interrupts\n"); | ||
3041 | ew32(IMC, 0xffffffff); | ||
3042 | |||
3043 | /* | ||
3044 | * Disable the Transmit and Receive units. Then delay to allow | ||
3045 | * any pending transactions to complete before we hit the MAC | ||
3046 | * with the global reset. | ||
3047 | */ | ||
3048 | ew32(RCTL, 0); | ||
3049 | ew32(TCTL, E1000_TCTL_PSP); | ||
3050 | e1e_flush(); | ||
3051 | |||
3052 | usleep_range(10000, 20000); | ||
3053 | |||
3054 | /* Workaround for ICH8 bit corruption issue in FIFO memory */ | ||
3055 | if (hw->mac.type == e1000_ich8lan) { | ||
3056 | /* Set Tx and Rx buffer allocation to 8k apiece. */ | ||
3057 | ew32(PBA, E1000_PBA_8K); | ||
3058 | /* Set Packet Buffer Size to 16k. */ | ||
3059 | ew32(PBS, E1000_PBS_16K); | ||
3060 | } | ||
3061 | |||
3062 | if (hw->mac.type == e1000_pchlan) { | ||
3063 | /* Save the NVM K1 bit setting */ | ||
3064 | ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, ®); | ||
3065 | if (ret_val) | ||
3066 | return ret_val; | ||
3067 | |||
3068 | if (reg & E1000_NVM_K1_ENABLE) | ||
3069 | dev_spec->nvm_k1_enabled = true; | ||
3070 | else | ||
3071 | dev_spec->nvm_k1_enabled = false; | ||
3072 | } | ||
3073 | |||
3074 | ctrl = er32(CTRL); | ||
3075 | |||
3076 | if (!e1000_check_reset_block(hw)) { | ||
3077 | /* | ||
3078 | * Full-chip reset requires MAC and PHY reset at the same | ||
3079 | * time to make sure the interface between MAC and the | ||
3080 | * external PHY is reset. | ||
3081 | */ | ||
3082 | ctrl |= E1000_CTRL_PHY_RST; | ||
3083 | |||
3084 | /* | ||
3085 | * Gate automatic PHY configuration by hardware on | ||
3086 | * non-managed 82579 | ||
3087 | */ | ||
3088 | if ((hw->mac.type == e1000_pch2lan) && | ||
3089 | !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) | ||
3090 | e1000_gate_hw_phy_config_ich8lan(hw, true); | ||
3091 | } | ||
3092 | ret_val = e1000_acquire_swflag_ich8lan(hw); | ||
3093 | e_dbg("Issuing a global reset to ich8lan\n"); | ||
3094 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); | ||
3095 | /* cannot issue a flush here because it hangs the hardware */ | ||
3096 | msleep(20); | ||
3097 | |||
3098 | if (!ret_val) | ||
3099 | mutex_unlock(&swflag_mutex); | ||
3100 | |||
3101 | if (ctrl & E1000_CTRL_PHY_RST) { | ||
3102 | ret_val = hw->phy.ops.get_cfg_done(hw); | ||
3103 | if (ret_val) | ||
3104 | goto out; | ||
3105 | |||
3106 | ret_val = e1000_post_phy_reset_ich8lan(hw); | ||
3107 | if (ret_val) | ||
3108 | goto out; | ||
3109 | } | ||
3110 | |||
3111 | /* | ||
3112 | * For PCH, this write will make sure that any noise | ||
3113 | * will be detected as a CRC error and be dropped rather than show up | ||
3114 | * as a bad packet to the DMA engine. | ||
3115 | */ | ||
3116 | if (hw->mac.type == e1000_pchlan) | ||
3117 | ew32(CRC_OFFSET, 0x65656565); | ||
3118 | |||
3119 | ew32(IMC, 0xffffffff); | ||
3120 | er32(ICR); | ||
3121 | |||
3122 | kab = er32(KABGTXD); | ||
3123 | kab |= E1000_KABGTXD_BGSQLBIAS; | ||
3124 | ew32(KABGTXD, kab); | ||
3125 | |||
3126 | out: | ||
3127 | return ret_val; | ||
3128 | } | ||
3129 | |||
3130 | /** | ||
3131 | * e1000_init_hw_ich8lan - Initialize the hardware | ||
3132 | * @hw: pointer to the HW structure | ||
3133 | * | ||
3134 | * Prepares the hardware for transmit and receive by doing the following: | ||
3135 | * - initialize hardware bits | ||
3136 | * - initialize LED identification | ||
3137 | * - setup receive address registers | ||
3138 | * - setup flow control | ||
3139 | * - setup transmit descriptors | ||
3140 | * - clear statistics | ||
3141 | **/ | ||
3142 | static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) | ||
3143 | { | ||
3144 | struct e1000_mac_info *mac = &hw->mac; | ||
3145 | u32 ctrl_ext, txdctl, snoop; | ||
3146 | s32 ret_val; | ||
3147 | u16 i; | ||
3148 | |||
3149 | e1000_initialize_hw_bits_ich8lan(hw); | ||
3150 | |||
3151 | /* Initialize identification LED */ | ||
3152 | ret_val = mac->ops.id_led_init(hw); | ||
3153 | if (ret_val) | ||
3154 | e_dbg("Error initializing identification LED\n"); | ||
3155 | /* This is not fatal and we should not stop init due to this */ | ||
3156 | |||
3157 | /* Setup the receive address. */ | ||
3158 | e1000e_init_rx_addrs(hw, mac->rar_entry_count); | ||
3159 | |||
3160 | /* Zero out the Multicast HASH table */ | ||
3161 | e_dbg("Zeroing the MTA\n"); | ||
3162 | for (i = 0; i < mac->mta_reg_count; i++) | ||
3163 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); | ||
3164 | |||
3165 | /* | ||
3166 | * The 82578 Rx buffer will stall if wakeup is enabled in host and | ||
3167 | * the ME. Disable wakeup by clearing the host wakeup bit. | ||
3168 | * Reset the phy after disabling host wakeup to reset the Rx buffer. | ||
3169 | */ | ||
3170 | if (hw->phy.type == e1000_phy_82578) { | ||
3171 | e1e_rphy(hw, BM_PORT_GEN_CFG, &i); | ||
3172 | i &= ~BM_WUC_HOST_WU_BIT; | ||
3173 | e1e_wphy(hw, BM_PORT_GEN_CFG, i); | ||
3174 | ret_val = e1000_phy_hw_reset_ich8lan(hw); | ||
3175 | if (ret_val) | ||
3176 | return ret_val; | ||
3177 | } | ||
3178 | |||
3179 | /* Setup link and flow control */ | ||
3180 | ret_val = e1000_setup_link_ich8lan(hw); | ||
3181 | |||
3182 | /* Set the transmit descriptor write-back policy for both queues */ | ||
3183 | txdctl = er32(TXDCTL(0)); | ||
3184 | txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | | ||
3185 | E1000_TXDCTL_FULL_TX_DESC_WB; | ||
3186 | txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | | ||
3187 | E1000_TXDCTL_MAX_TX_DESC_PREFETCH; | ||
3188 | ew32(TXDCTL(0), txdctl); | ||
3189 | txdctl = er32(TXDCTL(1)); | ||
3190 | txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | | ||
3191 | E1000_TXDCTL_FULL_TX_DESC_WB; | ||
3192 | txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | | ||
3193 | E1000_TXDCTL_MAX_TX_DESC_PREFETCH; | ||
3194 | ew32(TXDCTL(1), txdctl); | ||
3195 | |||
3196 | /* | ||
3197 | * ICH8 has opposite polarity of no_snoop bits. | ||
3198 | * By default, we should use snoop behavior. | ||
3199 | */ | ||
3200 | if (mac->type == e1000_ich8lan) | ||
3201 | snoop = PCIE_ICH8_SNOOP_ALL; | ||
3202 | else | ||
3203 | snoop = (u32) ~(PCIE_NO_SNOOP_ALL); | ||
3204 | e1000e_set_pcie_no_snoop(hw, snoop); | ||
3205 | |||
3206 | ctrl_ext = er32(CTRL_EXT); | ||
3207 | ctrl_ext |= E1000_CTRL_EXT_RO_DIS; | ||
3208 | ew32(CTRL_EXT, ctrl_ext); | ||
3209 | |||
3210 | /* | ||
3211 | * Clear all of the statistics registers (clear on read). It is | ||
3212 | * important that we do this after we have tried to establish link | ||
3213 | * because the symbol error count will increment wildly if there | ||
3214 | * is no link. | ||
3215 | */ | ||
3216 | e1000_clear_hw_cntrs_ich8lan(hw); | ||
3217 | |||
3218 | return 0; | ||
3219 | } | ||
3220 | /** | ||
3221 | * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits | ||
3222 | * @hw: pointer to the HW structure | ||
3223 | * | ||
3224 | * Sets/Clears required hardware bits necessary for correctly setting up the | ||
3225 | * hardware for transmit and receive. | ||
3226 | **/ | ||
3227 | static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) | ||
3228 | { | ||
3229 | u32 reg; | ||
3230 | |||
3231 | /* Extended Device Control */ | ||
3232 | reg = er32(CTRL_EXT); | ||
3233 | reg |= (1 << 22); | ||
3234 | /* Enable PHY low-power state when MAC is at D3 w/o WoL */ | ||
3235 | if (hw->mac.type >= e1000_pchlan) | ||
3236 | reg |= E1000_CTRL_EXT_PHYPDEN; | ||
3237 | ew32(CTRL_EXT, reg); | ||
3238 | |||
3239 | /* Transmit Descriptor Control 0 */ | ||
3240 | reg = er32(TXDCTL(0)); | ||
3241 | reg |= (1 << 22); | ||
3242 | ew32(TXDCTL(0), reg); | ||
3243 | |||
3244 | /* Transmit Descriptor Control 1 */ | ||
3245 | reg = er32(TXDCTL(1)); | ||
3246 | reg |= (1 << 22); | ||
3247 | ew32(TXDCTL(1), reg); | ||
3248 | |||
3249 | /* Transmit Arbitration Control 0 */ | ||
3250 | reg = er32(TARC(0)); | ||
3251 | if (hw->mac.type == e1000_ich8lan) | ||
3252 | reg |= (1 << 28) | (1 << 29); | ||
3253 | reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); | ||
3254 | ew32(TARC(0), reg); | ||
3255 | |||
3256 | /* Transmit Arbitration Control 1 */ | ||
3257 | reg = er32(TARC(1)); | ||
3258 | if (er32(TCTL) & E1000_TCTL_MULR) | ||
3259 | reg &= ~(1 << 28); | ||
3260 | else | ||
3261 | reg |= (1 << 28); | ||
3262 | reg |= (1 << 24) | (1 << 26) | (1 << 30); | ||
3263 | ew32(TARC(1), reg); | ||
3264 | |||
3265 | /* Device Status */ | ||
3266 | if (hw->mac.type == e1000_ich8lan) { | ||
3267 | reg = er32(STATUS); | ||
3268 | reg &= ~(1 << 31); | ||
3269 | ew32(STATUS, reg); | ||
3270 | } | ||
3271 | |||
3272 | /* | ||
3273 | * work-around descriptor data corruption issue during nfs v2 udp | ||
3274 | * traffic, just disable the nfs filtering capability | ||
3275 | */ | ||
3276 | reg = er32(RFCTL); | ||
3277 | reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); | ||
3278 | ew32(RFCTL, reg); | ||
3279 | } | ||
3280 | |||
3281 | /** | ||
3282 | * e1000_setup_link_ich8lan - Setup flow control and link settings | ||
3283 | * @hw: pointer to the HW structure | ||
3284 | * | ||
3285 | * Determines which flow control settings to use, then configures flow | ||
3286 | * control. Calls the appropriate media-specific link configuration | ||
3287 | * function. Assuming the adapter has a valid link partner, a valid link | ||
3288 | * should be established. Assumes the hardware has previously been reset | ||
3289 | * and the transmitter and receiver are not enabled. | ||
3290 | **/ | ||
3291 | static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) | ||
3292 | { | ||
3293 | s32 ret_val; | ||
3294 | |||
3295 | if (e1000_check_reset_block(hw)) | ||
3296 | return 0; | ||
3297 | |||
3298 | /* | ||
3299 | * ICH parts do not have a word in the NVM to determine | ||
3300 | * the default flow control setting, so we explicitly | ||
3301 | * set it to full. | ||
3302 | */ | ||
3303 | if (hw->fc.requested_mode == e1000_fc_default) { | ||
3304 | /* Workaround h/w hang when Tx flow control enabled */ | ||
3305 | if (hw->mac.type == e1000_pchlan) | ||
3306 | hw->fc.requested_mode = e1000_fc_rx_pause; | ||
3307 | else | ||
3308 | hw->fc.requested_mode = e1000_fc_full; | ||
3309 | } | ||
3310 | |||
3311 | /* | ||
3312 | * Save off the requested flow control mode for use later. Depending | ||
3313 | * on the link partner's capabilities, we may or may not use this mode. | ||
3314 | */ | ||
3315 | hw->fc.current_mode = hw->fc.requested_mode; | ||
3316 | |||
3317 | e_dbg("After fix-ups FlowControl is now = %x\n", | ||
3318 | hw->fc.current_mode); | ||
3319 | |||
3320 | /* Continue to configure the copper link. */ | ||
3321 | ret_val = e1000_setup_copper_link_ich8lan(hw); | ||
3322 | if (ret_val) | ||
3323 | return ret_val; | ||
3324 | |||
3325 | ew32(FCTTV, hw->fc.pause_time); | ||
3326 | if ((hw->phy.type == e1000_phy_82578) || | ||
3327 | (hw->phy.type == e1000_phy_82579) || | ||
3328 | (hw->phy.type == e1000_phy_82577)) { | ||
3329 | ew32(FCRTV_PCH, hw->fc.refresh_time); | ||
3330 | |||
3331 | ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), | ||
3332 | hw->fc.pause_time); | ||
3333 | if (ret_val) | ||
3334 | return ret_val; | ||
3335 | } | ||
3336 | |||
3337 | return e1000e_set_fc_watermarks(hw); | ||
3338 | } | ||
3339 | |||
3340 | /** | ||
3341 | * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface | ||
3342 | * @hw: pointer to the HW structure | ||
3343 | * | ||
3344 | * Configures the kumeran interface to the PHY to wait the appropriate time | ||
3345 | * when polling the PHY, then call the generic setup_copper_link to finish | ||
3346 | * configuring the copper link. | ||
3347 | **/ | ||
3348 | static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) | ||
3349 | { | ||
3350 | u32 ctrl; | ||
3351 | s32 ret_val; | ||
3352 | u16 reg_data; | ||
3353 | |||
3354 | ctrl = er32(CTRL); | ||
3355 | ctrl |= E1000_CTRL_SLU; | ||
3356 | ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | ||
3357 | ew32(CTRL, ctrl); | ||
3358 | |||
3359 | /* | ||
3360 | * Set the mac to wait the maximum time between each iteration | ||
3361 | * and increase the max iterations when polling the phy; | ||
3362 | * this fixes erroneous timeouts at 10Mbps. | ||
3363 | */ | ||
3364 | ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF); | ||
3365 | if (ret_val) | ||
3366 | return ret_val; | ||
3367 | ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, | ||
3368 | ®_data); | ||
3369 | if (ret_val) | ||
3370 | return ret_val; | ||
3371 | reg_data |= 0x3F; | ||
3372 | ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, | ||
3373 | reg_data); | ||
3374 | if (ret_val) | ||
3375 | return ret_val; | ||
3376 | |||
3377 | switch (hw->phy.type) { | ||
3378 | case e1000_phy_igp_3: | ||
3379 | ret_val = e1000e_copper_link_setup_igp(hw); | ||
3380 | if (ret_val) | ||
3381 | return ret_val; | ||
3382 | break; | ||
3383 | case e1000_phy_bm: | ||
3384 | case e1000_phy_82578: | ||
3385 | ret_val = e1000e_copper_link_setup_m88(hw); | ||
3386 | if (ret_val) | ||
3387 | return ret_val; | ||
3388 | break; | ||
3389 | case e1000_phy_82577: | ||
3390 | case e1000_phy_82579: | ||
3391 | ret_val = e1000_copper_link_setup_82577(hw); | ||
3392 | if (ret_val) | ||
3393 | return ret_val; | ||
3394 | break; | ||
3395 | case e1000_phy_ife: | ||
3396 | ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, ®_data); | ||
3397 | if (ret_val) | ||
3398 | return ret_val; | ||
3399 | |||
3400 | reg_data &= ~IFE_PMC_AUTO_MDIX; | ||
3401 | |||
3402 | switch (hw->phy.mdix) { | ||
3403 | case 1: | ||
3404 | reg_data &= ~IFE_PMC_FORCE_MDIX; | ||
3405 | break; | ||
3406 | case 2: | ||
3407 | reg_data |= IFE_PMC_FORCE_MDIX; | ||
3408 | break; | ||
3409 | case 0: | ||
3410 | default: | ||
3411 | reg_data |= IFE_PMC_AUTO_MDIX; | ||
3412 | break; | ||
3413 | } | ||
3414 | ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); | ||
3415 | if (ret_val) | ||
3416 | return ret_val; | ||
3417 | break; | ||
3418 | default: | ||
3419 | break; | ||
3420 | } | ||
3421 | return e1000e_setup_copper_link(hw); | ||
3422 | } | ||
3423 | |||
3424 | /** | ||
3425 | * e1000_get_link_up_info_ich8lan - Get current link speed and duplex | ||
3426 | * @hw: pointer to the HW structure | ||
3427 | * @speed: pointer to store current link speed | ||
3428 | * @duplex: pointer to store the current link duplex | ||
3429 | * | ||
3430 | * Calls the generic get_speed_and_duplex to retrieve the current link | ||
3431 | * information and then calls the Kumeran lock loss workaround for links at | ||
3432 | * gigabit speeds. | ||
3433 | **/ | ||
3434 | static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, | ||
3435 | u16 *duplex) | ||
3436 | { | ||
3437 | s32 ret_val; | ||
3438 | |||
3439 | ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); | ||
3440 | if (ret_val) | ||
3441 | return ret_val; | ||
3442 | |||
3443 | if ((hw->mac.type == e1000_ich8lan) && | ||
3444 | (hw->phy.type == e1000_phy_igp_3) && | ||
3445 | (*speed == SPEED_1000)) { | ||
3446 | ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); | ||
3447 | } | ||
3448 | |||
3449 | return ret_val; | ||
3450 | } | ||
3451 | |||
3452 | /** | ||
3453 | * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround | ||
3454 | * @hw: pointer to the HW structure | ||
3455 | * | ||
3456 | * Work-around for 82566 Kumeran PCS lock loss: | ||
3457 | * On link status change (i.e. PCI reset, speed change) and link is up and | ||
3458 | * speed is gigabit: | ||
3459 | * 0) if the workaround has been disabled, do nothing | ||
3460 | * 1) wait 1ms for Kumeran link to come up | ||
3461 | * 2) check Kumeran Diagnostic register PCS lock loss bit | ||
3462 | * 3) if not set the link is locked (all is good), otherwise... | ||
3463 | * 4) reset the PHY | ||
3464 | * 5) repeat up to 10 times | ||
3465 | * Note: this is only called for IGP3 copper when speed is 1gb. | ||
3466 | **/ | ||
3467 | static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) | ||
3468 | { | ||
3469 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | ||
3470 | u32 phy_ctrl; | ||
3471 | s32 ret_val; | ||
3472 | u16 i, data; | ||
3473 | bool link; | ||
3474 | |||
3475 | if (!dev_spec->kmrn_lock_loss_workaround_enabled) | ||
3476 | return 0; | ||
3477 | |||
3478 | /* | ||
3479 | * Make sure link is up before proceeding. If not, just return. | ||
3480 | * Attempting this while the link is negotiating fouled up link | ||
3481 | * stability. | ||
3482 | */ | ||
3483 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | ||
3484 | if (!link) | ||
3485 | return 0; | ||
3486 | |||
3487 | for (i = 0; i < 10; i++) { | ||
3488 | /* read once to clear */ | ||
3489 | ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); | ||
3490 | if (ret_val) | ||
3491 | return ret_val; | ||
3492 | /* and again to get new status */ | ||
3493 | ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); | ||
3494 | if (ret_val) | ||
3495 | return ret_val; | ||
3496 | |||
3497 | /* check for PCS lock */ | ||
3498 | if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) | ||
3499 | return 0; | ||
3500 | |||
3501 | /* Issue PHY reset */ | ||
3502 | e1000_phy_hw_reset(hw); | ||
3503 | mdelay(5); | ||
3504 | } | ||
3505 | /* Disable GigE link negotiation */ | ||
3506 | phy_ctrl = er32(PHY_CTRL); | ||
3507 | phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE | | ||
3508 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | ||
3509 | ew32(PHY_CTRL, phy_ctrl); | ||
3510 | |||
3511 | /* | ||
3512 | * Call gig speed drop workaround on Gig disable before accessing | ||
3513 | * any PHY registers | ||
3514 | */ | ||
3515 | e1000e_gig_downshift_workaround_ich8lan(hw); | ||
3516 | |||
3517 | /* unable to acquire PCS lock */ | ||
3518 | return -E1000_ERR_PHY; | ||
3519 | } | ||
3520 | |||
3521 | /** | ||
3522 | * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state | ||
3523 | * @hw: pointer to the HW structure | ||
3524 | * @state: boolean value used to set the current Kumeran workaround state | ||
3525 | * | ||
3526 | * If ICH8, set the current Kumeran workaround state (enabled - true | ||
3527 | * /disabled - false). | ||
3528 | **/ | ||
3529 | void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, | ||
3530 | bool state) | ||
3531 | { | ||
3532 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | ||
3533 | |||
3534 | if (hw->mac.type != e1000_ich8lan) { | ||
3535 | e_dbg("Workaround applies to ICH8 only.\n"); | ||
3536 | return; | ||
3537 | } | ||
3538 | |||
3539 | dev_spec->kmrn_lock_loss_workaround_enabled = state; | ||
3540 | } | ||
3541 | |||
3542 | /** | ||
3543 | * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 | ||
3544 | * @hw: pointer to the HW structure | ||
3545 | * | ||
3546 | * Workaround for 82566 power-down on D3 entry: | ||
3547 | * 1) disable gigabit link | ||
3548 | * 2) write VR power-down enable | ||
3549 | * 3) read it back | ||
3550 | * Continue if successful, else issue LCD reset and repeat | ||
3551 | **/ | ||
3552 | void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) | ||
3553 | { | ||
3554 | u32 reg; | ||
3555 | u16 data; | ||
3556 | u8 retry = 0; | ||
3557 | |||
3558 | if (hw->phy.type != e1000_phy_igp_3) | ||
3559 | return; | ||
3560 | |||
3561 | /* Try the workaround twice (if needed) */ | ||
3562 | do { | ||
3563 | /* Disable link */ | ||
3564 | reg = er32(PHY_CTRL); | ||
3565 | reg |= (E1000_PHY_CTRL_GBE_DISABLE | | ||
3566 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | ||
3567 | ew32(PHY_CTRL, reg); | ||
3568 | |||
3569 | /* | ||
3570 | * Call gig speed drop workaround on Gig disable before | ||
3571 | * accessing any PHY registers | ||
3572 | */ | ||
3573 | if (hw->mac.type == e1000_ich8lan) | ||
3574 | e1000e_gig_downshift_workaround_ich8lan(hw); | ||
3575 | |||
3576 | /* Write VR power-down enable */ | ||
3577 | e1e_rphy(hw, IGP3_VR_CTRL, &data); | ||
3578 | data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; | ||
3579 | e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN); | ||
3580 | |||
3581 | /* Read it back and test */ | ||
3582 | e1e_rphy(hw, IGP3_VR_CTRL, &data); | ||
3583 | data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; | ||
3584 | if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry) | ||
3585 | break; | ||
3586 | |||
3587 | /* Issue PHY reset and repeat at most one more time */ | ||
3588 | reg = er32(CTRL); | ||
3589 | ew32(CTRL, reg | E1000_CTRL_PHY_RST); | ||
3590 | retry++; | ||
3591 | } while (retry); | ||
3592 | } | ||
3593 | |||
3594 | /** | ||
3595 | * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working | ||
3596 | * @hw: pointer to the HW structure | ||
3597 | * | ||
3598 | * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC), | ||
3599 | * LPLU, Gig disable, MDIC PHY reset): | ||
3600 | * 1) Set Kumeran Near-end loopback | ||
3601 | * 2) Clear Kumeran Near-end loopback | ||
3602 | * Should only be called for ICH8[m] devices with IGP_3 Phy. | ||
3603 | **/ | ||
3604 | void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) | ||
3605 | { | ||
3606 | s32 ret_val; | ||
3607 | u16 reg_data; | ||
3608 | |||
3609 | if ((hw->mac.type != e1000_ich8lan) || | ||
3610 | (hw->phy.type != e1000_phy_igp_3)) | ||
3611 | return; | ||
3612 | |||
3613 | ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, | ||
3614 | ®_data); | ||
3615 | if (ret_val) | ||
3616 | return; | ||
3617 | reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; | ||
3618 | ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, | ||
3619 | reg_data); | ||
3620 | if (ret_val) | ||
3621 | return; | ||
3622 | reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; | ||
3623 | ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, | ||
3624 | reg_data); | ||
3625 | } | ||
3626 | |||
3627 | /** | ||
3628 | * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx | ||
3629 | * @hw: pointer to the HW structure | ||
3630 | * | ||
3631 | * During S0 to Sx transition, it is possible the link remains at gig | ||
3632 | * instead of negotiating to a lower speed. Before going to Sx, set | ||
3633 | * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation | ||
3634 | * to a lower speed. For PCH and newer parts, the OEM bits PHY register | ||
3635 | * (LED, GbE disable and LPLU configurations) also needs to be written. | ||
3636 | **/ | ||
3637 | void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) | ||
3638 | { | ||
3639 | u32 phy_ctrl; | ||
3640 | s32 ret_val; | ||
3641 | |||
3642 | phy_ctrl = er32(PHY_CTRL); | ||
3643 | phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; | ||
3644 | ew32(PHY_CTRL, phy_ctrl); | ||
3645 | |||
3646 | if (hw->mac.type >= e1000_pchlan) { | ||
3647 | e1000_oem_bits_config_ich8lan(hw, false); | ||
3648 | ret_val = hw->phy.ops.acquire(hw); | ||
3649 | if (ret_val) | ||
3650 | return; | ||
3651 | e1000_write_smbus_addr(hw); | ||
3652 | hw->phy.ops.release(hw); | ||
3653 | } | ||
3654 | } | ||
3655 | |||
3656 | /** | ||
3657 | * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 | ||
3658 | * @hw: pointer to the HW structure | ||
3659 | * | ||
3660 | * During Sx to S0 transitions on non-managed devices or managed devices | ||
3661 | * on which PHY resets are not blocked, if the PHY registers cannot be | ||
3662 | * accessed properly by the s/w, toggle the LANPHYPC value to power cycle | ||
3663 | * the PHY. | ||
3664 | **/ | ||
3665 | void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) | ||
3666 | { | ||
3667 | u32 fwsm; | ||
3668 | |||
3669 | if (hw->mac.type != e1000_pch2lan) | ||
3670 | return; | ||
3671 | |||
3672 | fwsm = er32(FWSM); | ||
3673 | if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) { | ||
3674 | u16 phy_id1, phy_id2; | ||
3675 | s32 ret_val; | ||
3676 | |||
3677 | ret_val = hw->phy.ops.acquire(hw); | ||
3678 | if (ret_val) { | ||
3679 | e_dbg("Failed to acquire PHY semaphore in resume\n"); | ||
3680 | return; | ||
3681 | } | ||
3682 | |||
3683 | /* Test access to the PHY registers by reading the ID regs */ | ||
3684 | ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1); | ||
3685 | if (ret_val) | ||
3686 | goto release; | ||
3687 | ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2); | ||
3688 | if (ret_val) | ||
3689 | goto release; | ||
3690 | |||
3691 | if (hw->phy.id == ((u32)(phy_id1 << 16) | | ||
3692 | (u32)(phy_id2 & PHY_REVISION_MASK))) | ||
3693 | goto release; | ||
3694 | |||
3695 | e1000_toggle_lanphypc_value_ich8lan(hw); | ||
3696 | |||
3697 | hw->phy.ops.release(hw); | ||
3698 | msleep(50); | ||
3699 | e1000_phy_hw_reset(hw); | ||
3700 | msleep(50); | ||
3701 | return; | ||
3702 | } | ||
3703 | |||
3704 | release: | ||
3705 | hw->phy.ops.release(hw); | ||
3706 | |||
3707 | return; | ||
3708 | } | ||
3709 | |||
3710 | /** | ||
3711 | * e1000_cleanup_led_ich8lan - Restore the default LED operation | ||
3712 | * @hw: pointer to the HW structure | ||
3713 | * | ||
3714 | * Return the LED back to the default configuration. | ||
3715 | **/ | ||
3716 | static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) | ||
3717 | { | ||
3718 | if (hw->phy.type == e1000_phy_ife) | ||
3719 | return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); | ||
3720 | |||
3721 | ew32(LEDCTL, hw->mac.ledctl_default); | ||
3722 | return 0; | ||
3723 | } | ||
3724 | |||
3725 | /** | ||
3726 | * e1000_led_on_ich8lan - Turn LEDs on | ||
3727 | * @hw: pointer to the HW structure | ||
3728 | * | ||
3729 | * Turn on the LEDs. | ||
3730 | **/ | ||
3731 | static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) | ||
3732 | { | ||
3733 | if (hw->phy.type == e1000_phy_ife) | ||
3734 | return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, | ||
3735 | (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); | ||
3736 | |||
3737 | ew32(LEDCTL, hw->mac.ledctl_mode2); | ||
3738 | return 0; | ||
3739 | } | ||
3740 | |||
3741 | /** | ||
3742 | * e1000_led_off_ich8lan - Turn LEDs off | ||
3743 | * @hw: pointer to the HW structure | ||
3744 | * | ||
3745 | * Turn off the LEDs. | ||
3746 | **/ | ||
3747 | static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) | ||
3748 | { | ||
3749 | if (hw->phy.type == e1000_phy_ife) | ||
3750 | return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, | ||
3751 | (IFE_PSCL_PROBE_MODE | | ||
3752 | IFE_PSCL_PROBE_LEDS_OFF)); | ||
3753 | |||
3754 | ew32(LEDCTL, hw->mac.ledctl_mode1); | ||
3755 | return 0; | ||
3756 | } | ||
3757 | |||
3758 | /** | ||
3759 | * e1000_setup_led_pchlan - Configures SW controllable LED | ||
3760 | * @hw: pointer to the HW structure | ||
3761 | * | ||
3762 | * This prepares the SW controllable LED for use. | ||
3763 | **/ | ||
3764 | static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) | ||
3765 | { | ||
3766 | return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1); | ||
3767 | } | ||
3768 | |||
3769 | /** | ||
3770 | * e1000_cleanup_led_pchlan - Restore the default LED operation | ||
3771 | * @hw: pointer to the HW structure | ||
3772 | * | ||
3773 | * Return the LED back to the default configuration. | ||
3774 | **/ | ||
3775 | static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) | ||
3776 | { | ||
3777 | return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default); | ||
3778 | } | ||
3779 | |||
3780 | /** | ||
3781 | * e1000_led_on_pchlan - Turn LEDs on | ||
3782 | * @hw: pointer to the HW structure | ||
3783 | * | ||
3784 | * Turn on the LEDs. | ||
3785 | **/ | ||
3786 | static s32 e1000_led_on_pchlan(struct e1000_hw *hw) | ||
3787 | { | ||
3788 | u16 data = (u16)hw->mac.ledctl_mode2; | ||
3789 | u32 i, led; | ||
3790 | |||
3791 | /* | ||
3792 | * If no link, then turn LED on by setting the invert bit | ||
3793 | * for each LED whose mode is "link_up" in ledctl_mode2. | ||
3794 | */ | ||
3795 | if (!(er32(STATUS) & E1000_STATUS_LU)) { | ||
3796 | for (i = 0; i < 3; i++) { | ||
3797 | led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; | ||
3798 | if ((led & E1000_PHY_LED0_MODE_MASK) != | ||
3799 | E1000_LEDCTL_MODE_LINK_UP) | ||
3800 | continue; | ||
3801 | if (led & E1000_PHY_LED0_IVRT) | ||
3802 | data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); | ||
3803 | else | ||
3804 | data |= (E1000_PHY_LED0_IVRT << (i * 5)); | ||
3805 | } | ||
3806 | } | ||
3807 | |||
3808 | return e1e_wphy(hw, HV_LED_CONFIG, data); | ||
3809 | } | ||
3810 | |||
3811 | /** | ||
3812 | * e1000_led_off_pchlan - Turn LEDs off | ||
3813 | * @hw: pointer to the HW structure | ||
3814 | * | ||
3815 | * Turn off the LEDs. | ||
3816 | **/ | ||
3817 | static s32 e1000_led_off_pchlan(struct e1000_hw *hw) | ||
3818 | { | ||
3819 | u16 data = (u16)hw->mac.ledctl_mode1; | ||
3820 | u32 i, led; | ||
3821 | |||
3822 | /* | ||
3823 | * If no link, then turn LED off by clearing the invert bit | ||
3824 | * for each LED whose mode is "link_up" in ledctl_mode1. | ||
3825 | */ | ||
3826 | if (!(er32(STATUS) & E1000_STATUS_LU)) { | ||
3827 | for (i = 0; i < 3; i++) { | ||
3828 | led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; | ||
3829 | if ((led & E1000_PHY_LED0_MODE_MASK) != | ||
3830 | E1000_LEDCTL_MODE_LINK_UP) | ||
3831 | continue; | ||
3832 | if (led & E1000_PHY_LED0_IVRT) | ||
3833 | data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); | ||
3834 | else | ||
3835 | data |= (E1000_PHY_LED0_IVRT << (i * 5)); | ||
3836 | } | ||
3837 | } | ||
3838 | |||
3839 | return e1e_wphy(hw, HV_LED_CONFIG, data); | ||
3840 | } | ||
3841 | |||
3842 | /** | ||
3843 | * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset | ||
3844 | * @hw: pointer to the HW structure | ||
3845 | * | ||
3846 | * Read appropriate register for the config done bit for completion status | ||
3847 | * and configure the PHY through s/w for EEPROM-less parts. | ||
3848 | * | ||
3849 | * NOTE: some silicon which is EEPROM-less will fail trying to read the | ||
3850 | * config done bit, so only an error is logged and the function continues. | ||
3851 | * If we were to return with an error, EEPROM-less silicon would not be | ||
3852 | * able to be reset or change link. | ||
3853 | **/ | ||
3854 | static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) | ||
3855 | { | ||
3856 | s32 ret_val = 0; | ||
3857 | u32 bank = 0; | ||
3858 | u32 status; | ||
3859 | |||
3860 | e1000e_get_cfg_done(hw); | ||
3861 | |||
3862 | /* Wait for indication from h/w that it has completed basic config */ | ||
3863 | if (hw->mac.type >= e1000_ich10lan) { | ||
3864 | e1000_lan_init_done_ich8lan(hw); | ||
3865 | } else { | ||
3866 | ret_val = e1000e_get_auto_rd_done(hw); | ||
3867 | if (ret_val) { | ||
3868 | /* | ||
3869 | * When auto config read does not complete, do not | ||
3870 | * return with an error. This can happen in situations | ||
3871 | * where there is no eeprom and prevents getting link. | ||
3872 | */ | ||
3873 | e_dbg("Auto Read Done did not complete\n"); | ||
3874 | ret_val = 0; | ||
3875 | } | ||
3876 | } | ||
3877 | |||
3878 | /* Clear PHY Reset Asserted bit */ | ||
3879 | status = er32(STATUS); | ||
3880 | if (status & E1000_STATUS_PHYRA) | ||
3881 | ew32(STATUS, status & ~E1000_STATUS_PHYRA); | ||
3882 | else | ||
3883 | e_dbg("PHY Reset Asserted not set - needs delay\n"); | ||
3884 | |||
3885 | /* If EEPROM is not marked present, init the IGP 3 PHY manually */ | ||
3886 | if (hw->mac.type <= e1000_ich9lan) { | ||
3887 | if (((er32(EECD) & E1000_EECD_PRES) == 0) && | ||
3888 | (hw->phy.type == e1000_phy_igp_3)) { | ||
3889 | e1000e_phy_init_script_igp3(hw); | ||
3890 | } | ||
3891 | } else { | ||
3892 | if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { | ||
3893 | /* Maybe we should do a basic PHY config */ | ||
3894 | e_dbg("EEPROM not present\n"); | ||
3895 | ret_val = -E1000_ERR_CONFIG; | ||
3896 | } | ||
3897 | } | ||
3898 | |||
3899 | return ret_val; | ||
3900 | } | ||
3901 | |||
3902 | /** | ||
3903 | * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down | ||
3904 | * @hw: pointer to the HW structure | ||
3905 | * | ||
3906 | * In the case of a PHY power down to save power, to turn off link during a | ||
3907 | * driver unload, or when wake on LAN is not enabled, remove the link. | ||
3908 | **/ | ||
3909 | static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) | ||
3910 | { | ||
3911 | /* If the management interface is not enabled, then power down */ | ||
3912 | if (!(hw->mac.ops.check_mng_mode(hw) || | ||
3913 | hw->phy.ops.check_reset_block(hw))) | ||
3914 | e1000_power_down_phy_copper(hw); | ||
3915 | } | ||
3916 | |||
3917 | /** | ||
3918 | * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters | ||
3919 | * @hw: pointer to the HW structure | ||
3920 | * | ||
3921 | * Clears hardware counters specific to the silicon family and calls | ||
3922 | * clear_hw_cntrs_generic to clear all general purpose counters. | ||
3923 | **/ | ||
3924 | static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) | ||
3925 | { | ||
3926 | u16 phy_data; | ||
3927 | s32 ret_val; | ||
3928 | |||
3929 | e1000e_clear_hw_cntrs_base(hw); | ||
3930 | |||
3931 | er32(ALGNERRC); | ||
3932 | er32(RXERRC); | ||
3933 | er32(TNCRS); | ||
3934 | er32(CEXTERR); | ||
3935 | er32(TSCTC); | ||
3936 | er32(TSCTFC); | ||
3937 | |||
3938 | er32(MGTPRC); | ||
3939 | er32(MGTPDC); | ||
3940 | er32(MGTPTC); | ||
3941 | |||
3942 | er32(IAC); | ||
3943 | er32(ICRXOC); | ||
3944 | |||
3945 | /* Clear PHY statistics registers */ | ||
3946 | if ((hw->phy.type == e1000_phy_82578) || | ||
3947 | (hw->phy.type == e1000_phy_82579) || | ||
3948 | (hw->phy.type == e1000_phy_82577)) { | ||
3949 | ret_val = hw->phy.ops.acquire(hw); | ||
3950 | if (ret_val) | ||
3951 | return; | ||
3952 | ret_val = hw->phy.ops.set_page(hw, | ||
3953 | HV_STATS_PAGE << IGP_PAGE_SHIFT); | ||
3954 | if (ret_val) | ||
3955 | goto release; | ||
3956 | hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); | ||
3957 | hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); | ||
3958 | hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); | ||
3959 | hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); | ||
3960 | hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); | ||
3961 | hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); | ||
3962 | hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); | ||
3963 | hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); | ||
3964 | hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); | ||
3965 | hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); | ||
3966 | hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); | ||
3967 | hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); | ||
3968 | hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); | ||
3969 | hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); | ||
3970 | release: | ||
3971 | hw->phy.ops.release(hw); | ||
3972 | } | ||
3973 | } | ||
3974 | |||
3975 | static struct e1000_mac_operations ich8_mac_ops = { | ||
3976 | .id_led_init = e1000e_id_led_init, | ||
3977 | /* check_mng_mode dependent on mac type */ | ||
3978 | .check_for_link = e1000_check_for_copper_link_ich8lan, | ||
3979 | /* cleanup_led dependent on mac type */ | ||
3980 | .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, | ||
3981 | .get_bus_info = e1000_get_bus_info_ich8lan, | ||
3982 | .set_lan_id = e1000_set_lan_id_single_port, | ||
3983 | .get_link_up_info = e1000_get_link_up_info_ich8lan, | ||
3984 | /* led_on dependent on mac type */ | ||
3985 | /* led_off dependent on mac type */ | ||
3986 | .update_mc_addr_list = e1000e_update_mc_addr_list_generic, | ||
3987 | .reset_hw = e1000_reset_hw_ich8lan, | ||
3988 | .init_hw = e1000_init_hw_ich8lan, | ||
3989 | .setup_link = e1000_setup_link_ich8lan, | ||
3990 | .setup_physical_interface = e1000_setup_copper_link_ich8lan, | ||
3991 | /* id_led_init dependent on mac type */ | ||
3992 | }; | ||
3993 | |||
3994 | static struct e1000_phy_operations ich8_phy_ops = { | ||
3995 | .acquire = e1000_acquire_swflag_ich8lan, | ||
3996 | .check_reset_block = e1000_check_reset_block_ich8lan, | ||
3997 | .commit = NULL, | ||
3998 | .get_cfg_done = e1000_get_cfg_done_ich8lan, | ||
3999 | .get_cable_length = e1000e_get_cable_length_igp_2, | ||
4000 | .read_reg = e1000e_read_phy_reg_igp, | ||
4001 | .release = e1000_release_swflag_ich8lan, | ||
4002 | .reset = e1000_phy_hw_reset_ich8lan, | ||
4003 | .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, | ||
4004 | .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, | ||
4005 | .write_reg = e1000e_write_phy_reg_igp, | ||
4006 | }; | ||
4007 | |||
4008 | static struct e1000_nvm_operations ich8_nvm_ops = { | ||
4009 | .acquire = e1000_acquire_nvm_ich8lan, | ||
4010 | .read = e1000_read_nvm_ich8lan, | ||
4011 | .release = e1000_release_nvm_ich8lan, | ||
4012 | .update = e1000_update_nvm_checksum_ich8lan, | ||
4013 | .valid_led_default = e1000_valid_led_default_ich8lan, | ||
4014 | .validate = e1000_validate_nvm_checksum_ich8lan, | ||
4015 | .write = e1000_write_nvm_ich8lan, | ||
4016 | }; | ||
4017 | |||
4018 | struct e1000_info e1000_ich8_info = { | ||
4019 | .mac = e1000_ich8lan, | ||
4020 | .flags = FLAG_HAS_WOL | ||
4021 | | FLAG_IS_ICH | ||
4022 | | FLAG_RX_CSUM_ENABLED | ||
4023 | | FLAG_HAS_CTRLEXT_ON_LOAD | ||
4024 | | FLAG_HAS_AMT | ||
4025 | | FLAG_HAS_FLASH | ||
4026 | | FLAG_APME_IN_WUC, | ||
4027 | .pba = 8, | ||
4028 | .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, | ||
4029 | .get_variants = e1000_get_variants_ich8lan, | ||
4030 | .mac_ops = &ich8_mac_ops, | ||
4031 | .phy_ops = &ich8_phy_ops, | ||
4032 | .nvm_ops = &ich8_nvm_ops, | ||
4033 | }; | ||
4034 | |||
4035 | struct e1000_info e1000_ich9_info = { | ||
4036 | .mac = e1000_ich9lan, | ||
4037 | .flags = FLAG_HAS_JUMBO_FRAMES | ||
4038 | | FLAG_IS_ICH | ||
4039 | | FLAG_HAS_WOL | ||
4040 | | FLAG_RX_CSUM_ENABLED | ||
4041 | | FLAG_HAS_CTRLEXT_ON_LOAD | ||
4042 | | FLAG_HAS_AMT | ||
4043 | | FLAG_HAS_ERT | ||
4044 | | FLAG_HAS_FLASH | ||
4045 | | FLAG_APME_IN_WUC, | ||
4046 | .pba = 10, | ||
4047 | .max_hw_frame_size = DEFAULT_JUMBO, | ||
4048 | .get_variants = e1000_get_variants_ich8lan, | ||
4049 | .mac_ops = &ich8_mac_ops, | ||
4050 | .phy_ops = &ich8_phy_ops, | ||
4051 | .nvm_ops = &ich8_nvm_ops, | ||
4052 | }; | ||
4053 | |||
4054 | struct e1000_info e1000_ich10_info = { | ||
4055 | .mac = e1000_ich10lan, | ||
4056 | .flags = FLAG_HAS_JUMBO_FRAMES | ||
4057 | | FLAG_IS_ICH | ||
4058 | | FLAG_HAS_WOL | ||
4059 | | FLAG_RX_CSUM_ENABLED | ||
4060 | | FLAG_HAS_CTRLEXT_ON_LOAD | ||
4061 | | FLAG_HAS_AMT | ||
4062 | | FLAG_HAS_ERT | ||
4063 | | FLAG_HAS_FLASH | ||
4064 | | FLAG_APME_IN_WUC, | ||
4065 | .pba = 10, | ||
4066 | .max_hw_frame_size = DEFAULT_JUMBO, | ||
4067 | .get_variants = e1000_get_variants_ich8lan, | ||
4068 | .mac_ops = &ich8_mac_ops, | ||
4069 | .phy_ops = &ich8_phy_ops, | ||
4070 | .nvm_ops = &ich8_nvm_ops, | ||
4071 | }; | ||
4072 | |||
4073 | struct e1000_info e1000_pch_info = { | ||
4074 | .mac = e1000_pchlan, | ||
4075 | .flags = FLAG_IS_ICH | ||
4076 | | FLAG_HAS_WOL | ||
4077 | | FLAG_RX_CSUM_ENABLED | ||
4078 | | FLAG_HAS_CTRLEXT_ON_LOAD | ||
4079 | | FLAG_HAS_AMT | ||
4080 | | FLAG_HAS_FLASH | ||
4081 | | FLAG_HAS_JUMBO_FRAMES | ||
4082 | | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ | ||
4083 | | FLAG_APME_IN_WUC, | ||
4084 | .flags2 = FLAG2_HAS_PHY_STATS, | ||
4085 | .pba = 26, | ||
4086 | .max_hw_frame_size = 4096, | ||
4087 | .get_variants = e1000_get_variants_ich8lan, | ||
4088 | .mac_ops = &ich8_mac_ops, | ||
4089 | .phy_ops = &ich8_phy_ops, | ||
4090 | .nvm_ops = &ich8_nvm_ops, | ||
4091 | }; | ||
4092 | |||
4093 | struct e1000_info e1000_pch2_info = { | ||
4094 | .mac = e1000_pch2lan, | ||
4095 | .flags = FLAG_IS_ICH | ||
4096 | | FLAG_HAS_WOL | ||
4097 | | FLAG_RX_CSUM_ENABLED | ||
4098 | | FLAG_HAS_CTRLEXT_ON_LOAD | ||
4099 | | FLAG_HAS_AMT | ||
4100 | | FLAG_HAS_FLASH | ||
4101 | | FLAG_HAS_JUMBO_FRAMES | ||
4102 | | FLAG_APME_IN_WUC, | ||
4103 | .flags2 = FLAG2_HAS_PHY_STATS | ||
4104 | | FLAG2_HAS_EEE, | ||
4105 | .pba = 26, | ||
4106 | .max_hw_frame_size = DEFAULT_JUMBO, | ||
4107 | .get_variants = e1000_get_variants_ich8lan, | ||
4108 | .mac_ops = &ich8_mac_ops, | ||
4109 | .phy_ops = &ich8_phy_ops, | ||
4110 | .nvm_ops = &ich8_nvm_ops, | ||
4111 | }; | ||
diff --git a/drivers/net/ethernet/intel/e1000e/lib.c b/drivers/net/ethernet/intel/e1000e/lib.c new file mode 100644 index 000000000000..7898a67d6505 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/lib.c | |||
@@ -0,0 +1,2692 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/1000 Linux driver | ||
4 | Copyright(c) 1999 - 2011 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #include "e1000.h" | ||
30 | |||
31 | enum e1000_mng_mode { | ||
32 | e1000_mng_mode_none = 0, | ||
33 | e1000_mng_mode_asf, | ||
34 | e1000_mng_mode_pt, | ||
35 | e1000_mng_mode_ipmi, | ||
36 | e1000_mng_mode_host_if_only | ||
37 | }; | ||
38 | |||
39 | #define E1000_FACTPS_MNGCG 0x20000000 | ||
40 | |||
41 | /* Intel(R) Active Management Technology signature */ | ||
42 | #define E1000_IAMT_SIGNATURE 0x544D4149 | ||
43 | |||
44 | /** | ||
45 | * e1000e_get_bus_info_pcie - Get PCIe bus information | ||
46 | * @hw: pointer to the HW structure | ||
47 | * | ||
48 | * Determines and stores the system bus information for a particular | ||
49 | * network interface. The following bus information is determined and stored: | ||
50 | * bus speed, bus width, type (PCIe), and PCIe function. | ||
51 | **/ | ||
52 | s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) | ||
53 | { | ||
54 | struct e1000_mac_info *mac = &hw->mac; | ||
55 | struct e1000_bus_info *bus = &hw->bus; | ||
56 | struct e1000_adapter *adapter = hw->adapter; | ||
57 | u16 pcie_link_status, cap_offset; | ||
58 | |||
59 | cap_offset = adapter->pdev->pcie_cap; | ||
60 | if (!cap_offset) { | ||
61 | bus->width = e1000_bus_width_unknown; | ||
62 | } else { | ||
63 | pci_read_config_word(adapter->pdev, | ||
64 | cap_offset + PCIE_LINK_STATUS, | ||
65 | &pcie_link_status); | ||
66 | bus->width = (enum e1000_bus_width)((pcie_link_status & | ||
67 | PCIE_LINK_WIDTH_MASK) >> | ||
68 | PCIE_LINK_WIDTH_SHIFT); | ||
69 | } | ||
70 | |||
71 | mac->ops.set_lan_id(hw); | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | /** | ||
77 | * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices | ||
78 | * | ||
79 | * @hw: pointer to the HW structure | ||
80 | * | ||
81 | * Determines the LAN function id by reading memory-mapped registers | ||
82 | * and swaps the port value if requested. | ||
83 | **/ | ||
84 | void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) | ||
85 | { | ||
86 | struct e1000_bus_info *bus = &hw->bus; | ||
87 | u32 reg; | ||
88 | |||
89 | /* | ||
90 | * The status register reports the correct function number | ||
91 | * for the device regardless of function swap state. | ||
92 | */ | ||
93 | reg = er32(STATUS); | ||
94 | bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; | ||
95 | } | ||
96 | |||
97 | /** | ||
98 | * e1000_set_lan_id_single_port - Set LAN id for a single port device | ||
99 | * @hw: pointer to the HW structure | ||
100 | * | ||
101 | * Sets the LAN function id to zero for a single port device. | ||
102 | **/ | ||
103 | void e1000_set_lan_id_single_port(struct e1000_hw *hw) | ||
104 | { | ||
105 | struct e1000_bus_info *bus = &hw->bus; | ||
106 | |||
107 | bus->func = 0; | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * e1000_clear_vfta_generic - Clear VLAN filter table | ||
112 | * @hw: pointer to the HW structure | ||
113 | * | ||
114 | * Clears the register array which contains the VLAN filter table by | ||
115 | * setting all the values to 0. | ||
116 | **/ | ||
117 | void e1000_clear_vfta_generic(struct e1000_hw *hw) | ||
118 | { | ||
119 | u32 offset; | ||
120 | |||
121 | for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { | ||
122 | E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); | ||
123 | e1e_flush(); | ||
124 | } | ||
125 | } | ||
126 | |||
127 | /** | ||
128 | * e1000_write_vfta_generic - Write value to VLAN filter table | ||
129 | * @hw: pointer to the HW structure | ||
130 | * @offset: register offset in VLAN filter table | ||
131 | * @value: register value written to VLAN filter table | ||
132 | * | ||
133 | * Writes value at the given offset in the register array which stores | ||
134 | * the VLAN filter table. | ||
135 | **/ | ||
136 | void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) | ||
137 | { | ||
138 | E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); | ||
139 | e1e_flush(); | ||
140 | } | ||
141 | |||
142 | /** | ||
143 | * e1000e_init_rx_addrs - Initialize receive addresses | ||
144 | * @hw: pointer to the HW structure | ||
145 | * @rar_count: receive address registers | ||
146 | * | ||
147 | * Setup the receive address registers by setting the base receive address | ||
148 | * register to the device's MAC address and clearing all the other receive | ||
149 | * address registers to 0. | ||
150 | **/ | ||
151 | void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) | ||
152 | { | ||
153 | u32 i; | ||
154 | u8 mac_addr[ETH_ALEN] = {0}; | ||
155 | |||
156 | /* Setup the receive address */ | ||
157 | e_dbg("Programming MAC Address into RAR[0]\n"); | ||
158 | |||
159 | e1000e_rar_set(hw, hw->mac.addr, 0); | ||
160 | |||
161 | /* Zero out the other (rar_entry_count - 1) receive addresses */ | ||
162 | e_dbg("Clearing RAR[1-%u]\n", rar_count-1); | ||
163 | for (i = 1; i < rar_count; i++) | ||
164 | e1000e_rar_set(hw, mac_addr, i); | ||
165 | } | ||
166 | |||
167 | /** | ||
168 | * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr | ||
169 | * @hw: pointer to the HW structure | ||
170 | * | ||
171 | * Checks the nvm for an alternate MAC address. An alternate MAC address | ||
172 | * can be set up by pre-boot software and must be treated like a permanent | ||
173 | * address and must override the actual permanent MAC address. If an | ||
174 | * alternate MAC address is found, it is programmed into RAR0, replacing | ||
175 | * the permanent address that was installed into RAR0 by the Si on reset. | ||
176 | * This function will return SUCCESS unless it encounters an error while | ||
177 | * reading the EEPROM. | ||
178 | **/ | ||
179 | s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) | ||
180 | { | ||
181 | u32 i; | ||
182 | s32 ret_val = 0; | ||
183 | u16 offset, nvm_alt_mac_addr_offset, nvm_data; | ||
184 | u8 alt_mac_addr[ETH_ALEN]; | ||
185 | |||
186 | ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data); | ||
187 | if (ret_val) | ||
188 | goto out; | ||
189 | |||
190 | /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */ | ||
191 | if (!((nvm_data & NVM_COMPAT_LOM) || | ||
192 | (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) || | ||
193 | (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD))) | ||
194 | goto out; | ||
195 | |||
196 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, | ||
197 | &nvm_alt_mac_addr_offset); | ||
198 | if (ret_val) { | ||
199 | e_dbg("NVM Read Error\n"); | ||
200 | goto out; | ||
201 | } | ||
202 | |||
203 | if (nvm_alt_mac_addr_offset == 0xFFFF) { | ||
204 | /* There is no Alternate MAC Address */ | ||
205 | goto out; | ||
206 | } | ||
207 | |||
208 | if (hw->bus.func == E1000_FUNC_1) | ||
209 | nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; | ||
210 | for (i = 0; i < ETH_ALEN; i += 2) { | ||
211 | offset = nvm_alt_mac_addr_offset + (i >> 1); | ||
212 | ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); | ||
213 | if (ret_val) { | ||
214 | e_dbg("NVM Read Error\n"); | ||
215 | goto out; | ||
216 | } | ||
217 | |||
218 | alt_mac_addr[i] = (u8)(nvm_data & 0xFF); | ||
219 | alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); | ||
220 | } | ||
221 | |||
222 | /* if multicast bit is set, the alternate address will not be used */ | ||
223 | if (is_multicast_ether_addr(alt_mac_addr)) { | ||
224 | e_dbg("Ignoring Alternate Mac Address with MC bit set\n"); | ||
225 | goto out; | ||
226 | } | ||
227 | |||
228 | /* | ||
229 | * We have a valid alternate MAC address, and we want to treat it the | ||
230 | * same as the normal permanent MAC address stored by the HW into the | ||
231 | * RAR. Do this by mapping this address into RAR0. | ||
232 | */ | ||
233 | e1000e_rar_set(hw, alt_mac_addr, 0); | ||
234 | |||
235 | out: | ||
236 | return ret_val; | ||
237 | } | ||
238 | |||
239 | /** | ||
240 | * e1000e_rar_set - Set receive address register | ||
241 | * @hw: pointer to the HW structure | ||
242 | * @addr: pointer to the receive address | ||
243 | * @index: receive address array register | ||
244 | * | ||
245 | * Sets the receive address array register at index to the address passed | ||
246 | * in by addr. | ||
247 | **/ | ||
248 | void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) | ||
249 | { | ||
250 | u32 rar_low, rar_high; | ||
251 | |||
252 | /* | ||
253 | * HW expects these in little endian so we reverse the byte order | ||
254 | * from network order (big endian) to little endian | ||
255 | */ | ||
256 | rar_low = ((u32) addr[0] | | ||
257 | ((u32) addr[1] << 8) | | ||
258 | ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); | ||
259 | |||
260 | rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); | ||
261 | |||
262 | /* If MAC address zero, no need to set the AV bit */ | ||
263 | if (rar_low || rar_high) | ||
264 | rar_high |= E1000_RAH_AV; | ||
265 | |||
266 | /* | ||
267 | * Some bridges will combine consecutive 32-bit writes into | ||
268 | * a single burst write, which will malfunction on some parts. | ||
269 | * The flushes avoid this. | ||
270 | */ | ||
271 | ew32(RAL(index), rar_low); | ||
272 | e1e_flush(); | ||
273 | ew32(RAH(index), rar_high); | ||
274 | e1e_flush(); | ||
275 | } | ||
276 | |||
277 | /** | ||
278 | * e1000_hash_mc_addr - Generate a multicast hash value | ||
279 | * @hw: pointer to the HW structure | ||
280 | * @mc_addr: pointer to a multicast address | ||
281 | * | ||
282 | * Generates a multicast address hash value which is used to determine | ||
283 | * the multicast filter table array address and new table value. See | ||
284 | * e1000_mta_set_generic() | ||
285 | **/ | ||
286 | static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) | ||
287 | { | ||
288 | u32 hash_value, hash_mask; | ||
289 | u8 bit_shift = 0; | ||
290 | |||
291 | /* Register count multiplied by bits per register */ | ||
292 | hash_mask = (hw->mac.mta_reg_count * 32) - 1; | ||
293 | |||
294 | /* | ||
295 | * For a mc_filter_type of 0, bit_shift is the number of left-shifts | ||
296 | * where 0xFF would still fall within the hash mask. | ||
297 | */ | ||
298 | while (hash_mask >> bit_shift != 0xFF) | ||
299 | bit_shift++; | ||
300 | |||
301 | /* | ||
302 | * The portion of the address that is used for the hash table | ||
303 | * is determined by the mc_filter_type setting. | ||
304 | * The algorithm is such that there is a total of 8 bits of shifting. | ||
305 | * The bit_shift for a mc_filter_type of 0 represents the number of | ||
306 | * left-shifts where the MSB of mc_addr[5] would still fall within | ||
307 | * the hash_mask. Case 0 does this exactly. Since there are a total | ||
308 | * of 8 bits of shifting, then mc_addr[4] will shift right the | ||
309 | * remaining number of bits. Thus 8 - bit_shift. The rest of the | ||
310 | * cases are a variation of this algorithm...essentially raising the | ||
311 | * number of bits to shift mc_addr[5] left, while still keeping the | ||
312 | * 8-bit shifting total. | ||
313 | * | ||
314 | * For example, given the following Destination MAC Address and an | ||
315 | * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), | ||
316 | * we can see that the bit_shift for case 0 is 4. These are the hash | ||
317 | * values resulting from each mc_filter_type... | ||
318 | * [0] [1] [2] [3] [4] [5] | ||
319 | * 01 AA 00 12 34 56 | ||
320 | * LSB MSB | ||
321 | * | ||
322 | * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 | ||
323 | * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 | ||
324 | * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x58D | ||
325 | * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 | ||
326 | */ | ||
327 | switch (hw->mac.mc_filter_type) { | ||
328 | default: | ||
329 | case 0: | ||
330 | break; | ||
331 | case 1: | ||
332 | bit_shift += 1; | ||
333 | break; | ||
334 | case 2: | ||
335 | bit_shift += 2; | ||
336 | break; | ||
337 | case 3: | ||
338 | bit_shift += 4; | ||
339 | break; | ||
340 | } | ||
341 | |||
342 | hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | | ||
343 | (((u16) mc_addr[5]) << bit_shift))); | ||
344 | |||
345 | return hash_value; | ||
346 | } | ||
347 | |||
348 | /** | ||
349 | * e1000e_update_mc_addr_list_generic - Update Multicast addresses | ||
350 | * @hw: pointer to the HW structure | ||
351 | * @mc_addr_list: array of multicast addresses to program | ||
352 | * @mc_addr_count: number of multicast addresses to program | ||
353 | * | ||
354 | * Updates entire Multicast Table Array. | ||
355 | * The caller must have a packed mc_addr_list of multicast addresses. | ||
356 | **/ | ||
357 | void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, | ||
358 | u8 *mc_addr_list, u32 mc_addr_count) | ||
359 | { | ||
360 | u32 hash_value, hash_bit, hash_reg; | ||
361 | int i; | ||
362 | |||
363 | /* clear mta_shadow */ | ||
364 | memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); | ||
365 | |||
366 | /* update mta_shadow from mc_addr_list */ | ||
367 | for (i = 0; (u32) i < mc_addr_count; i++) { | ||
368 | hash_value = e1000_hash_mc_addr(hw, mc_addr_list); | ||
369 | |||
370 | hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); | ||
371 | hash_bit = hash_value & 0x1F; | ||
372 | |||
373 | hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); | ||
374 | mc_addr_list += (ETH_ALEN); | ||
375 | } | ||
376 | |||
377 | /* replace the entire MTA table */ | ||
378 | for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) | ||
379 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); | ||
380 | e1e_flush(); | ||
381 | } | ||
382 | |||
383 | /** | ||
384 | * e1000e_clear_hw_cntrs_base - Clear base hardware counters | ||
385 | * @hw: pointer to the HW structure | ||
386 | * | ||
387 | * Clears the base hardware counters by reading the counter registers. | ||
388 | **/ | ||
389 | void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) | ||
390 | { | ||
391 | er32(CRCERRS); | ||
392 | er32(SYMERRS); | ||
393 | er32(MPC); | ||
394 | er32(SCC); | ||
395 | er32(ECOL); | ||
396 | er32(MCC); | ||
397 | er32(LATECOL); | ||
398 | er32(COLC); | ||
399 | er32(DC); | ||
400 | er32(SEC); | ||
401 | er32(RLEC); | ||
402 | er32(XONRXC); | ||
403 | er32(XONTXC); | ||
404 | er32(XOFFRXC); | ||
405 | er32(XOFFTXC); | ||
406 | er32(FCRUC); | ||
407 | er32(GPRC); | ||
408 | er32(BPRC); | ||
409 | er32(MPRC); | ||
410 | er32(GPTC); | ||
411 | er32(GORCL); | ||
412 | er32(GORCH); | ||
413 | er32(GOTCL); | ||
414 | er32(GOTCH); | ||
415 | er32(RNBC); | ||
416 | er32(RUC); | ||
417 | er32(RFC); | ||
418 | er32(ROC); | ||
419 | er32(RJC); | ||
420 | er32(TORL); | ||
421 | er32(TORH); | ||
422 | er32(TOTL); | ||
423 | er32(TOTH); | ||
424 | er32(TPR); | ||
425 | er32(TPT); | ||
426 | er32(MPTC); | ||
427 | er32(BPTC); | ||
428 | } | ||
429 | |||
430 | /** | ||
431 | * e1000e_check_for_copper_link - Check for link (Copper) | ||
432 | * @hw: pointer to the HW structure | ||
433 | * | ||
434 | * Checks to see if the link status of the hardware has changed. If a | ||
435 | * change in link status has been detected, then we read the PHY registers | ||
436 | * to get the current speed/duplex if link exists. | ||
437 | **/ | ||
438 | s32 e1000e_check_for_copper_link(struct e1000_hw *hw) | ||
439 | { | ||
440 | struct e1000_mac_info *mac = &hw->mac; | ||
441 | s32 ret_val; | ||
442 | bool link; | ||
443 | |||
444 | /* | ||
445 | * We only want to go out to the PHY registers to see if Auto-Neg | ||
446 | * has completed and/or if our link status has changed. The | ||
447 | * get_link_status flag is set upon receiving a Link Status | ||
448 | * Change or Rx Sequence Error interrupt. | ||
449 | */ | ||
450 | if (!mac->get_link_status) | ||
451 | return 0; | ||
452 | |||
453 | /* | ||
454 | * First we want to see if the MII Status Register reports | ||
455 | * link. If so, then we want to get the current speed/duplex | ||
456 | * of the PHY. | ||
457 | */ | ||
458 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | ||
459 | if (ret_val) | ||
460 | return ret_val; | ||
461 | |||
462 | if (!link) | ||
463 | return ret_val; /* No link detected */ | ||
464 | |||
465 | mac->get_link_status = false; | ||
466 | |||
467 | /* | ||
468 | * Check if there was DownShift, must be checked | ||
469 | * immediately after link-up | ||
470 | */ | ||
471 | e1000e_check_downshift(hw); | ||
472 | |||
473 | /* | ||
474 | * If we are forcing speed/duplex, then we simply return since | ||
475 | * we have already determined whether we have link or not. | ||
476 | */ | ||
477 | if (!mac->autoneg) { | ||
478 | ret_val = -E1000_ERR_CONFIG; | ||
479 | return ret_val; | ||
480 | } | ||
481 | |||
482 | /* | ||
483 | * Auto-Neg is enabled. Auto Speed Detection takes care | ||
484 | * of MAC speed/duplex configuration. So we only need to | ||
485 | * configure Collision Distance in the MAC. | ||
486 | */ | ||
487 | e1000e_config_collision_dist(hw); | ||
488 | |||
489 | /* | ||
490 | * Configure Flow Control now that Auto-Neg has completed. | ||
491 | * First, we need to restore the desired flow control | ||
492 | * settings because we may have had to re-autoneg with a | ||
493 | * different link partner. | ||
494 | */ | ||
495 | ret_val = e1000e_config_fc_after_link_up(hw); | ||
496 | if (ret_val) | ||
497 | e_dbg("Error configuring flow control\n"); | ||
498 | |||
499 | return ret_val; | ||
500 | } | ||
501 | |||
502 | /** | ||
503 | * e1000e_check_for_fiber_link - Check for link (Fiber) | ||
504 | * @hw: pointer to the HW structure | ||
505 | * | ||
506 | * Checks for link up on the hardware. If link is not up and we have | ||
507 | * a signal, then we need to force link up. | ||
508 | **/ | ||
509 | s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | ||
510 | { | ||
511 | struct e1000_mac_info *mac = &hw->mac; | ||
512 | u32 rxcw; | ||
513 | u32 ctrl; | ||
514 | u32 status; | ||
515 | s32 ret_val; | ||
516 | |||
517 | ctrl = er32(CTRL); | ||
518 | status = er32(STATUS); | ||
519 | rxcw = er32(RXCW); | ||
520 | |||
521 | /* | ||
522 | * If we don't have link (auto-negotiation failed or link partner | ||
523 | * cannot auto-negotiate), the cable is plugged in (we have signal), | ||
524 | * and our link partner is not trying to auto-negotiate with us (we | ||
525 | * are receiving idles or data), we need to force link up. We also | ||
526 | * need to give auto-negotiation time to complete, in case the cable | ||
527 | * was just plugged in. The autoneg_failed flag does this. | ||
528 | */ | ||
529 | /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ | ||
530 | if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) && | ||
531 | (!(rxcw & E1000_RXCW_C))) { | ||
532 | if (mac->autoneg_failed == 0) { | ||
533 | mac->autoneg_failed = 1; | ||
534 | return 0; | ||
535 | } | ||
536 | e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); | ||
537 | |||
538 | /* Disable auto-negotiation in the TXCW register */ | ||
539 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | ||
540 | |||
541 | /* Force link-up and also force full-duplex. */ | ||
542 | ctrl = er32(CTRL); | ||
543 | ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); | ||
544 | ew32(CTRL, ctrl); | ||
545 | |||
546 | /* Configure Flow Control after forcing link up. */ | ||
547 | ret_val = e1000e_config_fc_after_link_up(hw); | ||
548 | if (ret_val) { | ||
549 | e_dbg("Error configuring flow control\n"); | ||
550 | return ret_val; | ||
551 | } | ||
552 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | ||
553 | /* | ||
554 | * If we are forcing link and we are receiving /C/ ordered | ||
555 | * sets, re-enable auto-negotiation in the TXCW register | ||
556 | * and disable forced link in the Device Control register | ||
557 | * in an attempt to auto-negotiate with our link partner. | ||
558 | */ | ||
559 | e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); | ||
560 | ew32(TXCW, mac->txcw); | ||
561 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | ||
562 | |||
563 | mac->serdes_has_link = true; | ||
564 | } | ||
565 | |||
566 | return 0; | ||
567 | } | ||
568 | |||
569 | /** | ||
570 | * e1000e_check_for_serdes_link - Check for link (Serdes) | ||
571 | * @hw: pointer to the HW structure | ||
572 | * | ||
573 | * Checks for link up on the hardware. If link is not up and we have | ||
574 | * a signal, then we need to force link up. | ||
575 | **/ | ||
576 | s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | ||
577 | { | ||
578 | struct e1000_mac_info *mac = &hw->mac; | ||
579 | u32 rxcw; | ||
580 | u32 ctrl; | ||
581 | u32 status; | ||
582 | s32 ret_val; | ||
583 | |||
584 | ctrl = er32(CTRL); | ||
585 | status = er32(STATUS); | ||
586 | rxcw = er32(RXCW); | ||
587 | |||
588 | /* | ||
589 | * If we don't have link (auto-negotiation failed or link partner | ||
590 | * cannot auto-negotiate), and our link partner is not trying to | ||
591 | * auto-negotiate with us (we are receiving idles or data), | ||
592 | * we need to force link up. We also need to give auto-negotiation | ||
593 | * time to complete. | ||
594 | */ | ||
595 | /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ | ||
596 | if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { | ||
597 | if (mac->autoneg_failed == 0) { | ||
598 | mac->autoneg_failed = 1; | ||
599 | return 0; | ||
600 | } | ||
601 | e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); | ||
602 | |||
603 | /* Disable auto-negotiation in the TXCW register */ | ||
604 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | ||
605 | |||
606 | /* Force link-up and also force full-duplex. */ | ||
607 | ctrl = er32(CTRL); | ||
608 | ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); | ||
609 | ew32(CTRL, ctrl); | ||
610 | |||
611 | /* Configure Flow Control after forcing link up. */ | ||
612 | ret_val = e1000e_config_fc_after_link_up(hw); | ||
613 | if (ret_val) { | ||
614 | e_dbg("Error configuring flow control\n"); | ||
615 | return ret_val; | ||
616 | } | ||
617 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | ||
618 | /* | ||
619 | * If we are forcing link and we are receiving /C/ ordered | ||
620 | * sets, re-enable auto-negotiation in the TXCW register | ||
621 | * and disable forced link in the Device Control register | ||
622 | * in an attempt to auto-negotiate with our link partner. | ||
623 | */ | ||
624 | e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); | ||
625 | ew32(TXCW, mac->txcw); | ||
626 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | ||
627 | |||
628 | mac->serdes_has_link = true; | ||
629 | } else if (!(E1000_TXCW_ANE & er32(TXCW))) { | ||
630 | /* | ||
631 | * If we force link for non-auto-negotiation switch, check | ||
632 | * link status based on MAC synchronization for internal | ||
633 | * serdes media type. | ||
634 | */ | ||
635 | /* SYNCH bit and IV bit are sticky. */ | ||
636 | udelay(10); | ||
637 | rxcw = er32(RXCW); | ||
638 | if (rxcw & E1000_RXCW_SYNCH) { | ||
639 | if (!(rxcw & E1000_RXCW_IV)) { | ||
640 | mac->serdes_has_link = true; | ||
641 | e_dbg("SERDES: Link up - forced.\n"); | ||
642 | } | ||
643 | } else { | ||
644 | mac->serdes_has_link = false; | ||
645 | e_dbg("SERDES: Link down - force failed.\n"); | ||
646 | } | ||
647 | } | ||
648 | |||
649 | if (E1000_TXCW_ANE & er32(TXCW)) { | ||
650 | status = er32(STATUS); | ||
651 | if (status & E1000_STATUS_LU) { | ||
652 | /* SYNCH bit and IV bit are sticky, so reread rxcw. */ | ||
653 | udelay(10); | ||
654 | rxcw = er32(RXCW); | ||
655 | if (rxcw & E1000_RXCW_SYNCH) { | ||
656 | if (!(rxcw & E1000_RXCW_IV)) { | ||
657 | mac->serdes_has_link = true; | ||
658 | e_dbg("SERDES: Link up - autoneg " | ||
659 | "completed successfully.\n"); | ||
660 | } else { | ||
661 | mac->serdes_has_link = false; | ||
662 | e_dbg("SERDES: Link down - invalid" | ||
663 | "codewords detected in autoneg.\n"); | ||
664 | } | ||
665 | } else { | ||
666 | mac->serdes_has_link = false; | ||
667 | e_dbg("SERDES: Link down - no sync.\n"); | ||
668 | } | ||
669 | } else { | ||
670 | mac->serdes_has_link = false; | ||
671 | e_dbg("SERDES: Link down - autoneg failed\n"); | ||
672 | } | ||
673 | } | ||
674 | |||
675 | return 0; | ||
676 | } | ||
677 | |||
678 | /** | ||
679 | * e1000_set_default_fc_generic - Set flow control default values | ||
680 | * @hw: pointer to the HW structure | ||
681 | * | ||
682 | * Read the EEPROM for the default values for flow control and store the | ||
683 | * values. | ||
684 | **/ | ||
685 | static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) | ||
686 | { | ||
687 | s32 ret_val; | ||
688 | u16 nvm_data; | ||
689 | |||
690 | /* | ||
691 | * Read and store word 0x0F of the EEPROM. This word contains bits | ||
692 | * that determine the hardware's default PAUSE (flow control) mode, | ||
693 | * a bit that determines whether the HW defaults to enabling or | ||
694 | * disabling auto-negotiation, and the direction of the | ||
695 | * SW defined pins. If there is no SW over-ride of the flow | ||
696 | * control setting, then the variable hw->fc will | ||
697 | * be initialized based on a value in the EEPROM. | ||
698 | */ | ||
699 | ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); | ||
700 | |||
701 | if (ret_val) { | ||
702 | e_dbg("NVM Read Error\n"); | ||
703 | return ret_val; | ||
704 | } | ||
705 | |||
706 | if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) | ||
707 | hw->fc.requested_mode = e1000_fc_none; | ||
708 | else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == | ||
709 | NVM_WORD0F_ASM_DIR) | ||
710 | hw->fc.requested_mode = e1000_fc_tx_pause; | ||
711 | else | ||
712 | hw->fc.requested_mode = e1000_fc_full; | ||
713 | |||
714 | return 0; | ||
715 | } | ||
716 | |||
717 | /** | ||
718 | * e1000e_setup_link - Setup flow control and link settings | ||
719 | * @hw: pointer to the HW structure | ||
720 | * | ||
721 | * Determines which flow control settings to use, then configures flow | ||
722 | * control. Calls the appropriate media-specific link configuration | ||
723 | * function. Assuming the adapter has a valid link partner, a valid link | ||
724 | * should be established. Assumes the hardware has previously been reset | ||
725 | * and the transmitter and receiver are not enabled. | ||
726 | **/ | ||
727 | s32 e1000e_setup_link(struct e1000_hw *hw) | ||
728 | { | ||
729 | struct e1000_mac_info *mac = &hw->mac; | ||
730 | s32 ret_val; | ||
731 | |||
732 | /* | ||
733 | * In the case of the phy reset being blocked, we already have a link. | ||
734 | * We do not need to set it up again. | ||
735 | */ | ||
736 | if (e1000_check_reset_block(hw)) | ||
737 | return 0; | ||
738 | |||
739 | /* | ||
740 | * If requested flow control is set to default, set flow control | ||
741 | * based on the EEPROM flow control settings. | ||
742 | */ | ||
743 | if (hw->fc.requested_mode == e1000_fc_default) { | ||
744 | ret_val = e1000_set_default_fc_generic(hw); | ||
745 | if (ret_val) | ||
746 | return ret_val; | ||
747 | } | ||
748 | |||
749 | /* | ||
750 | * Save off the requested flow control mode for use later. Depending | ||
751 | * on the link partner's capabilities, we may or may not use this mode. | ||
752 | */ | ||
753 | hw->fc.current_mode = hw->fc.requested_mode; | ||
754 | |||
755 | e_dbg("After fix-ups FlowControl is now = %x\n", | ||
756 | hw->fc.current_mode); | ||
757 | |||
758 | /* Call the necessary media_type subroutine to configure the link. */ | ||
759 | ret_val = mac->ops.setup_physical_interface(hw); | ||
760 | if (ret_val) | ||
761 | return ret_val; | ||
762 | |||
763 | /* | ||
764 | * Initialize the flow control address, type, and PAUSE timer | ||
765 | * registers to their default values. This is done even if flow | ||
766 | * control is disabled, because it does not hurt anything to | ||
767 | * initialize these registers. | ||
768 | */ | ||
769 | e_dbg("Initializing the Flow Control address, type and timer regs\n"); | ||
770 | ew32(FCT, FLOW_CONTROL_TYPE); | ||
771 | ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); | ||
772 | ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); | ||
773 | |||
774 | ew32(FCTTV, hw->fc.pause_time); | ||
775 | |||
776 | return e1000e_set_fc_watermarks(hw); | ||
777 | } | ||
778 | |||
779 | /** | ||
780 | * e1000_commit_fc_settings_generic - Configure flow control | ||
781 | * @hw: pointer to the HW structure | ||
782 | * | ||
783 | * Write the flow control settings to the Transmit Config Word Register (TXCW) | ||
784 | * based on the flow control settings in e1000_mac_info. | ||
785 | **/ | ||
786 | static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) | ||
787 | { | ||
788 | struct e1000_mac_info *mac = &hw->mac; | ||
789 | u32 txcw; | ||
790 | |||
791 | /* | ||
792 | * Check for a software override of the flow control settings, and | ||
793 | * setup the device accordingly. If auto-negotiation is enabled, then | ||
794 | * software will have to set the "PAUSE" bits to the correct value in | ||
795 | * the Transmit Config Word Register (TXCW) and re-start auto- | ||
796 | * negotiation. However, if auto-negotiation is disabled, then | ||
797 | * software will have to manually configure the two flow control enable | ||
798 | * bits in the CTRL register. | ||
799 | * | ||
800 | * The possible values of the "fc" parameter are: | ||
801 | * 0: Flow control is completely disabled | ||
802 | * 1: Rx flow control is enabled (we can receive pause frames, | ||
803 | * but not send pause frames). | ||
804 | * 2: Tx flow control is enabled (we can send pause frames but we | ||
805 | * do not support receiving pause frames). | ||
806 | * 3: Both Rx and Tx flow control (symmetric) are enabled. | ||
807 | */ | ||
808 | switch (hw->fc.current_mode) { | ||
809 | case e1000_fc_none: | ||
810 | /* Flow control completely disabled by a software over-ride. */ | ||
811 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); | ||
812 | break; | ||
813 | case e1000_fc_rx_pause: | ||
814 | /* | ||
815 | * Rx Flow control is enabled and Tx Flow control is disabled | ||
816 | * by a software over-ride. Since there really isn't a way to | ||
817 | * advertise that we are capable of Rx Pause ONLY, we will | ||
818 | * advertise that we support both symmetric and asymmetric Rx | ||
819 | * PAUSE. Later, we will disable the adapter's ability to send | ||
820 | * PAUSE frames. | ||
821 | */ | ||
822 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); | ||
823 | break; | ||
824 | case e1000_fc_tx_pause: | ||
825 | /* | ||
826 | * Tx Flow control is enabled, and Rx Flow control is disabled, | ||
827 | * by a software over-ride. | ||
828 | */ | ||
829 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); | ||
830 | break; | ||
831 | case e1000_fc_full: | ||
832 | /* | ||
833 | * Flow control (both Rx and Tx) is enabled by a software | ||
834 | * over-ride. | ||
835 | */ | ||
836 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); | ||
837 | break; | ||
838 | default: | ||
839 | e_dbg("Flow control param set incorrectly\n"); | ||
840 | return -E1000_ERR_CONFIG; | ||
841 | break; | ||
842 | } | ||
843 | |||
844 | ew32(TXCW, txcw); | ||
845 | mac->txcw = txcw; | ||
846 | |||
847 | return 0; | ||
848 | } | ||
849 | |||
850 | /** | ||
851 | * e1000_poll_fiber_serdes_link_generic - Poll for link up | ||
852 | * @hw: pointer to the HW structure | ||
853 | * | ||
854 | * Polls for link up by reading the status register, if link fails to come | ||
855 | * up with auto-negotiation, then the link is forced if a signal is detected. | ||
856 | **/ | ||
857 | static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) | ||
858 | { | ||
859 | struct e1000_mac_info *mac = &hw->mac; | ||
860 | u32 i, status; | ||
861 | s32 ret_val; | ||
862 | |||
863 | /* | ||
864 | * If we have a signal (the cable is plugged in, or assumed true for | ||
865 | * serdes media) then poll for a "Link-Up" indication in the Device | ||
866 | * Status Register. Time-out if a link isn't seen in 500 milliseconds | ||
867 | * (Auto-negotiation should complete in less than 500 | ||
868 | * milliseconds even if the other end is doing it in SW). | ||
869 | */ | ||
870 | for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { | ||
871 | usleep_range(10000, 20000); | ||
872 | status = er32(STATUS); | ||
873 | if (status & E1000_STATUS_LU) | ||
874 | break; | ||
875 | } | ||
876 | if (i == FIBER_LINK_UP_LIMIT) { | ||
877 | e_dbg("Never got a valid link from auto-neg!!!\n"); | ||
878 | mac->autoneg_failed = 1; | ||
879 | /* | ||
880 | * AutoNeg failed to achieve a link, so we'll call | ||
881 | * mac->check_for_link. This routine will force the | ||
882 | * link up if we detect a signal. This will allow us to | ||
883 | * communicate with non-autonegotiating link partners. | ||
884 | */ | ||
885 | ret_val = mac->ops.check_for_link(hw); | ||
886 | if (ret_val) { | ||
887 | e_dbg("Error while checking for link\n"); | ||
888 | return ret_val; | ||
889 | } | ||
890 | mac->autoneg_failed = 0; | ||
891 | } else { | ||
892 | mac->autoneg_failed = 0; | ||
893 | e_dbg("Valid Link Found\n"); | ||
894 | } | ||
895 | |||
896 | return 0; | ||
897 | } | ||
898 | |||
899 | /** | ||
900 | * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes | ||
901 | * @hw: pointer to the HW structure | ||
902 | * | ||
903 | * Configures collision distance and flow control for fiber and serdes | ||
904 | * links. Upon successful setup, poll for link. | ||
905 | **/ | ||
906 | s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) | ||
907 | { | ||
908 | u32 ctrl; | ||
909 | s32 ret_val; | ||
910 | |||
911 | ctrl = er32(CTRL); | ||
912 | |||
913 | /* Take the link out of reset */ | ||
914 | ctrl &= ~E1000_CTRL_LRST; | ||
915 | |||
916 | e1000e_config_collision_dist(hw); | ||
917 | |||
918 | ret_val = e1000_commit_fc_settings_generic(hw); | ||
919 | if (ret_val) | ||
920 | return ret_val; | ||
921 | |||
922 | /* | ||
923 | * Since auto-negotiation is enabled, take the link out of reset (the | ||
924 | * link will be in reset, because we previously reset the chip). This | ||
925 | * will restart auto-negotiation. If auto-negotiation is successful | ||
926 | * then the link-up status bit will be set and the flow control enable | ||
927 | * bits (RFCE and TFCE) will be set according to their negotiated value. | ||
928 | */ | ||
929 | e_dbg("Auto-negotiation enabled\n"); | ||
930 | |||
931 | ew32(CTRL, ctrl); | ||
932 | e1e_flush(); | ||
933 | usleep_range(1000, 2000); | ||
934 | |||
935 | /* | ||
936 | * For these adapters, the SW definable pin 1 is set when the optics | ||
937 | * detect a signal. If we have a signal, then poll for a "Link-Up" | ||
938 | * indication. | ||
939 | */ | ||
940 | if (hw->phy.media_type == e1000_media_type_internal_serdes || | ||
941 | (er32(CTRL) & E1000_CTRL_SWDPIN1)) { | ||
942 | ret_val = e1000_poll_fiber_serdes_link_generic(hw); | ||
943 | } else { | ||
944 | e_dbg("No signal detected\n"); | ||
945 | } | ||
946 | |||
947 | return 0; | ||
948 | } | ||
949 | |||
950 | /** | ||
951 | * e1000e_config_collision_dist - Configure collision distance | ||
952 | * @hw: pointer to the HW structure | ||
953 | * | ||
954 | * Configures the collision distance to the default value and is used | ||
955 | * during link setup. Currently no func pointer exists and all | ||
956 | * implementations are handled in the generic version of this function. | ||
957 | **/ | ||
958 | void e1000e_config_collision_dist(struct e1000_hw *hw) | ||
959 | { | ||
960 | u32 tctl; | ||
961 | |||
962 | tctl = er32(TCTL); | ||
963 | |||
964 | tctl &= ~E1000_TCTL_COLD; | ||
965 | tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; | ||
966 | |||
967 | ew32(TCTL, tctl); | ||
968 | e1e_flush(); | ||
969 | } | ||
970 | |||
971 | /** | ||
972 | * e1000e_set_fc_watermarks - Set flow control high/low watermarks | ||
973 | * @hw: pointer to the HW structure | ||
974 | * | ||
975 | * Sets the flow control high/low threshold (watermark) registers. If | ||
976 | * flow control XON frame transmission is enabled, then set XON frame | ||
977 | * transmission as well. | ||
978 | **/ | ||
979 | s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) | ||
980 | { | ||
981 | u32 fcrtl = 0, fcrth = 0; | ||
982 | |||
983 | /* | ||
984 | * Set the flow control receive threshold registers. Normally, | ||
985 | * these registers will be set to a default threshold that may be | ||
986 | * adjusted later by the driver's runtime code. However, if the | ||
987 | * ability to transmit pause frames is not enabled, then these | ||
988 | * registers will be set to 0. | ||
989 | */ | ||
990 | if (hw->fc.current_mode & e1000_fc_tx_pause) { | ||
991 | /* | ||
992 | * We need to set up the Receive Threshold high and low water | ||
993 | * marks as well as (optionally) enabling the transmission of | ||
994 | * XON frames. | ||
995 | */ | ||
996 | fcrtl = hw->fc.low_water; | ||
997 | fcrtl |= E1000_FCRTL_XONE; | ||
998 | fcrth = hw->fc.high_water; | ||
999 | } | ||
1000 | ew32(FCRTL, fcrtl); | ||
1001 | ew32(FCRTH, fcrth); | ||
1002 | |||
1003 | return 0; | ||
1004 | } | ||
1005 | |||
1006 | /** | ||
1007 | * e1000e_force_mac_fc - Force the MAC's flow control settings | ||
1008 | * @hw: pointer to the HW structure | ||
1009 | * | ||
1010 | * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the | ||
1011 | * device control register to reflect the adapter settings. TFCE and RFCE | ||
1012 | * need to be explicitly set by software when a copper PHY is used because | ||
1013 | * autonegotiation is managed by the PHY rather than the MAC. Software must | ||
1014 | * also configure these bits when link is forced on a fiber connection. | ||
1015 | **/ | ||
1016 | s32 e1000e_force_mac_fc(struct e1000_hw *hw) | ||
1017 | { | ||
1018 | u32 ctrl; | ||
1019 | |||
1020 | ctrl = er32(CTRL); | ||
1021 | |||
1022 | /* | ||
1023 | * Because we didn't get link via the internal auto-negotiation | ||
1024 | * mechanism (we either forced link or we got link via PHY | ||
1025 | * auto-neg), we have to manually enable/disable transmit and | ||
1026 | * receive flow control. | ||
1027 | * | ||
1028 | * The "Case" statement below enables/disable flow control | ||
1029 | * according to the "hw->fc.current_mode" parameter. | ||
1030 | * | ||
1031 | * The possible values of the "fc" parameter are: | ||
1032 | * 0: Flow control is completely disabled | ||
1033 | * 1: Rx flow control is enabled (we can receive pause | ||
1034 | * frames but not send pause frames). | ||
1035 | * 2: Tx flow control is enabled (we can send pause frames | ||
1036 | * but we do not receive pause frames). | ||
1037 | * 3: Both Rx and Tx flow control (symmetric) is enabled. | ||
1038 | * other: No other values should be possible at this point. | ||
1039 | */ | ||
1040 | e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); | ||
1041 | |||
1042 | switch (hw->fc.current_mode) { | ||
1043 | case e1000_fc_none: | ||
1044 | ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); | ||
1045 | break; | ||
1046 | case e1000_fc_rx_pause: | ||
1047 | ctrl &= (~E1000_CTRL_TFCE); | ||
1048 | ctrl |= E1000_CTRL_RFCE; | ||
1049 | break; | ||
1050 | case e1000_fc_tx_pause: | ||
1051 | ctrl &= (~E1000_CTRL_RFCE); | ||
1052 | ctrl |= E1000_CTRL_TFCE; | ||
1053 | break; | ||
1054 | case e1000_fc_full: | ||
1055 | ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); | ||
1056 | break; | ||
1057 | default: | ||
1058 | e_dbg("Flow control param set incorrectly\n"); | ||
1059 | return -E1000_ERR_CONFIG; | ||
1060 | } | ||
1061 | |||
1062 | ew32(CTRL, ctrl); | ||
1063 | |||
1064 | return 0; | ||
1065 | } | ||
1066 | |||
1067 | /** | ||
1068 | * e1000e_config_fc_after_link_up - Configures flow control after link up | ||
1069 | * @hw: pointer to the HW structure | ||
1070 | * | ||
1071 | * Checks the status of auto-negotiation after link up to ensure that the | ||
1072 | * speed and duplex were not forced. If the link needed to be forced, then | ||
1073 | * flow control needs to be forced also. If auto-negotiation is enabled | ||
1074 | * and did not fail, then we configure flow control based on our link | ||
1075 | * partner. | ||
1076 | **/ | ||
1077 | s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | ||
1078 | { | ||
1079 | struct e1000_mac_info *mac = &hw->mac; | ||
1080 | s32 ret_val = 0; | ||
1081 | u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; | ||
1082 | u16 speed, duplex; | ||
1083 | |||
1084 | /* | ||
1085 | * Check for the case where we have fiber media and auto-neg failed | ||
1086 | * so we had to force link. In this case, we need to force the | ||
1087 | * configuration of the MAC to match the "fc" parameter. | ||
1088 | */ | ||
1089 | if (mac->autoneg_failed) { | ||
1090 | if (hw->phy.media_type == e1000_media_type_fiber || | ||
1091 | hw->phy.media_type == e1000_media_type_internal_serdes) | ||
1092 | ret_val = e1000e_force_mac_fc(hw); | ||
1093 | } else { | ||
1094 | if (hw->phy.media_type == e1000_media_type_copper) | ||
1095 | ret_val = e1000e_force_mac_fc(hw); | ||
1096 | } | ||
1097 | |||
1098 | if (ret_val) { | ||
1099 | e_dbg("Error forcing flow control settings\n"); | ||
1100 | return ret_val; | ||
1101 | } | ||
1102 | |||
1103 | /* | ||
1104 | * Check for the case where we have copper media and auto-neg is | ||
1105 | * enabled. In this case, we need to check and see if Auto-Neg | ||
1106 | * has completed, and if so, how the PHY and link partner have | ||
1107 | * flow control configured. | ||
1108 | */ | ||
1109 | if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { | ||
1110 | /* | ||
1111 | * Read the MII Status Register and check to see if AutoNeg | ||
1112 | * has completed. We read this twice because this reg has | ||
1113 | * some "sticky" (latched) bits. | ||
1114 | */ | ||
1115 | ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); | ||
1116 | if (ret_val) | ||
1117 | return ret_val; | ||
1118 | ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); | ||
1119 | if (ret_val) | ||
1120 | return ret_val; | ||
1121 | |||
1122 | if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { | ||
1123 | e_dbg("Copper PHY and Auto Neg " | ||
1124 | "has not completed.\n"); | ||
1125 | return ret_val; | ||
1126 | } | ||
1127 | |||
1128 | /* | ||
1129 | * The AutoNeg process has completed, so we now need to | ||
1130 | * read both the Auto Negotiation Advertisement | ||
1131 | * Register (Address 4) and the Auto_Negotiation Base | ||
1132 | * Page Ability Register (Address 5) to determine how | ||
1133 | * flow control was negotiated. | ||
1134 | */ | ||
1135 | ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); | ||
1136 | if (ret_val) | ||
1137 | return ret_val; | ||
1138 | ret_val = | ||
1139 | e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); | ||
1140 | if (ret_val) | ||
1141 | return ret_val; | ||
1142 | |||
1143 | /* | ||
1144 | * Two bits in the Auto Negotiation Advertisement Register | ||
1145 | * (Address 4) and two bits in the Auto Negotiation Base | ||
1146 | * Page Ability Register (Address 5) determine flow control | ||
1147 | * for both the PHY and the link partner. The following | ||
1148 | * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, | ||
1149 | * 1999, describes these PAUSE resolution bits and how flow | ||
1150 | * control is determined based upon these settings. | ||
1151 | * NOTE: DC = Don't Care | ||
1152 | * | ||
1153 | * LOCAL DEVICE | LINK PARTNER | ||
1154 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution | ||
1155 | *-------|---------|-------|---------|-------------------- | ||
1156 | * 0 | 0 | DC | DC | e1000_fc_none | ||
1157 | * 0 | 1 | 0 | DC | e1000_fc_none | ||
1158 | * 0 | 1 | 1 | 0 | e1000_fc_none | ||
1159 | * 0 | 1 | 1 | 1 | e1000_fc_tx_pause | ||
1160 | * 1 | 0 | 0 | DC | e1000_fc_none | ||
1161 | * 1 | DC | 1 | DC | e1000_fc_full | ||
1162 | * 1 | 1 | 0 | 0 | e1000_fc_none | ||
1163 | * 1 | 1 | 0 | 1 | e1000_fc_rx_pause | ||
1164 | * | ||
1165 | * Are both PAUSE bits set to 1? If so, this implies | ||
1166 | * Symmetric Flow Control is enabled at both ends. The | ||
1167 | * ASM_DIR bits are irrelevant per the spec. | ||
1168 | * | ||
1169 | * For Symmetric Flow Control: | ||
1170 | * | ||
1171 | * LOCAL DEVICE | LINK PARTNER | ||
1172 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result | ||
1173 | *-------|---------|-------|---------|-------------------- | ||
1174 | * 1 | DC | 1 | DC | e1000_fc_full | ||
1175 | * | ||
1176 | */ | ||
1177 | if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && | ||
1178 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { | ||
1179 | /* | ||
1180 | * Now we need to check if the user selected Rx-only | ||
1181 | * pause frames. In this case, we had to advertise | ||
1182 | * FULL flow control because we could not advertise Rx | ||
1183 | * ONLY. Hence, we must now check to see if we need to | ||
1184 | * turn OFF the TRANSMISSION of PAUSE frames. | ||
1185 | */ | ||
1186 | if (hw->fc.requested_mode == e1000_fc_full) { | ||
1187 | hw->fc.current_mode = e1000_fc_full; | ||
1188 | e_dbg("Flow Control = FULL.\r\n"); | ||
1189 | } else { | ||
1190 | hw->fc.current_mode = e1000_fc_rx_pause; | ||
1191 | e_dbg("Flow Control = " | ||
1192 | "Rx PAUSE frames only.\r\n"); | ||
1193 | } | ||
1194 | } | ||
1195 | /* | ||
1196 | * For receiving PAUSE frames ONLY. | ||
1197 | * | ||
1198 | * LOCAL DEVICE | LINK PARTNER | ||
1199 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result | ||
1200 | *-------|---------|-------|---------|-------------------- | ||
1201 | * 0 | 1 | 1 | 1 | e1000_fc_tx_pause | ||
1202 | */ | ||
1203 | else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && | ||
1204 | (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && | ||
1205 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && | ||
1206 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { | ||
1207 | hw->fc.current_mode = e1000_fc_tx_pause; | ||
1208 | e_dbg("Flow Control = Tx PAUSE frames only.\r\n"); | ||
1209 | } | ||
1210 | /* | ||
1211 | * For transmitting PAUSE frames ONLY. | ||
1212 | * | ||
1213 | * LOCAL DEVICE | LINK PARTNER | ||
1214 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result | ||
1215 | *-------|---------|-------|---------|-------------------- | ||
1216 | * 1 | 1 | 0 | 1 | e1000_fc_rx_pause | ||
1217 | */ | ||
1218 | else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && | ||
1219 | (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && | ||
1220 | !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && | ||
1221 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { | ||
1222 | hw->fc.current_mode = e1000_fc_rx_pause; | ||
1223 | e_dbg("Flow Control = Rx PAUSE frames only.\r\n"); | ||
1224 | } else { | ||
1225 | /* | ||
1226 | * Per the IEEE spec, at this point flow control | ||
1227 | * should be disabled. | ||
1228 | */ | ||
1229 | hw->fc.current_mode = e1000_fc_none; | ||
1230 | e_dbg("Flow Control = NONE.\r\n"); | ||
1231 | } | ||
1232 | |||
1233 | /* | ||
1234 | * Now we need to do one last check... If we auto- | ||
1235 | * negotiated to HALF DUPLEX, flow control should not be | ||
1236 | * enabled per IEEE 802.3 spec. | ||
1237 | */ | ||
1238 | ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); | ||
1239 | if (ret_val) { | ||
1240 | e_dbg("Error getting link speed and duplex\n"); | ||
1241 | return ret_val; | ||
1242 | } | ||
1243 | |||
1244 | if (duplex == HALF_DUPLEX) | ||
1245 | hw->fc.current_mode = e1000_fc_none; | ||
1246 | |||
1247 | /* | ||
1248 | * Now we call a subroutine to actually force the MAC | ||
1249 | * controller to use the correct flow control settings. | ||
1250 | */ | ||
1251 | ret_val = e1000e_force_mac_fc(hw); | ||
1252 | if (ret_val) { | ||
1253 | e_dbg("Error forcing flow control settings\n"); | ||
1254 | return ret_val; | ||
1255 | } | ||
1256 | } | ||
1257 | |||
1258 | return 0; | ||
1259 | } | ||
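The PAUSE/ASM_DIR resolution table spelled out in the comments above can be captured in a few lines. Below is a minimal user-space sketch of that table, not driver code; the enum and function names are illustrative, and the Rx-only fallback mirrors the hw->fc.requested_mode == e1000_fc_full check in the function.

#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode resolve_fc(bool loc_pause, bool loc_asm,
                               bool lp_pause, bool lp_asm,
                               enum fc_mode requested)
{
        /* Both ends advertise symmetric PAUSE: full, unless we only wanted Rx. */
        if (loc_pause && lp_pause)
                return (requested == FC_FULL) ? FC_FULL : FC_RX_PAUSE;

        /* 0/1 locally, 1/1 from the partner: we may send but not receive PAUSE. */
        if (!loc_pause && loc_asm && lp_pause && lp_asm)
                return FC_TX_PAUSE;

        /* 1/1 locally, 0/1 from the partner: we may receive but not send PAUSE. */
        if (loc_pause && loc_asm && !lp_pause && lp_asm)
                return FC_RX_PAUSE;

        /* Every other combination resolves to no flow control. */
        return FC_NONE;
}

int main(void)
{
        /* Local advertises PAUSE+ASM_DIR, partner advertises PAUSE only. */
        printf("%d\n", resolve_fc(true, true, true, false, FC_FULL)); /* 3 (FC_FULL) */
        return 0;
}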
1260 | |||
1261 | /** | ||
1262 | * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex | ||
1263 | * @hw: pointer to the HW structure | ||
1264 | * @speed: stores the current speed | ||
1265 | * @duplex: stores the current duplex | ||
1266 | * | ||
1267 | * Read the status register for the current speed/duplex and store the current | ||
1268 | * speed and duplex for copper connections. | ||
1269 | **/ | ||
1270 | s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex) | ||
1271 | { | ||
1272 | u32 status; | ||
1273 | |||
1274 | status = er32(STATUS); | ||
1275 | if (status & E1000_STATUS_SPEED_1000) | ||
1276 | *speed = SPEED_1000; | ||
1277 | else if (status & E1000_STATUS_SPEED_100) | ||
1278 | *speed = SPEED_100; | ||
1279 | else | ||
1280 | *speed = SPEED_10; | ||
1281 | |||
1282 | if (status & E1000_STATUS_FD) | ||
1283 | *duplex = FULL_DUPLEX; | ||
1284 | else | ||
1285 | *duplex = HALF_DUPLEX; | ||
1286 | |||
1287 | e_dbg("%u Mbps, %s Duplex\n", | ||
1288 | *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10, | ||
1289 | *duplex == FULL_DUPLEX ? "Full" : "Half"); | ||
1290 | |||
1291 | return 0; | ||
1292 | } | ||
1293 | |||
1294 | /** | ||
1295 | * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex | ||
1296 | * @hw: pointer to the HW structure | ||
1297 | * @speed: stores the current speed | ||
1298 | * @duplex: stores the current duplex | ||
1299 | * | ||
1300 | * Sets the speed and duplex to gigabit full duplex (the only possible option) | ||
1301 | * for fiber/serdes links. | ||
1302 | **/ | ||
1303 | s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex) | ||
1304 | { | ||
1305 | *speed = SPEED_1000; | ||
1306 | *duplex = FULL_DUPLEX; | ||
1307 | |||
1308 | return 0; | ||
1309 | } | ||
1310 | |||
1311 | /** | ||
1312 | * e1000e_get_hw_semaphore - Acquire hardware semaphore | ||
1313 | * @hw: pointer to the HW structure | ||
1314 | * | ||
1315 | * Acquire the HW semaphore to access the PHY or NVM | ||
1316 | **/ | ||
1317 | s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) | ||
1318 | { | ||
1319 | u32 swsm; | ||
1320 | s32 timeout = hw->nvm.word_size + 1; | ||
1321 | s32 i = 0; | ||
1322 | |||
1323 | /* Get the SW semaphore */ | ||
1324 | while (i < timeout) { | ||
1325 | swsm = er32(SWSM); | ||
1326 | if (!(swsm & E1000_SWSM_SMBI)) | ||
1327 | break; | ||
1328 | |||
1329 | udelay(50); | ||
1330 | i++; | ||
1331 | } | ||
1332 | |||
1333 | if (i == timeout) { | ||
1334 | e_dbg("Driver can't access device - SMBI bit is set.\n"); | ||
1335 | return -E1000_ERR_NVM; | ||
1336 | } | ||
1337 | |||
1338 | /* Get the FW semaphore. */ | ||
1339 | for (i = 0; i < timeout; i++) { | ||
1340 | swsm = er32(SWSM); | ||
1341 | ew32(SWSM, swsm | E1000_SWSM_SWESMBI); | ||
1342 | |||
1343 | /* Semaphore acquired if bit latched */ | ||
1344 | if (er32(SWSM) & E1000_SWSM_SWESMBI) | ||
1345 | break; | ||
1346 | |||
1347 | udelay(50); | ||
1348 | } | ||
1349 | |||
1350 | if (i == timeout) { | ||
1351 | /* Release semaphores */ | ||
1352 | e1000e_put_hw_semaphore(hw); | ||
1353 | e_dbg("Driver can't access the NVM\n"); | ||
1354 | return -E1000_ERR_NVM; | ||
1355 | } | ||
1356 | |||
1357 | return 0; | ||
1358 | } | ||
1359 | |||
1360 | /** | ||
1361 | * e1000e_put_hw_semaphore - Release hardware semaphore | ||
1362 | * @hw: pointer to the HW structure | ||
1363 | * | ||
1364 | * Release hardware semaphore used to access the PHY or NVM | ||
1365 | **/ | ||
1366 | void e1000e_put_hw_semaphore(struct e1000_hw *hw) | ||
1367 | { | ||
1368 | u32 swsm; | ||
1369 | |||
1370 | swsm = er32(SWSM); | ||
1371 | swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); | ||
1372 | ew32(SWSM, swsm); | ||
1373 | } | ||
1374 | |||
1375 | /** | ||
1376 | * e1000e_get_auto_rd_done - Check for auto read completion | ||
1377 | * @hw: pointer to the HW structure | ||
1378 | * | ||
1379 | * Check EEPROM for Auto Read done bit. | ||
1380 | **/ | ||
1381 | s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) | ||
1382 | { | ||
1383 | s32 i = 0; | ||
1384 | |||
1385 | while (i < AUTO_READ_DONE_TIMEOUT) { | ||
1386 | if (er32(EECD) & E1000_EECD_AUTO_RD) | ||
1387 | break; | ||
1388 | usleep_range(1000, 2000); | ||
1389 | i++; | ||
1390 | } | ||
1391 | |||
1392 | if (i == AUTO_READ_DONE_TIMEOUT) { | ||
1393 | e_dbg("Auto read by HW from NVM has not completed.\n"); | ||
1394 | return -E1000_ERR_RESET; | ||
1395 | } | ||
1396 | |||
1397 | return 0; | ||
1398 | } | ||
1399 | |||
1400 | /** | ||
1401 | * e1000e_valid_led_default - Verify a valid default LED config | ||
1402 | * @hw: pointer to the HW structure | ||
1403 | * @data: pointer to the NVM (EEPROM) | ||
1404 | * | ||
1405 | * Read the EEPROM for the current default LED configuration. If the | ||
1406 | * LED configuration is not valid, set to a valid LED configuration. | ||
1407 | **/ | ||
1408 | s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) | ||
1409 | { | ||
1410 | s32 ret_val; | ||
1411 | |||
1412 | ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); | ||
1413 | if (ret_val) { | ||
1414 | e_dbg("NVM Read Error\n"); | ||
1415 | return ret_val; | ||
1416 | } | ||
1417 | |||
1418 | if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) | ||
1419 | *data = ID_LED_DEFAULT; | ||
1420 | |||
1421 | return 0; | ||
1422 | } | ||
1423 | |||
1424 | /** | ||
1425 | * e1000e_id_led_init - Initialize ID LED settings from the NVM | ||
1426 | * @hw: pointer to the HW structure | ||
1427 | * | ||
1428 | **/ | ||
1429 | s32 e1000e_id_led_init(struct e1000_hw *hw) | ||
1430 | { | ||
1431 | struct e1000_mac_info *mac = &hw->mac; | ||
1432 | s32 ret_val; | ||
1433 | const u32 ledctl_mask = 0x000000FF; | ||
1434 | const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; | ||
1435 | const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; | ||
1436 | u16 data, i, temp; | ||
1437 | const u16 led_mask = 0x0F; | ||
1438 | |||
1439 | ret_val = hw->nvm.ops.valid_led_default(hw, &data); | ||
1440 | if (ret_val) | ||
1441 | return ret_val; | ||
1442 | |||
1443 | mac->ledctl_default = er32(LEDCTL); | ||
1444 | mac->ledctl_mode1 = mac->ledctl_default; | ||
1445 | mac->ledctl_mode2 = mac->ledctl_default; | ||
1446 | |||
1447 | for (i = 0; i < 4; i++) { | ||
1448 | temp = (data >> (i << 2)) & led_mask; | ||
1449 | switch (temp) { | ||
1450 | case ID_LED_ON1_DEF2: | ||
1451 | case ID_LED_ON1_ON2: | ||
1452 | case ID_LED_ON1_OFF2: | ||
1453 | mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); | ||
1454 | mac->ledctl_mode1 |= ledctl_on << (i << 3); | ||
1455 | break; | ||
1456 | case ID_LED_OFF1_DEF2: | ||
1457 | case ID_LED_OFF1_ON2: | ||
1458 | case ID_LED_OFF1_OFF2: | ||
1459 | mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); | ||
1460 | mac->ledctl_mode1 |= ledctl_off << (i << 3); | ||
1461 | break; | ||
1462 | default: | ||
1463 | /* Do nothing */ | ||
1464 | break; | ||
1465 | } | ||
1466 | switch (temp) { | ||
1467 | case ID_LED_DEF1_ON2: | ||
1468 | case ID_LED_ON1_ON2: | ||
1469 | case ID_LED_OFF1_ON2: | ||
1470 | mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); | ||
1471 | mac->ledctl_mode2 |= ledctl_on << (i << 3); | ||
1472 | break; | ||
1473 | case ID_LED_DEF1_OFF2: | ||
1474 | case ID_LED_ON1_OFF2: | ||
1475 | case ID_LED_OFF1_OFF2: | ||
1476 | mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); | ||
1477 | mac->ledctl_mode2 |= ledctl_off << (i << 3); | ||
1478 | break; | ||
1479 | default: | ||
1480 | /* Do nothing */ | ||
1481 | break; | ||
1482 | } | ||
1483 | } | ||
1484 | |||
1485 | return 0; | ||
1486 | } | ||
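As a side note, the "i << 2" versus "i << 3" shifts above reflect two different layouts: the NVM ID-LED word packs one 4-bit setting per LED, while the LEDCTL image reserves one byte per LED. A small user-space sketch with made-up values, not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t id_led_word = 0x8421;  /* example NVM word: four 4-bit LED fields */
        uint32_t ledctl = 0;
        const uint32_t mode_on = 0x0E;  /* value used above for the "LED on" mode */

        for (int i = 0; i < 4; i++) {
                uint16_t field = (id_led_word >> (i << 2)) & 0xF;  /* 4 bits per LED */

                printf("LED%d nvm field = 0x%X\n", i, field);

                /* One byte of the LEDCTL image per LED, as in the loop above. */
                ledctl &= ~(0xFFu << (i << 3));
                ledctl |= mode_on << (i << 3);
        }
        printf("ledctl image = 0x%08X\n", (unsigned)ledctl);
        return 0;
}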
1487 | |||
1488 | /** | ||
1489 | * e1000e_setup_led_generic - Configures SW controllable LED | ||
1490 | * @hw: pointer to the HW structure | ||
1491 | * | ||
1492 | * This prepares the SW controllable LED for use and saves the current state | ||
1493 | * of the LED so it can be later restored. | ||
1494 | **/ | ||
1495 | s32 e1000e_setup_led_generic(struct e1000_hw *hw) | ||
1496 | { | ||
1497 | u32 ledctl; | ||
1498 | |||
1499 | if (hw->mac.ops.setup_led != e1000e_setup_led_generic) | ||
1500 | return -E1000_ERR_CONFIG; | ||
1501 | |||
1502 | if (hw->phy.media_type == e1000_media_type_fiber) { | ||
1503 | ledctl = er32(LEDCTL); | ||
1504 | hw->mac.ledctl_default = ledctl; | ||
1505 | /* Turn off LED0 */ | ||
1506 | ledctl &= ~(E1000_LEDCTL_LED0_IVRT | | ||
1507 | E1000_LEDCTL_LED0_BLINK | | ||
1508 | E1000_LEDCTL_LED0_MODE_MASK); | ||
1509 | ledctl |= (E1000_LEDCTL_MODE_LED_OFF << | ||
1510 | E1000_LEDCTL_LED0_MODE_SHIFT); | ||
1511 | ew32(LEDCTL, ledctl); | ||
1512 | } else if (hw->phy.media_type == e1000_media_type_copper) { | ||
1513 | ew32(LEDCTL, hw->mac.ledctl_mode1); | ||
1514 | } | ||
1515 | |||
1516 | return 0; | ||
1517 | } | ||
1518 | |||
1519 | /** | ||
1520 | * e1000e_cleanup_led_generic - Set LED config to default operation | ||
1521 | * @hw: pointer to the HW structure | ||
1522 | * | ||
1523 | * Remove the current LED configuration and set the LED configuration | ||
1524 | * to the default value, saved from the EEPROM. | ||
1525 | **/ | ||
1526 | s32 e1000e_cleanup_led_generic(struct e1000_hw *hw) | ||
1527 | { | ||
1528 | ew32(LEDCTL, hw->mac.ledctl_default); | ||
1529 | return 0; | ||
1530 | } | ||
1531 | |||
1532 | /** | ||
1533 | * e1000e_blink_led_generic - Blink LED | ||
1534 | * @hw: pointer to the HW structure | ||
1535 | * | ||
1536 | * Blink the LEDs which are set to be on. | ||
1537 | **/ | ||
1538 | s32 e1000e_blink_led_generic(struct e1000_hw *hw) | ||
1539 | { | ||
1540 | u32 ledctl_blink = 0; | ||
1541 | u32 i; | ||
1542 | |||
1543 | if (hw->phy.media_type == e1000_media_type_fiber) { | ||
1544 | /* always blink LED0 for PCI-E fiber */ | ||
1545 | ledctl_blink = E1000_LEDCTL_LED0_BLINK | | ||
1546 | (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); | ||
1547 | } else { | ||
1548 | /* | ||
1549 | * set the blink bit for each LED that's "on" (0x0E) | ||
1550 | * in ledctl_mode2 | ||
1551 | */ | ||
1552 | ledctl_blink = hw->mac.ledctl_mode2; | ||
1553 | for (i = 0; i < 4; i++) | ||
1554 | if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == | ||
1555 | E1000_LEDCTL_MODE_LED_ON) | ||
1556 | ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << | ||
1557 | (i * 8)); | ||
1558 | } | ||
1559 | |||
1560 | ew32(LEDCTL, ledctl_blink); | ||
1561 | |||
1562 | return 0; | ||
1563 | } | ||
1564 | |||
1565 | /** | ||
1566 | * e1000e_led_on_generic - Turn LED on | ||
1567 | * @hw: pointer to the HW structure | ||
1568 | * | ||
1569 | * Turn LED on. | ||
1570 | **/ | ||
1571 | s32 e1000e_led_on_generic(struct e1000_hw *hw) | ||
1572 | { | ||
1573 | u32 ctrl; | ||
1574 | |||
1575 | switch (hw->phy.media_type) { | ||
1576 | case e1000_media_type_fiber: | ||
1577 | ctrl = er32(CTRL); | ||
1578 | ctrl &= ~E1000_CTRL_SWDPIN0; | ||
1579 | ctrl |= E1000_CTRL_SWDPIO0; | ||
1580 | ew32(CTRL, ctrl); | ||
1581 | break; | ||
1582 | case e1000_media_type_copper: | ||
1583 | ew32(LEDCTL, hw->mac.ledctl_mode2); | ||
1584 | break; | ||
1585 | default: | ||
1586 | break; | ||
1587 | } | ||
1588 | |||
1589 | return 0; | ||
1590 | } | ||
1591 | |||
1592 | /** | ||
1593 | * e1000e_led_off_generic - Turn LED off | ||
1594 | * @hw: pointer to the HW structure | ||
1595 | * | ||
1596 | * Turn LED off. | ||
1597 | **/ | ||
1598 | s32 e1000e_led_off_generic(struct e1000_hw *hw) | ||
1599 | { | ||
1600 | u32 ctrl; | ||
1601 | |||
1602 | switch (hw->phy.media_type) { | ||
1603 | case e1000_media_type_fiber: | ||
1604 | ctrl = er32(CTRL); | ||
1605 | ctrl |= E1000_CTRL_SWDPIN0; | ||
1606 | ctrl |= E1000_CTRL_SWDPIO0; | ||
1607 | ew32(CTRL, ctrl); | ||
1608 | break; | ||
1609 | case e1000_media_type_copper: | ||
1610 | ew32(LEDCTL, hw->mac.ledctl_mode1); | ||
1611 | break; | ||
1612 | default: | ||
1613 | break; | ||
1614 | } | ||
1615 | |||
1616 | return 0; | ||
1617 | } | ||
1618 | |||
1619 | /** | ||
1620 | * e1000e_set_pcie_no_snoop - Set PCI-express capabilities | ||
1621 | * @hw: pointer to the HW structure | ||
1622 | * @no_snoop: bitmap of snoop events | ||
1623 | * | ||
1624 | * Set the PCI-Express no-snoop bits for the events enabled in 'no_snoop'. | ||
1625 | **/ | ||
1626 | void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop) | ||
1627 | { | ||
1628 | u32 gcr; | ||
1629 | |||
1630 | if (no_snoop) { | ||
1631 | gcr = er32(GCR); | ||
1632 | gcr &= ~(PCIE_NO_SNOOP_ALL); | ||
1633 | gcr |= no_snoop; | ||
1634 | ew32(GCR, gcr); | ||
1635 | } | ||
1636 | } | ||
1637 | |||
1638 | /** | ||
1639 | * e1000e_disable_pcie_master - Disables PCI-express master access | ||
1640 | * @hw: pointer to the HW structure | ||
1641 | * | ||
1642 | * Returns 0 if successful, else returns -10 | ||
1643 | * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused | ||
1644 | * the master requests to be disabled. | ||
1645 | * | ||
1646 | * Disables PCI-Express master access and verifies there are no pending | ||
1647 | * requests. | ||
1648 | **/ | ||
1649 | s32 e1000e_disable_pcie_master(struct e1000_hw *hw) | ||
1650 | { | ||
1651 | u32 ctrl; | ||
1652 | s32 timeout = MASTER_DISABLE_TIMEOUT; | ||
1653 | |||
1654 | ctrl = er32(CTRL); | ||
1655 | ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; | ||
1656 | ew32(CTRL, ctrl); | ||
1657 | |||
1658 | while (timeout) { | ||
1659 | if (!(er32(STATUS) & | ||
1660 | E1000_STATUS_GIO_MASTER_ENABLE)) | ||
1661 | break; | ||
1662 | udelay(100); | ||
1663 | timeout--; | ||
1664 | } | ||
1665 | |||
1666 | if (!timeout) { | ||
1667 | e_dbg("Master requests are pending.\n"); | ||
1668 | return -E1000_ERR_MASTER_REQUESTS_PENDING; | ||
1669 | } | ||
1670 | |||
1671 | return 0; | ||
1672 | } | ||
1673 | |||
1674 | /** | ||
1675 | * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing | ||
1676 | * @hw: pointer to the HW structure | ||
1677 | * | ||
1678 | * Reset the Adaptive Interframe Spacing throttle to default values. | ||
1679 | **/ | ||
1680 | void e1000e_reset_adaptive(struct e1000_hw *hw) | ||
1681 | { | ||
1682 | struct e1000_mac_info *mac = &hw->mac; | ||
1683 | |||
1684 | if (!mac->adaptive_ifs) { | ||
1685 | e_dbg("Not in Adaptive IFS mode!\n"); | ||
1686 | goto out; | ||
1687 | } | ||
1688 | |||
1689 | mac->current_ifs_val = 0; | ||
1690 | mac->ifs_min_val = IFS_MIN; | ||
1691 | mac->ifs_max_val = IFS_MAX; | ||
1692 | mac->ifs_step_size = IFS_STEP; | ||
1693 | mac->ifs_ratio = IFS_RATIO; | ||
1694 | |||
1695 | mac->in_ifs_mode = false; | ||
1696 | ew32(AIT, 0); | ||
1697 | out: | ||
1698 | return; | ||
1699 | } | ||
1700 | |||
1701 | /** | ||
1702 | * e1000e_update_adaptive - Update Adaptive Interframe Spacing | ||
1703 | * @hw: pointer to the HW structure | ||
1704 | * | ||
1705 | * Update the Adaptive Interframe Spacing Throttle value based on the | ||
1706 | * time between transmitted packets and time between collisions. | ||
1707 | **/ | ||
1708 | void e1000e_update_adaptive(struct e1000_hw *hw) | ||
1709 | { | ||
1710 | struct e1000_mac_info *mac = &hw->mac; | ||
1711 | |||
1712 | if (!mac->adaptive_ifs) { | ||
1713 | e_dbg("Not in Adaptive IFS mode!\n"); | ||
1714 | goto out; | ||
1715 | } | ||
1716 | |||
1717 | if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { | ||
1718 | if (mac->tx_packet_delta > MIN_NUM_XMITS) { | ||
1719 | mac->in_ifs_mode = true; | ||
1720 | if (mac->current_ifs_val < mac->ifs_max_val) { | ||
1721 | if (!mac->current_ifs_val) | ||
1722 | mac->current_ifs_val = mac->ifs_min_val; | ||
1723 | else | ||
1724 | mac->current_ifs_val += | ||
1725 | mac->ifs_step_size; | ||
1726 | ew32(AIT, mac->current_ifs_val); | ||
1727 | } | ||
1728 | } | ||
1729 | } else { | ||
1730 | if (mac->in_ifs_mode && | ||
1731 | (mac->tx_packet_delta <= MIN_NUM_XMITS)) { | ||
1732 | mac->current_ifs_val = 0; | ||
1733 | mac->in_ifs_mode = false; | ||
1734 | ew32(AIT, 0); | ||
1735 | } | ||
1736 | } | ||
1737 | out: | ||
1738 | return; | ||
1739 | } | ||
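The update rule above boils down to: if collisions are growing faster than transmitted packets (scaled by the IFS ratio) and enough packets have been sent, step the AIT value up toward the maximum; otherwise drop it back to zero. A simplified stand-alone sketch (it collapses the in_ifs_mode bookkeeping, and the constants are made up rather than the real IFS_* values):

#include <stdio.h>

int main(void)
{
        /* Made-up stand-ins for IFS_MIN/IFS_MAX/IFS_STEP/IFS_RATIO/MIN_NUM_XMITS. */
        unsigned ifs_val = 0, ifs_min = 80, ifs_max = 800, ifs_step = 10, ratio = 4;
        unsigned collision_delta = 60, tx_packet_delta = 100, min_xmits = 10;

        if (collision_delta * ratio > tx_packet_delta &&
            tx_packet_delta > min_xmits) {
                /* Congested: raise the inter-frame spacing by one step. */
                if (ifs_val < ifs_max)
                        ifs_val = ifs_val ? ifs_val + ifs_step : ifs_min;
        } else {
                /* Calm again: drop back to no extra spacing. */
                ifs_val = 0;
        }

        printf("AIT = %u\n", ifs_val);  /* 80 for these deltas */
        return 0;
}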
1740 | |||
1741 | /** | ||
1742 | * e1000_raise_eec_clk - Raise EEPROM clock | ||
1743 | * @hw: pointer to the HW structure | ||
1744 | * @eecd: pointer to the EEPROM | ||
1745 | * | ||
1746 | * Enable/Raise the EEPROM clock bit. | ||
1747 | **/ | ||
1748 | static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) | ||
1749 | { | ||
1750 | *eecd = *eecd | E1000_EECD_SK; | ||
1751 | ew32(EECD, *eecd); | ||
1752 | e1e_flush(); | ||
1753 | udelay(hw->nvm.delay_usec); | ||
1754 | } | ||
1755 | |||
1756 | /** | ||
1757 | * e1000_lower_eec_clk - Lower EEPROM clock | ||
1758 | * @hw: pointer to the HW structure | ||
1759 | * @eecd: pointer to the EEPROM | ||
1760 | * | ||
1761 | * Clear/Lower the EEPROM clock bit. | ||
1762 | **/ | ||
1763 | static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) | ||
1764 | { | ||
1765 | *eecd = *eecd & ~E1000_EECD_SK; | ||
1766 | ew32(EECD, *eecd); | ||
1767 | e1e_flush(); | ||
1768 | udelay(hw->nvm.delay_usec); | ||
1769 | } | ||
1770 | |||
1771 | /** | ||
1772 | * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM | ||
1773 | * @hw: pointer to the HW structure | ||
1774 | * @data: data to send to the EEPROM | ||
1775 | * @count: number of bits to shift out | ||
1776 | * | ||
1777 | * We need to shift 'count' bits out to the EEPROM. So, the value in the | ||
1778 | * "data" parameter will be shifted out to the EEPROM one bit at a time. | ||
1779 | * In order to do this, "data" must be broken down into bits. | ||
1780 | **/ | ||
1781 | static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) | ||
1782 | { | ||
1783 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
1784 | u32 eecd = er32(EECD); | ||
1785 | u32 mask; | ||
1786 | |||
1787 | mask = 0x01 << (count - 1); | ||
1788 | if (nvm->type == e1000_nvm_eeprom_spi) | ||
1789 | eecd |= E1000_EECD_DO; | ||
1790 | |||
1791 | do { | ||
1792 | eecd &= ~E1000_EECD_DI; | ||
1793 | |||
1794 | if (data & mask) | ||
1795 | eecd |= E1000_EECD_DI; | ||
1796 | |||
1797 | ew32(EECD, eecd); | ||
1798 | e1e_flush(); | ||
1799 | |||
1800 | udelay(nvm->delay_usec); | ||
1801 | |||
1802 | e1000_raise_eec_clk(hw, &eecd); | ||
1803 | e1000_lower_eec_clk(hw, &eecd); | ||
1804 | |||
1805 | mask >>= 1; | ||
1806 | } while (mask); | ||
1807 | |||
1808 | eecd &= ~E1000_EECD_DI; | ||
1809 | ew32(EECD, eecd); | ||
1810 | } | ||
1811 | |||
1812 | /** | ||
1813 | * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM | ||
1814 | * @hw: pointer to the HW structure | ||
1815 | * @count: number of bits to shift in | ||
1816 | * | ||
1817 | * In order to read a register from the EEPROM, we need to shift 'count' bits | ||
1818 | * in from the EEPROM. Bits are "shifted in" by raising the clock input to | ||
1819 | * the EEPROM (setting the SK bit), and then reading the value of the data out | ||
1820 | * "DO" bit. During this "shifting in" process the data in "DI" bit should | ||
1821 | * always be clear. | ||
1822 | **/ | ||
1823 | static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) | ||
1824 | { | ||
1825 | u32 eecd; | ||
1826 | u32 i; | ||
1827 | u16 data; | ||
1828 | |||
1829 | eecd = er32(EECD); | ||
1830 | |||
1831 | eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); | ||
1832 | data = 0; | ||
1833 | |||
1834 | for (i = 0; i < count; i++) { | ||
1835 | data <<= 1; | ||
1836 | e1000_raise_eec_clk(hw, &eecd); | ||
1837 | |||
1838 | eecd = er32(EECD); | ||
1839 | |||
1840 | eecd &= ~E1000_EECD_DI; | ||
1841 | if (eecd & E1000_EECD_DO) | ||
1842 | data |= 1; | ||
1843 | |||
1844 | e1000_lower_eec_clk(hw, &eecd); | ||
1845 | } | ||
1846 | |||
1847 | return data; | ||
1848 | } | ||
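Stripped of the EECD register accesses and clock/delay handling, the two helpers above are plain MSB-first shift-out/shift-in routines. A user-space sketch where the "wire" is just a variable (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

static uint32_t wire;   /* stands in for the EECD DI/DO data lines */

static void shift_out_bits(uint16_t data, uint16_t count)
{
        uint16_t mask = 1u << (count - 1);

        while (mask) {
                /* Clock one bit out, most significant bit first. */
                wire = (wire << 1) | !!(data & mask);
                mask >>= 1;
        }
}

static uint16_t shift_in_bits(uint16_t count)
{
        uint16_t data = 0;

        for (uint16_t i = 0; i < count; i++) {
                data <<= 1;
                data |= (uint16_t)((wire >> (count - 1 - i)) & 1);  /* sample one bit */
        }
        return data;
}

int main(void)
{
        shift_out_bits(0xBEEF, 16);
        printf("0x%04X\n", shift_in_bits(16));  /* prints 0xBEEF */
        return 0;
}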
1849 | |||
1850 | /** | ||
1851 | * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion | ||
1852 | * @hw: pointer to the HW structure | ||
1853 | * @ee_reg: EEPROM flag for polling | ||
1854 | * | ||
1855 | * Polls the EEPROM status bit for either read or write completion based | ||
1856 | * upon the value of 'ee_reg'. | ||
1857 | **/ | ||
1858 | s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) | ||
1859 | { | ||
1860 | u32 attempts = 100000; | ||
1861 | u32 i, reg = 0; | ||
1862 | |||
1863 | for (i = 0; i < attempts; i++) { | ||
1864 | if (ee_reg == E1000_NVM_POLL_READ) | ||
1865 | reg = er32(EERD); | ||
1866 | else | ||
1867 | reg = er32(EEWR); | ||
1868 | |||
1869 | if (reg & E1000_NVM_RW_REG_DONE) | ||
1870 | return 0; | ||
1871 | |||
1872 | udelay(5); | ||
1873 | } | ||
1874 | |||
1875 | return -E1000_ERR_NVM; | ||
1876 | } | ||
1877 | |||
1878 | /** | ||
1879 | * e1000e_acquire_nvm - Generic request for access to EEPROM | ||
1880 | * @hw: pointer to the HW structure | ||
1881 | * | ||
1882 | * Set the EEPROM access request bit and wait for EEPROM access grant bit. | ||
1883 | * Return successful if access grant bit set, else clear the request for | ||
1884 | * EEPROM access and return -E1000_ERR_NVM (-1). | ||
1885 | **/ | ||
1886 | s32 e1000e_acquire_nvm(struct e1000_hw *hw) | ||
1887 | { | ||
1888 | u32 eecd = er32(EECD); | ||
1889 | s32 timeout = E1000_NVM_GRANT_ATTEMPTS; | ||
1890 | |||
1891 | ew32(EECD, eecd | E1000_EECD_REQ); | ||
1892 | eecd = er32(EECD); | ||
1893 | |||
1894 | while (timeout) { | ||
1895 | if (eecd & E1000_EECD_GNT) | ||
1896 | break; | ||
1897 | udelay(5); | ||
1898 | eecd = er32(EECD); | ||
1899 | timeout--; | ||
1900 | } | ||
1901 | |||
1902 | if (!timeout) { | ||
1903 | eecd &= ~E1000_EECD_REQ; | ||
1904 | ew32(EECD, eecd); | ||
1905 | e_dbg("Could not acquire NVM grant\n"); | ||
1906 | return -E1000_ERR_NVM; | ||
1907 | } | ||
1908 | |||
1909 | return 0; | ||
1910 | } | ||
1911 | |||
1912 | /** | ||
1913 | * e1000_standby_nvm - Return EEPROM to standby state | ||
1914 | * @hw: pointer to the HW structure | ||
1915 | * | ||
1916 | * Return the EEPROM to a standby state. | ||
1917 | **/ | ||
1918 | static void e1000_standby_nvm(struct e1000_hw *hw) | ||
1919 | { | ||
1920 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
1921 | u32 eecd = er32(EECD); | ||
1922 | |||
1923 | if (nvm->type == e1000_nvm_eeprom_spi) { | ||
1924 | /* Toggle CS to flush commands */ | ||
1925 | eecd |= E1000_EECD_CS; | ||
1926 | ew32(EECD, eecd); | ||
1927 | e1e_flush(); | ||
1928 | udelay(nvm->delay_usec); | ||
1929 | eecd &= ~E1000_EECD_CS; | ||
1930 | ew32(EECD, eecd); | ||
1931 | e1e_flush(); | ||
1932 | udelay(nvm->delay_usec); | ||
1933 | } | ||
1934 | } | ||
1935 | |||
1936 | /** | ||
1937 | * e1000_stop_nvm - Terminate EEPROM command | ||
1938 | * @hw: pointer to the HW structure | ||
1939 | * | ||
1940 | * Terminates the current command by inverting the EEPROM's chip select pin. | ||
1941 | **/ | ||
1942 | static void e1000_stop_nvm(struct e1000_hw *hw) | ||
1943 | { | ||
1944 | u32 eecd; | ||
1945 | |||
1946 | eecd = er32(EECD); | ||
1947 | if (hw->nvm.type == e1000_nvm_eeprom_spi) { | ||
1948 | /* Pull CS high */ | ||
1949 | eecd |= E1000_EECD_CS; | ||
1950 | e1000_lower_eec_clk(hw, &eecd); | ||
1951 | } | ||
1952 | } | ||
1953 | |||
1954 | /** | ||
1955 | * e1000e_release_nvm - Release exclusive access to EEPROM | ||
1956 | * @hw: pointer to the HW structure | ||
1957 | * | ||
1958 | * Stop any current commands to the EEPROM and clear the EEPROM request bit. | ||
1959 | **/ | ||
1960 | void e1000e_release_nvm(struct e1000_hw *hw) | ||
1961 | { | ||
1962 | u32 eecd; | ||
1963 | |||
1964 | e1000_stop_nvm(hw); | ||
1965 | |||
1966 | eecd = er32(EECD); | ||
1967 | eecd &= ~E1000_EECD_REQ; | ||
1968 | ew32(EECD, eecd); | ||
1969 | } | ||
1970 | |||
1971 | /** | ||
1972 | * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write | ||
1973 | * @hw: pointer to the HW structure | ||
1974 | * | ||
1975 | * Sets up the EEPROM for reading and writing. | ||
1976 | **/ | ||
1977 | static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) | ||
1978 | { | ||
1979 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
1980 | u32 eecd = er32(EECD); | ||
1981 | u8 spi_stat_reg; | ||
1982 | |||
1983 | if (nvm->type == e1000_nvm_eeprom_spi) { | ||
1984 | u16 timeout = NVM_MAX_RETRY_SPI; | ||
1985 | |||
1986 | /* Clear SK and CS */ | ||
1987 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); | ||
1988 | ew32(EECD, eecd); | ||
1989 | e1e_flush(); | ||
1990 | udelay(1); | ||
1991 | |||
1992 | /* | ||
1993 | * Read "Status Register" repeatedly until the LSB is cleared. | ||
1994 | * The EEPROM will signal that the command has been completed | ||
1995 | * by clearing bit 0 of the internal status register. If it's | ||
1996 | * not cleared within 'timeout', then error out. | ||
1997 | */ | ||
1998 | while (timeout) { | ||
1999 | e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, | ||
2000 | hw->nvm.opcode_bits); | ||
2001 | spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); | ||
2002 | if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) | ||
2003 | break; | ||
2004 | |||
2005 | udelay(5); | ||
2006 | e1000_standby_nvm(hw); | ||
2007 | timeout--; | ||
2008 | } | ||
2009 | |||
2010 | if (!timeout) { | ||
2011 | e_dbg("SPI NVM Status error\n"); | ||
2012 | return -E1000_ERR_NVM; | ||
2013 | } | ||
2014 | } | ||
2015 | |||
2016 | return 0; | ||
2017 | } | ||
2018 | |||
2019 | /** | ||
2020 | * e1000e_read_nvm_eerd - Reads EEPROM using EERD register | ||
2021 | * @hw: pointer to the HW structure | ||
2022 | * @offset: offset of word in the EEPROM to read | ||
2023 | * @words: number of words to read | ||
2024 | * @data: word read from the EEPROM | ||
2025 | * | ||
2026 | * Reads a 16 bit word from the EEPROM using the EERD register. | ||
2027 | **/ | ||
2028 | s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | ||
2029 | { | ||
2030 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
2031 | u32 i, eerd = 0; | ||
2032 | s32 ret_val = 0; | ||
2033 | |||
2034 | /* | ||
2035 | * A check for invalid values: offset too large, too many words, | ||
2036 | * too many words for the offset, and not enough words. | ||
2037 | */ | ||
2038 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | ||
2039 | (words == 0)) { | ||
2040 | e_dbg("nvm parameter(s) out of bounds\n"); | ||
2041 | return -E1000_ERR_NVM; | ||
2042 | } | ||
2043 | |||
2044 | for (i = 0; i < words; i++) { | ||
2045 | eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + | ||
2046 | E1000_NVM_RW_REG_START; | ||
2047 | |||
2048 | ew32(EERD, eerd); | ||
2049 | ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); | ||
2050 | if (ret_val) | ||
2051 | break; | ||
2052 | |||
2053 | data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA); | ||
2054 | } | ||
2055 | |||
2056 | return ret_val; | ||
2057 | } | ||
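For reference, the EERD access above is a simple command-word dance: pack the word offset into the address field, set the START bit, poll for DONE, then take the word from the data field. The sketch below uses placeholder bit positions (the real E1000_NVM_RW_* values live in the driver headers), so treat it as an illustration of the layout rather than the actual register map.

#include <stdint.h>
#include <stdio.h>

/* Placeholder layout; the driver's E1000_NVM_RW_* constants differ. */
#define RW_REG_START  0x01u
#define RW_REG_DONE   0x02u
#define RW_ADDR_SHIFT 2
#define RW_DATA_SHIFT 16

int main(void)
{
        uint16_t offset = 0x10;
        uint32_t eerd = ((uint32_t)offset << RW_ADDR_SHIFT) | RW_REG_START;

        printf("command word = 0x%08X\n", (unsigned)eerd);

        /* Once hardware flags DONE, the word sits in the upper data field. */
        uint32_t completed = eerd | RW_REG_DONE | (0xBABAu << RW_DATA_SHIFT);
        printf("data = 0x%04X\n", (unsigned)(completed >> RW_DATA_SHIFT));
        return 0;
}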
2058 | |||
2059 | /** | ||
2060 | * e1000e_write_nvm_spi - Write to EEPROM using SPI | ||
2061 | * @hw: pointer to the HW structure | ||
2062 | * @offset: offset within the EEPROM to be written to | ||
2063 | * @words: number of words to write | ||
2064 | * @data: 16 bit word(s) to be written to the EEPROM | ||
2065 | * | ||
2066 | * Writes data to EEPROM at offset using SPI interface. | ||
2067 | * | ||
2068 | * If e1000e_update_nvm_checksum is not called after this function, the | ||
2069 | * EEPROM will most likely contain an invalid checksum. | ||
2070 | **/ | ||
2071 | s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | ||
2072 | { | ||
2073 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
2074 | s32 ret_val; | ||
2075 | u16 widx = 0; | ||
2076 | |||
2077 | /* | ||
2078 | * A check for invalid values: offset too large, too many words, | ||
2079 | * and not enough words. | ||
2080 | */ | ||
2081 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | ||
2082 | (words == 0)) { | ||
2083 | e_dbg("nvm parameter(s) out of bounds\n"); | ||
2084 | return -E1000_ERR_NVM; | ||
2085 | } | ||
2086 | |||
2087 | ret_val = nvm->ops.acquire(hw); | ||
2088 | if (ret_val) | ||
2089 | return ret_val; | ||
2090 | |||
2091 | while (widx < words) { | ||
2092 | u8 write_opcode = NVM_WRITE_OPCODE_SPI; | ||
2093 | |||
2094 | ret_val = e1000_ready_nvm_eeprom(hw); | ||
2095 | if (ret_val) { | ||
2096 | nvm->ops.release(hw); | ||
2097 | return ret_val; | ||
2098 | } | ||
2099 | |||
2100 | e1000_standby_nvm(hw); | ||
2101 | |||
2102 | /* Send the WRITE ENABLE command (8 bit opcode) */ | ||
2103 | e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, | ||
2104 | nvm->opcode_bits); | ||
2105 | |||
2106 | e1000_standby_nvm(hw); | ||
2107 | |||
2108 | /* | ||
2109 | * Some SPI eeproms use the 8th address bit embedded in the | ||
2110 | * opcode | ||
2111 | */ | ||
2112 | if ((nvm->address_bits == 8) && (offset >= 128)) | ||
2113 | write_opcode |= NVM_A8_OPCODE_SPI; | ||
2114 | |||
2115 | /* Send the Write command (8-bit opcode + addr) */ | ||
2116 | e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); | ||
2117 | e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), | ||
2118 | nvm->address_bits); | ||
2119 | |||
2120 | /* Loop to allow for up to whole page write of eeprom */ | ||
2121 | while (widx < words) { | ||
2122 | u16 word_out = data[widx]; | ||
2123 | word_out = (word_out >> 8) | (word_out << 8); | ||
2124 | e1000_shift_out_eec_bits(hw, word_out, 16); | ||
2125 | widx++; | ||
2126 | |||
2127 | if ((((offset + widx) * 2) % nvm->page_size) == 0) { | ||
2128 | e1000_standby_nvm(hw); | ||
2129 | break; | ||
2130 | } | ||
2131 | } | ||
2132 | } | ||
2133 | |||
2134 | usleep_range(10000, 20000); | ||
2135 | nvm->ops.release(hw); | ||
2136 | return 0; | ||
2137 | } | ||
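One detail worth calling out in the write path above: each 16-bit word is byte-swapped before being handed to the MSB-first shifter, so the low byte of the word goes out on the wire first. A tiny stand-alone sketch of that swap:

#include <stdint.h>
#include <stdio.h>

static uint16_t swap16(uint16_t w)
{
        /* Same expression as word_out above: exchange the two bytes. */
        return (uint16_t)((w >> 8) | (w << 8));
}

int main(void)
{
        printf("0x%04X\n", swap16(0x1234));     /* prints 0x3412 */
        return 0;
}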
2138 | |||
2139 | /** | ||
2140 | * e1000_read_pba_string_generic - Read device part number | ||
2141 | * @hw: pointer to the HW structure | ||
2142 | * @pba_num: pointer to device part number | ||
2143 | * @pba_num_size: size of part number buffer | ||
2144 | * | ||
2145 | * Reads the product board assembly (PBA) number from the EEPROM and stores | ||
2146 | * the value in pba_num. | ||
2147 | **/ | ||
2148 | s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, | ||
2149 | u32 pba_num_size) | ||
2150 | { | ||
2151 | s32 ret_val; | ||
2152 | u16 nvm_data; | ||
2153 | u16 pba_ptr; | ||
2154 | u16 offset; | ||
2155 | u16 length; | ||
2156 | |||
2157 | if (pba_num == NULL) { | ||
2158 | e_dbg("PBA string buffer was null\n"); | ||
2159 | ret_val = E1000_ERR_INVALID_ARGUMENT; | ||
2160 | goto out; | ||
2161 | } | ||
2162 | |||
2163 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); | ||
2164 | if (ret_val) { | ||
2165 | e_dbg("NVM Read Error\n"); | ||
2166 | goto out; | ||
2167 | } | ||
2168 | |||
2169 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); | ||
2170 | if (ret_val) { | ||
2171 | e_dbg("NVM Read Error\n"); | ||
2172 | goto out; | ||
2173 | } | ||
2174 | |||
2175 | /* | ||
2176 | * if nvm_data is not the pointer guard value, the PBA must be in legacy | ||
2177 | * format, which means pba_ptr is actually our second data word for the | ||
2178 | * PBA number and we can decode it into an ASCII string | ||
2179 | */ | ||
2180 | if (nvm_data != NVM_PBA_PTR_GUARD) { | ||
2181 | e_dbg("NVM PBA number is not stored as string\n"); | ||
2182 | |||
2183 | /* we will need 11 characters to store the PBA */ | ||
2184 | if (pba_num_size < 11) { | ||
2185 | e_dbg("PBA string buffer too small\n"); | ||
2186 | return E1000_ERR_NO_SPACE; | ||
2187 | } | ||
2188 | |||
2189 | /* extract hex string from data and pba_ptr */ | ||
2190 | pba_num[0] = (nvm_data >> 12) & 0xF; | ||
2191 | pba_num[1] = (nvm_data >> 8) & 0xF; | ||
2192 | pba_num[2] = (nvm_data >> 4) & 0xF; | ||
2193 | pba_num[3] = nvm_data & 0xF; | ||
2194 | pba_num[4] = (pba_ptr >> 12) & 0xF; | ||
2195 | pba_num[5] = (pba_ptr >> 8) & 0xF; | ||
2196 | pba_num[6] = '-'; | ||
2197 | pba_num[7] = 0; | ||
2198 | pba_num[8] = (pba_ptr >> 4) & 0xF; | ||
2199 | pba_num[9] = pba_ptr & 0xF; | ||
2200 | |||
2201 | /* put a null character on the end of our string */ | ||
2202 | pba_num[10] = '\0'; | ||
2203 | |||
2204 | /* switch all the data but the '-' to hex char */ | ||
2205 | for (offset = 0; offset < 10; offset++) { | ||
2206 | if (pba_num[offset] < 0xA) | ||
2207 | pba_num[offset] += '0'; | ||
2208 | else if (pba_num[offset] < 0x10) | ||
2209 | pba_num[offset] += 'A' - 0xA; | ||
2210 | } | ||
2211 | |||
2212 | goto out; | ||
2213 | } | ||
2214 | |||
2215 | ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length); | ||
2216 | if (ret_val) { | ||
2217 | e_dbg("NVM Read Error\n"); | ||
2218 | goto out; | ||
2219 | } | ||
2220 | |||
2221 | if (length == 0xFFFF || length == 0) { | ||
2222 | e_dbg("NVM PBA number section invalid length\n"); | ||
2223 | ret_val = E1000_ERR_NVM_PBA_SECTION; | ||
2224 | goto out; | ||
2225 | } | ||
2226 | /* check if pba_num buffer is big enough */ | ||
2227 | if (pba_num_size < (((u32)length * 2) - 1)) { | ||
2228 | e_dbg("PBA string buffer too small\n"); | ||
2229 | ret_val = E1000_ERR_NO_SPACE; | ||
2230 | goto out; | ||
2231 | } | ||
2232 | |||
2233 | /* trim pba length from start of string */ | ||
2234 | pba_ptr++; | ||
2235 | length--; | ||
2236 | |||
2237 | for (offset = 0; offset < length; offset++) { | ||
2238 | ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data); | ||
2239 | if (ret_val) { | ||
2240 | e_dbg("NVM Read Error\n"); | ||
2241 | goto out; | ||
2242 | } | ||
2243 | pba_num[offset * 2] = (u8)(nvm_data >> 8); | ||
2244 | pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); | ||
2245 | } | ||
2246 | pba_num[offset * 2] = '\0'; | ||
2247 | |||
2248 | out: | ||
2249 | return ret_val; | ||
2250 | } | ||
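The legacy (non-string) branch above turns two NVM words into a ten-character part number: six hex digits, a dash, a literal '0', and two more hex digits. A user-space sketch of that decode with fabricated input words:

#include <stdint.h>
#include <stdio.h>

static void decode_legacy_pba(uint16_t word0, uint16_t word1, char out[11])
{
        const uint8_t nib[8] = {
                (uint8_t)((word0 >> 12) & 0xF), (uint8_t)((word0 >> 8) & 0xF),
                (uint8_t)((word0 >> 4) & 0xF),  (uint8_t)(word0 & 0xF),
                (uint8_t)((word1 >> 12) & 0xF), (uint8_t)((word1 >> 8) & 0xF),
                (uint8_t)((word1 >> 4) & 0xF),  (uint8_t)(word1 & 0xF),
        };
        int pos = 0;

        for (int i = 0; i < 8; i++) {
                if (i == 6) {
                        out[pos++] = '-';       /* dash before the last two digits */
                        out[pos++] = '0';       /* the driver stores a literal zero here */
                }
                out[pos++] = nib[i] < 0xA ? (char)('0' + nib[i])
                                          : (char)('A' + nib[i] - 0xA);
        }
        out[pos] = '\0';
}

int main(void)
{
        char pba[11];

        decode_legacy_pba(0x1234, 0x56A7, pba);
        printf("%s\n", pba);    /* prints 123456-0A7 */
        return 0;
}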
2251 | |||
2252 | /** | ||
2253 | * e1000_read_mac_addr_generic - Read device MAC address | ||
2254 | * @hw: pointer to the HW structure | ||
2255 | * | ||
2256 | * Reads the device MAC address from the EEPROM and stores the value. | ||
2257 | * Since devices with two ports use the same EEPROM, we increment the | ||
2258 | * last bit in the MAC address for the second port. | ||
2259 | **/ | ||
2260 | s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) | ||
2261 | { | ||
2262 | u32 rar_high; | ||
2263 | u32 rar_low; | ||
2264 | u16 i; | ||
2265 | |||
2266 | rar_high = er32(RAH(0)); | ||
2267 | rar_low = er32(RAL(0)); | ||
2268 | |||
2269 | for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) | ||
2270 | hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); | ||
2271 | |||
2272 | for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) | ||
2273 | hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); | ||
2274 | |||
2275 | for (i = 0; i < ETH_ALEN; i++) | ||
2276 | hw->mac.addr[i] = hw->mac.perm_addr[i]; | ||
2277 | |||
2278 | return 0; | ||
2279 | } | ||
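The unpacking above is straightforward byte extraction: the low receive-address register holds the first four MAC bytes, least significant byte first, and the high register holds the last two. A self-contained sketch with made-up register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t ral = 0x563412AB;      /* bytes AB:12:34:56, low byte first */
        uint32_t rah = 0x00009A78;      /* bytes 78:9A in the low 16 bits */
        uint8_t mac[6];

        for (int i = 0; i < 4; i++)
                mac[i] = (uint8_t)(ral >> (i * 8));
        for (int i = 0; i < 2; i++)
                mac[4 + i] = (uint8_t)(rah >> (i * 8));

        printf("%02X:%02X:%02X:%02X:%02X:%02X\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}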
2280 | |||
2281 | /** | ||
2282 | * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum | ||
2283 | * @hw: pointer to the HW structure | ||
2284 | * | ||
2285 | * Calculates the EEPROM checksum by reading/adding each word of the EEPROM | ||
2286 | * and then verifies that the sum of the EEPROM is equal to 0xBABA. | ||
2287 | **/ | ||
2288 | s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) | ||
2289 | { | ||
2290 | s32 ret_val; | ||
2291 | u16 checksum = 0; | ||
2292 | u16 i, nvm_data; | ||
2293 | |||
2294 | for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { | ||
2295 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); | ||
2296 | if (ret_val) { | ||
2297 | e_dbg("NVM Read Error\n"); | ||
2298 | return ret_val; | ||
2299 | } | ||
2300 | checksum += nvm_data; | ||
2301 | } | ||
2302 | |||
2303 | if (checksum != (u16) NVM_SUM) { | ||
2304 | e_dbg("NVM Checksum Invalid\n"); | ||
2305 | return -E1000_ERR_NVM; | ||
2306 | } | ||
2307 | |||
2308 | return 0; | ||
2309 | } | ||
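The validation rule above is that the 16-bit sum of all NVM words up to and including the checksum word equals 0xBABA; the update routine that follows simply stores NVM_SUM minus the running sum. A user-space sketch with a fabricated NVM image:

#include <stdint.h>
#include <stdio.h>

#define NVM_WORDS 0x40          /* words 0x00..0x3E plus the checksum at 0x3F */
#define NVM_SUM   0xBABAu

int main(void)
{
        uint16_t nvm[NVM_WORDS] = { 0x1111, 0x2222, 0x3333 };  /* rest zero */
        uint16_t sum = 0;

        /* Update pass: store a checksum word so the total sums to NVM_SUM. */
        for (int i = 0; i < NVM_WORDS - 1; i++)
                sum += nvm[i];
        nvm[NVM_WORDS - 1] = (uint16_t)(NVM_SUM - sum);

        /* Validation pass, mirroring e1000e_validate_nvm_checksum_generic(). */
        sum = 0;
        for (int i = 0; i < NVM_WORDS; i++)
                sum += nvm[i];
        printf("checksum %s\n", sum == NVM_SUM ? "valid" : "invalid");
        return 0;
}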
2310 | |||
2311 | /** | ||
2312 | * e1000e_update_nvm_checksum_generic - Update EEPROM checksum | ||
2313 | * @hw: pointer to the HW structure | ||
2314 | * | ||
2315 | * Updates the EEPROM checksum by reading/adding each word of the EEPROM | ||
2316 | * up to the checksum. Then calculates the EEPROM checksum and writes the | ||
2317 | * value to the EEPROM. | ||
2318 | **/ | ||
2319 | s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) | ||
2320 | { | ||
2321 | s32 ret_val; | ||
2322 | u16 checksum = 0; | ||
2323 | u16 i, nvm_data; | ||
2324 | |||
2325 | for (i = 0; i < NVM_CHECKSUM_REG; i++) { | ||
2326 | ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); | ||
2327 | if (ret_val) { | ||
2328 | e_dbg("NVM Read Error while updating checksum.\n"); | ||
2329 | return ret_val; | ||
2330 | } | ||
2331 | checksum += nvm_data; | ||
2332 | } | ||
2333 | checksum = (u16) NVM_SUM - checksum; | ||
2334 | ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); | ||
2335 | if (ret_val) | ||
2336 | e_dbg("NVM Write Error while updating checksum.\n"); | ||
2337 | |||
2338 | return ret_val; | ||
2339 | } | ||
2340 | |||
2341 | /** | ||
2342 | * e1000e_reload_nvm - Reloads EEPROM | ||
2343 | * @hw: pointer to the HW structure | ||
2344 | * | ||
2345 | * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the | ||
2346 | * extended control register. | ||
2347 | **/ | ||
2348 | void e1000e_reload_nvm(struct e1000_hw *hw) | ||
2349 | { | ||
2350 | u32 ctrl_ext; | ||
2351 | |||
2352 | udelay(10); | ||
2353 | ctrl_ext = er32(CTRL_EXT); | ||
2354 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; | ||
2355 | ew32(CTRL_EXT, ctrl_ext); | ||
2356 | e1e_flush(); | ||
2357 | } | ||
2358 | |||
2359 | /** | ||
2360 | * e1000_calculate_checksum - Calculate checksum for buffer | ||
2361 | * @buffer: pointer to EEPROM | ||
2362 | * @length: size of EEPROM to calculate a checksum for | ||
2363 | * | ||
2364 | * Calculates the checksum for some buffer on a specified length. The | ||
2365 | * checksum calculated is returned. | ||
2366 | **/ | ||
2367 | static u8 e1000_calculate_checksum(u8 *buffer, u32 length) | ||
2368 | { | ||
2369 | u32 i; | ||
2370 | u8 sum = 0; | ||
2371 | |||
2372 | if (!buffer) | ||
2373 | return 0; | ||
2374 | |||
2375 | for (i = 0; i < length; i++) | ||
2376 | sum += buffer[i]; | ||
2377 | |||
2378 | return (u8) (0 - sum); | ||
2379 | } | ||
2380 | |||
2381 | /** | ||
2382 | * e1000_mng_enable_host_if - Checks host interface is enabled | ||
2383 | * @hw: pointer to the HW structure | ||
2384 | * | ||
2385 | * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND | ||
2386 | * | ||
2387 | * This function checks whether the HOST IF is enabled for command operation | ||
2388 | * and also checks whether the previous command is completed. It busy waits | ||
2389 | * if the previous command has not completed. | ||
2390 | **/ | ||
2391 | static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) | ||
2392 | { | ||
2393 | u32 hicr; | ||
2394 | u8 i; | ||
2395 | |||
2396 | if (!(hw->mac.arc_subsystem_valid)) { | ||
2397 | e_dbg("ARC subsystem not valid.\n"); | ||
2398 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | ||
2399 | } | ||
2400 | |||
2401 | /* Check that the host interface is enabled. */ | ||
2402 | hicr = er32(HICR); | ||
2403 | if ((hicr & E1000_HICR_EN) == 0) { | ||
2404 | e_dbg("E1000_HOST_EN bit disabled.\n"); | ||
2405 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | ||
2406 | } | ||
2407 | /* check the previous command is completed */ | ||
2408 | for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { | ||
2409 | hicr = er32(HICR); | ||
2410 | if (!(hicr & E1000_HICR_C)) | ||
2411 | break; | ||
2412 | mdelay(1); | ||
2413 | } | ||
2414 | |||
2415 | if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { | ||
2416 | e_dbg("Previous command timed out.\n"); | ||
2417 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | ||
2418 | } | ||
2419 | |||
2420 | return 0; | ||
2421 | } | ||
2422 | |||
2423 | /** | ||
2424 | * e1000e_check_mng_mode_generic - check management mode | ||
2425 | * @hw: pointer to the HW structure | ||
2426 | * | ||
2427 | * Reads the firmware semaphore register and returns true (>0) if | ||
2428 | * manageability is enabled, else false (0). | ||
2429 | **/ | ||
2430 | bool e1000e_check_mng_mode_generic(struct e1000_hw *hw) | ||
2431 | { | ||
2432 | u32 fwsm = er32(FWSM); | ||
2433 | |||
2434 | return (fwsm & E1000_FWSM_MODE_MASK) == | ||
2435 | (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); | ||
2436 | } | ||
2437 | |||
2438 | /** | ||
2439 | * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx | ||
2440 | * @hw: pointer to the HW structure | ||
2441 | * | ||
2442 | * Enables packet filtering on transmit packets if manageability is enabled | ||
2443 | * and host interface is enabled. | ||
2444 | **/ | ||
2445 | bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) | ||
2446 | { | ||
2447 | struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; | ||
2448 | u32 *buffer = (u32 *)&hw->mng_cookie; | ||
2449 | u32 offset; | ||
2450 | s32 ret_val, hdr_csum, csum; | ||
2451 | u8 i, len; | ||
2452 | |||
2453 | hw->mac.tx_pkt_filtering = true; | ||
2454 | |||
2455 | /* No manageability, no filtering */ | ||
2456 | if (!e1000e_check_mng_mode(hw)) { | ||
2457 | hw->mac.tx_pkt_filtering = false; | ||
2458 | goto out; | ||
2459 | } | ||
2460 | |||
2461 | /* | ||
2462 | * If we can't read from the host interface for whatever | ||
2463 | * reason, disable filtering. | ||
2464 | */ | ||
2465 | ret_val = e1000_mng_enable_host_if(hw); | ||
2466 | if (ret_val) { | ||
2467 | hw->mac.tx_pkt_filtering = false; | ||
2468 | goto out; | ||
2469 | } | ||
2470 | |||
2471 | /* Read in the header. Length and offset are in dwords. */ | ||
2472 | len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; | ||
2473 | offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; | ||
2474 | for (i = 0; i < len; i++) | ||
2475 | *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i); | ||
2476 | hdr_csum = hdr->checksum; | ||
2477 | hdr->checksum = 0; | ||
2478 | csum = e1000_calculate_checksum((u8 *)hdr, | ||
2479 | E1000_MNG_DHCP_COOKIE_LENGTH); | ||
2480 | /* | ||
2481 | * If either the checksums or signature don't match, then | ||
2482 | * the cookie area isn't considered valid, in which case we | ||
2483 | * take the safe route of assuming Tx filtering is enabled. | ||
2484 | */ | ||
2485 | if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { | ||
2486 | hw->mac.tx_pkt_filtering = true; | ||
2487 | goto out; | ||
2488 | } | ||
2489 | |||
2490 | /* Cookie area is valid, make the final check for filtering. */ | ||
2491 | if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { | ||
2492 | hw->mac.tx_pkt_filtering = false; | ||
2493 | goto out; | ||
2494 | } | ||
2495 | |||
2496 | out: | ||
2497 | return hw->mac.tx_pkt_filtering; | ||
2498 | } | ||
2499 | |||
2500 | /** | ||
2501 | * e1000_mng_write_cmd_header - Writes manageability command header | ||
2502 | * @hw: pointer to the HW structure | ||
2503 | * @hdr: pointer to the host interface command header | ||
2504 | * | ||
2505 | * Writes the command header after doing the checksum calculation. | ||
2506 | **/ | ||
2507 | static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, | ||
2508 | struct e1000_host_mng_command_header *hdr) | ||
2509 | { | ||
2510 | u16 i, length = sizeof(struct e1000_host_mng_command_header); | ||
2511 | |||
2512 | /* Write the whole command header structure with new checksum. */ | ||
2513 | |||
2514 | hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); | ||
2515 | |||
2516 | length >>= 2; | ||
2517 | /* Write the relevant command block into the ram area. */ | ||
2518 | for (i = 0; i < length; i++) { | ||
2519 | E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, | ||
2520 | *((u32 *) hdr + i)); | ||
2521 | e1e_flush(); | ||
2522 | } | ||
2523 | |||
2524 | return 0; | ||
2525 | } | ||
2526 | |||
2527 | /** | ||
2528 | * e1000_mng_host_if_write - Write to the manageability host interface | ||
2529 | * @hw: pointer to the HW structure | ||
2530 | * @buffer: pointer to the host interface buffer | ||
2531 | * @length: size of the buffer | ||
2532 | * @offset: location in the buffer to write to | ||
2533 | * @sum: sum of the data (not checksum) | ||
2534 | * | ||
2535 | * This function writes the buffer content at the given offset on the host | ||
2536 | * interface. It handles alignment so the writes are done in the most efficient | ||
2537 | * way. It also accumulates the sum of the buffer into the *sum parameter. | ||
2538 | **/ | ||
2539 | static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, | ||
2540 | u16 length, u16 offset, u8 *sum) | ||
2541 | { | ||
2542 | u8 *tmp; | ||
2543 | u8 *bufptr = buffer; | ||
2544 | u32 data = 0; | ||
2545 | u16 remaining, i, j, prev_bytes; | ||
2546 | |||
2547 | /* sum is only the sum of the data; it is not a checksum */ | ||
2548 | |||
2549 | if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) | ||
2550 | return -E1000_ERR_PARAM; | ||
2551 | |||
2552 | tmp = (u8 *)&data; | ||
2553 | prev_bytes = offset & 0x3; | ||
2554 | offset >>= 2; | ||
2555 | |||
2556 | if (prev_bytes) { | ||
2557 | data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset); | ||
2558 | for (j = prev_bytes; j < sizeof(u32); j++) { | ||
2559 | *(tmp + j) = *bufptr++; | ||
2560 | *sum += *(tmp + j); | ||
2561 | } | ||
2562 | E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data); | ||
2563 | length -= j - prev_bytes; | ||
2564 | offset++; | ||
2565 | } | ||
2566 | |||
2567 | remaining = length & 0x3; | ||
2568 | length -= remaining; | ||
2569 | |||
2570 | /* Calculate length in DWORDs */ | ||
2571 | length >>= 2; | ||
2572 | |||
2573 | /* | ||
2574 | * The device driver writes the relevant command block into the | ||
2575 | * ram area. | ||
2576 | */ | ||
2577 | for (i = 0; i < length; i++) { | ||
2578 | for (j = 0; j < sizeof(u32); j++) { | ||
2579 | *(tmp + j) = *bufptr++; | ||
2580 | *sum += *(tmp + j); | ||
2581 | } | ||
2582 | |||
2583 | E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); | ||
2584 | } | ||
2585 | if (remaining) { | ||
2586 | for (j = 0; j < sizeof(u32); j++) { | ||
2587 | if (j < remaining) | ||
2588 | *(tmp + j) = *bufptr++; | ||
2589 | else | ||
2590 | *(tmp + j) = 0; | ||
2591 | |||
2592 | *sum += *(tmp + j); | ||
2593 | } | ||
2594 | E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); | ||
2595 | } | ||
2596 | |||
2597 | return 0; | ||
2598 | } | ||
2599 | |||
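The dword bookkeeping in e1000_mng_host_if_write() above is easiest to see with a concrete case: a 10-byte write at byte offset 6 becomes a 2-byte read-modify-write of dword 1, then two full dwords, then no padded tail. A user-space sketch of just that arithmetic; the offset/length values are hypothetical and no register access is modelled:

#include <stdio.h>

int main(void)
{
	/* hypothetical example: write 10 bytes at byte offset 6 of the host
	 * interface RAM, which is addressed in 32-bit dwords */
	unsigned int offset = 6, length = 10;

	unsigned int prev_bytes = offset & 0x3;		/* bytes already used in the first dword */
	unsigned int head_dword = offset >> 2;		/* index of that dword */
	unsigned int head_bytes = prev_bytes ? 4 - prev_bytes : 0;	/* merged via read-modify-write */
	unsigned int rest = length - head_bytes;
	unsigned int tail_bytes = rest & 0x3;		/* bytes in the zero-padded trailing dword */
	unsigned int full_dwords = rest >> 2;		/* dwords written whole in the main loop */

	printf("dword %u: RMW %u byte(s); then %u full dword(s); then %u padded tail byte(s)\n",
	       head_dword, head_bytes, full_dwords, tail_bytes);
	return 0;
}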
2600 | /** | ||
2601 | * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface | ||
2602 | * @hw: pointer to the HW structure | ||
2603 | * @buffer: pointer to the host interface | ||
2604 | * @length: size of the buffer | ||
2605 | * | ||
2606 | * Writes the DHCP information to the host interface. | ||
2607 | **/ | ||
2608 | s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) | ||
2609 | { | ||
2610 | struct e1000_host_mng_command_header hdr; | ||
2611 | s32 ret_val; | ||
2612 | u32 hicr; | ||
2613 | |||
2614 | hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; | ||
2615 | hdr.command_length = length; | ||
2616 | hdr.reserved1 = 0; | ||
2617 | hdr.reserved2 = 0; | ||
2618 | hdr.checksum = 0; | ||
2619 | |||
2620 | /* Enable the host interface */ | ||
2621 | ret_val = e1000_mng_enable_host_if(hw); | ||
2622 | if (ret_val) | ||
2623 | return ret_val; | ||
2624 | |||
2625 | /* Populate the host interface with the contents of "buffer". */ | ||
2626 | ret_val = e1000_mng_host_if_write(hw, buffer, length, | ||
2627 | sizeof(hdr), &(hdr.checksum)); | ||
2628 | if (ret_val) | ||
2629 | return ret_val; | ||
2630 | |||
2631 | /* Write the manageability command header */ | ||
2632 | ret_val = e1000_mng_write_cmd_header(hw, &hdr); | ||
2633 | if (ret_val) | ||
2634 | return ret_val; | ||
2635 | |||
2636 | /* Tell the ARC a new command is pending. */ | ||
2637 | hicr = er32(HICR); | ||
2638 | ew32(HICR, hicr | E1000_HICR_C); | ||
2639 | |||
2640 | return 0; | ||
2641 | } | ||
2642 | |||
2643 | /** | ||
2644 | * e1000e_enable_mng_pass_thru - Check if management passthrough is needed | ||
2645 | * @hw: pointer to the HW structure | ||
2646 | * | ||
2647 | * Verifies whether the hardware needs to leave the interface enabled so that | ||
2648 | * frames can be directed to and from the management interface. | ||
2649 | **/ | ||
2650 | bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) | ||
2651 | { | ||
2652 | u32 manc; | ||
2653 | u32 fwsm, factps; | ||
2654 | bool ret_val = false; | ||
2655 | |||
2656 | manc = er32(MANC); | ||
2657 | |||
2658 | if (!(manc & E1000_MANC_RCV_TCO_EN)) | ||
2659 | goto out; | ||
2660 | |||
2661 | if (hw->mac.has_fwsm) { | ||
2662 | fwsm = er32(FWSM); | ||
2663 | factps = er32(FACTPS); | ||
2664 | |||
2665 | if (!(factps & E1000_FACTPS_MNGCG) && | ||
2666 | ((fwsm & E1000_FWSM_MODE_MASK) == | ||
2667 | (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { | ||
2668 | ret_val = true; | ||
2669 | goto out; | ||
2670 | } | ||
2671 | } else if ((hw->mac.type == e1000_82574) || | ||
2672 | (hw->mac.type == e1000_82583)) { | ||
2673 | u16 data; | ||
2674 | |||
2675 | factps = er32(FACTPS); | ||
2676 | e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); | ||
2677 | |||
2678 | if (!(factps & E1000_FACTPS_MNGCG) && | ||
2679 | ((data & E1000_NVM_INIT_CTRL2_MNGM) == | ||
2680 | (e1000_mng_mode_pt << 13))) { | ||
2681 | ret_val = true; | ||
2682 | goto out; | ||
2683 | } | ||
2684 | } else if ((manc & E1000_MANC_SMBUS_EN) && | ||
2685 | !(manc & E1000_MANC_ASF_EN)) { | ||
2686 | ret_val = true; | ||
2687 | goto out; | ||
2688 | } | ||
2689 | |||
2690 | out: | ||
2691 | return ret_val; | ||
2692 | } | ||
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c new file mode 100644 index 000000000000..ab4be80f7ab5 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -0,0 +1,6312 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/1000 Linux driver | ||
4 | Copyright(c) 1999 - 2011 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
30 | |||
31 | #include <linux/module.h> | ||
32 | #include <linux/types.h> | ||
33 | #include <linux/init.h> | ||
34 | #include <linux/pci.h> | ||
35 | #include <linux/vmalloc.h> | ||
36 | #include <linux/pagemap.h> | ||
37 | #include <linux/delay.h> | ||
38 | #include <linux/netdevice.h> | ||
39 | #include <linux/interrupt.h> | ||
40 | #include <linux/tcp.h> | ||
41 | #include <linux/ipv6.h> | ||
42 | #include <linux/slab.h> | ||
43 | #include <net/checksum.h> | ||
44 | #include <net/ip6_checksum.h> | ||
45 | #include <linux/mii.h> | ||
46 | #include <linux/ethtool.h> | ||
47 | #include <linux/if_vlan.h> | ||
48 | #include <linux/cpu.h> | ||
49 | #include <linux/smp.h> | ||
50 | #include <linux/pm_qos_params.h> | ||
51 | #include <linux/pm_runtime.h> | ||
52 | #include <linux/aer.h> | ||
53 | #include <linux/prefetch.h> | ||
54 | |||
55 | #include "e1000.h" | ||
56 | |||
57 | #define DRV_EXTRAVERSION "-k" | ||
58 | |||
59 | #define DRV_VERSION "1.3.16" DRV_EXTRAVERSION | ||
60 | char e1000e_driver_name[] = "e1000e"; | ||
61 | const char e1000e_driver_version[] = DRV_VERSION; | ||
62 | |||
63 | static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); | ||
64 | |||
65 | static const struct e1000_info *e1000_info_tbl[] = { | ||
66 | [board_82571] = &e1000_82571_info, | ||
67 | [board_82572] = &e1000_82572_info, | ||
68 | [board_82573] = &e1000_82573_info, | ||
69 | [board_82574] = &e1000_82574_info, | ||
70 | [board_82583] = &e1000_82583_info, | ||
71 | [board_80003es2lan] = &e1000_es2_info, | ||
72 | [board_ich8lan] = &e1000_ich8_info, | ||
73 | [board_ich9lan] = &e1000_ich9_info, | ||
74 | [board_ich10lan] = &e1000_ich10_info, | ||
75 | [board_pchlan] = &e1000_pch_info, | ||
76 | [board_pch2lan] = &e1000_pch2_info, | ||
77 | }; | ||
78 | |||
79 | struct e1000_reg_info { | ||
80 | u32 ofs; | ||
81 | char *name; | ||
82 | }; | ||
83 | |||
84 | #define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ | ||
85 | #define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ | ||
86 | #define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ | ||
87 | #define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ | ||
88 | #define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ | ||
89 | |||
90 | #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ | ||
91 | #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ | ||
92 | #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ | ||
93 | #define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ | ||
94 | #define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ | ||
95 | |||
96 | static const struct e1000_reg_info e1000_reg_info_tbl[] = { | ||
97 | |||
98 | /* General Registers */ | ||
99 | {E1000_CTRL, "CTRL"}, | ||
100 | {E1000_STATUS, "STATUS"}, | ||
101 | {E1000_CTRL_EXT, "CTRL_EXT"}, | ||
102 | |||
103 | /* Interrupt Registers */ | ||
104 | {E1000_ICR, "ICR"}, | ||
105 | |||
106 | /* Rx Registers */ | ||
107 | {E1000_RCTL, "RCTL"}, | ||
108 | {E1000_RDLEN, "RDLEN"}, | ||
109 | {E1000_RDH, "RDH"}, | ||
110 | {E1000_RDT, "RDT"}, | ||
111 | {E1000_RDTR, "RDTR"}, | ||
112 | {E1000_RXDCTL(0), "RXDCTL"}, | ||
113 | {E1000_ERT, "ERT"}, | ||
114 | {E1000_RDBAL, "RDBAL"}, | ||
115 | {E1000_RDBAH, "RDBAH"}, | ||
116 | {E1000_RDFH, "RDFH"}, | ||
117 | {E1000_RDFT, "RDFT"}, | ||
118 | {E1000_RDFHS, "RDFHS"}, | ||
119 | {E1000_RDFTS, "RDFTS"}, | ||
120 | {E1000_RDFPC, "RDFPC"}, | ||
121 | |||
122 | /* Tx Registers */ | ||
123 | {E1000_TCTL, "TCTL"}, | ||
124 | {E1000_TDBAL, "TDBAL"}, | ||
125 | {E1000_TDBAH, "TDBAH"}, | ||
126 | {E1000_TDLEN, "TDLEN"}, | ||
127 | {E1000_TDH, "TDH"}, | ||
128 | {E1000_TDT, "TDT"}, | ||
129 | {E1000_TIDV, "TIDV"}, | ||
130 | {E1000_TXDCTL(0), "TXDCTL"}, | ||
131 | {E1000_TADV, "TADV"}, | ||
132 | {E1000_TARC(0), "TARC"}, | ||
133 | {E1000_TDFH, "TDFH"}, | ||
134 | {E1000_TDFT, "TDFT"}, | ||
135 | {E1000_TDFHS, "TDFHS"}, | ||
136 | {E1000_TDFTS, "TDFTS"}, | ||
137 | {E1000_TDFPC, "TDFPC"}, | ||
138 | |||
139 | /* List Terminator */ | ||
140 | {} | ||
141 | }; | ||
142 | |||
143 | /* | ||
144 | * e1000_regdump - register printout routine | ||
145 | */ | ||
146 | static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) | ||
147 | { | ||
148 | int n = 0; | ||
149 | char rname[16]; | ||
150 | u32 regs[8]; | ||
151 | |||
152 | switch (reginfo->ofs) { | ||
153 | case E1000_RXDCTL(0): | ||
154 | for (n = 0; n < 2; n++) | ||
155 | regs[n] = __er32(hw, E1000_RXDCTL(n)); | ||
156 | break; | ||
157 | case E1000_TXDCTL(0): | ||
158 | for (n = 0; n < 2; n++) | ||
159 | regs[n] = __er32(hw, E1000_TXDCTL(n)); | ||
160 | break; | ||
161 | case E1000_TARC(0): | ||
162 | for (n = 0; n < 2; n++) | ||
163 | regs[n] = __er32(hw, E1000_TARC(n)); | ||
164 | break; | ||
165 | default: | ||
166 | printk(KERN_INFO "%-15s %08x\n", | ||
167 | reginfo->name, __er32(hw, reginfo->ofs)); | ||
168 | return; | ||
169 | } | ||
170 | |||
171 | snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); | ||
172 | printk(KERN_INFO "%-15s ", rname); | ||
173 | for (n = 0; n < 2; n++) | ||
174 | printk(KERN_CONT "%08x ", regs[n]); | ||
175 | printk(KERN_CONT "\n"); | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * e1000e_dump - Print registers, Tx-ring and Rx-ring | ||
180 | */ | ||
181 | static void e1000e_dump(struct e1000_adapter *adapter) | ||
182 | { | ||
183 | struct net_device *netdev = adapter->netdev; | ||
184 | struct e1000_hw *hw = &adapter->hw; | ||
185 | struct e1000_reg_info *reginfo; | ||
186 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
187 | struct e1000_tx_desc *tx_desc; | ||
188 | struct my_u0 { | ||
189 | u64 a; | ||
190 | u64 b; | ||
191 | } *u0; | ||
192 | struct e1000_buffer *buffer_info; | ||
193 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
194 | union e1000_rx_desc_packet_split *rx_desc_ps; | ||
195 | struct e1000_rx_desc *rx_desc; | ||
196 | struct my_u1 { | ||
197 | u64 a; | ||
198 | u64 b; | ||
199 | u64 c; | ||
200 | u64 d; | ||
201 | } *u1; | ||
202 | u32 staterr; | ||
203 | int i = 0; | ||
204 | |||
205 | if (!netif_msg_hw(adapter)) | ||
206 | return; | ||
207 | |||
208 | /* Print netdevice Info */ | ||
209 | if (netdev) { | ||
210 | dev_info(&adapter->pdev->dev, "Net device Info\n"); | ||
211 | printk(KERN_INFO "Device Name state " | ||
212 | "trans_start last_rx\n"); | ||
213 | printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", | ||
214 | netdev->name, netdev->state, netdev->trans_start, | ||
215 | netdev->last_rx); | ||
216 | } | ||
217 | |||
218 | /* Print Registers */ | ||
219 | dev_info(&adapter->pdev->dev, "Register Dump\n"); | ||
220 | printk(KERN_INFO " Register Name Value\n"); | ||
221 | for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; | ||
222 | reginfo->name; reginfo++) { | ||
223 | e1000_regdump(hw, reginfo); | ||
224 | } | ||
225 | |||
226 | /* Print Tx Ring Summary */ | ||
227 | if (!netdev || !netif_running(netdev)) | ||
228 | goto exit; | ||
229 | |||
230 | dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); | ||
231 | printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" | ||
232 | " leng ntw timestamp\n"); | ||
233 | buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; | ||
234 | printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", | ||
235 | 0, tx_ring->next_to_use, tx_ring->next_to_clean, | ||
236 | (unsigned long long)buffer_info->dma, | ||
237 | buffer_info->length, | ||
238 | buffer_info->next_to_watch, | ||
239 | (unsigned long long)buffer_info->time_stamp); | ||
240 | |||
241 | /* Print Tx Ring */ | ||
242 | if (!netif_msg_tx_done(adapter)) | ||
243 | goto rx_ring_summary; | ||
244 | |||
245 | dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); | ||
246 | |||
247 | /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) | ||
248 | * | ||
249 | * Legacy Transmit Descriptor | ||
250 | * +--------------------------------------------------------------+ | ||
251 | * 0 | Buffer Address [63:0] (Reserved on Write Back) | | ||
252 | * +--------------------------------------------------------------+ | ||
253 | * 8 | Special | CSS | Status | CMD | CSO | Length | | ||
254 | * +--------------------------------------------------------------+ | ||
255 | * 63 48 47 36 35 32 31 24 23 16 15 0 | ||
256 | * | ||
257 | * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload | ||
258 | * 63 48 47 40 39 32 31 16 15 8 7 0 | ||
259 | * +----------------------------------------------------------------+ | ||
260 | * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | | ||
261 | * +----------------------------------------------------------------+ | ||
262 | * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | | ||
263 | * +----------------------------------------------------------------+ | ||
264 | * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 | ||
265 | * | ||
266 | * Extended Data Descriptor (DTYP=0x1) | ||
267 | * +----------------------------------------------------------------+ | ||
268 | * 0 | Buffer Address [63:0] | | ||
269 | * +----------------------------------------------------------------+ | ||
270 | * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | | ||
271 | * +----------------------------------------------------------------+ | ||
272 | * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 | ||
273 | */ | ||
274 | printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" | ||
275 | " [bi->dma ] leng ntw timestamp bi->skb " | ||
276 | "<-- Legacy format\n"); | ||
277 | printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" | ||
278 | " [bi->dma ] leng ntw timestamp bi->skb " | ||
279 | "<-- Ext Context format\n"); | ||
280 | printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" | ||
281 | " [bi->dma ] leng ntw timestamp bi->skb " | ||
282 | "<-- Ext Data format\n"); | ||
283 | for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { | ||
284 | tx_desc = E1000_TX_DESC(*tx_ring, i); | ||
285 | buffer_info = &tx_ring->buffer_info[i]; | ||
286 | u0 = (struct my_u0 *)tx_desc; | ||
287 | printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " | ||
288 | "%04X %3X %016llX %p", | ||
289 | (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : | ||
290 | ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i, | ||
291 | (unsigned long long)le64_to_cpu(u0->a), | ||
292 | (unsigned long long)le64_to_cpu(u0->b), | ||
293 | (unsigned long long)buffer_info->dma, | ||
294 | buffer_info->length, buffer_info->next_to_watch, | ||
295 | (unsigned long long)buffer_info->time_stamp, | ||
296 | buffer_info->skb); | ||
297 | if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) | ||
298 | printk(KERN_CONT " NTC/U\n"); | ||
299 | else if (i == tx_ring->next_to_use) | ||
300 | printk(KERN_CONT " NTU\n"); | ||
301 | else if (i == tx_ring->next_to_clean) | ||
302 | printk(KERN_CONT " NTC\n"); | ||
303 | else | ||
304 | printk(KERN_CONT "\n"); | ||
305 | |||
306 | if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) | ||
307 | print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, | ||
308 | 16, 1, phys_to_virt(buffer_info->dma), | ||
309 | buffer_info->length, true); | ||
310 | } | ||
311 | |||
312 | /* Print Rx Ring Summary */ | ||
313 | rx_ring_summary: | ||
314 | dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); | ||
315 | printk(KERN_INFO "Queue [NTU] [NTC]\n"); | ||
316 | printk(KERN_INFO " %5d %5X %5X\n", 0, | ||
317 | rx_ring->next_to_use, rx_ring->next_to_clean); | ||
318 | |||
319 | /* Print Rx Ring */ | ||
320 | if (!netif_msg_rx_status(adapter)) | ||
321 | goto exit; | ||
322 | |||
323 | dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); | ||
324 | switch (adapter->rx_ps_pages) { | ||
325 | case 1: | ||
326 | case 2: | ||
327 | case 3: | ||
328 | /* [Extended] Packet Split Receive Descriptor Format | ||
329 | * | ||
330 | * +-----------------------------------------------------+ | ||
331 | * 0 | Buffer Address 0 [63:0] | | ||
332 | * +-----------------------------------------------------+ | ||
333 | * 8 | Buffer Address 1 [63:0] | | ||
334 | * +-----------------------------------------------------+ | ||
335 | * 16 | Buffer Address 2 [63:0] | | ||
336 | * +-----------------------------------------------------+ | ||
337 | * 24 | Buffer Address 3 [63:0] | | ||
338 | * +-----------------------------------------------------+ | ||
339 | */ | ||
340 | printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " | ||
341 | "[buffer 1 63:0 ] " | ||
342 | "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " | ||
343 | "[bi->skb] <-- Ext Pkt Split format\n"); | ||
344 | /* [Extended] Receive Descriptor (Write-Back) Format | ||
345 | * | ||
346 | * 63 48 47 32 31 13 12 8 7 4 3 0 | ||
347 | * +------------------------------------------------------+ | ||
348 | * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS | | ||
349 | * | Checksum | Ident | | Queue | | Type | | ||
350 | * +------------------------------------------------------+ | ||
351 | * 8 | VLAN Tag | Length | Extended Error | Extended Status | | ||
352 | * +------------------------------------------------------+ | ||
353 | * 63 48 47 32 31 20 19 0 | ||
354 | */ | ||
355 | printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " | ||
356 | "[vl l0 ee es] " | ||
357 | "[ l3 l2 l1 hs] [reserved ] ---------------- " | ||
358 | "[bi->skb] <-- Ext Rx Write-Back format\n"); | ||
359 | for (i = 0; i < rx_ring->count; i++) { | ||
360 | buffer_info = &rx_ring->buffer_info[i]; | ||
361 | rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); | ||
362 | u1 = (struct my_u1 *)rx_desc_ps; | ||
363 | staterr = | ||
364 | le32_to_cpu(rx_desc_ps->wb.middle.status_error); | ||
365 | if (staterr & E1000_RXD_STAT_DD) { | ||
366 | /* Descriptor Done */ | ||
367 | printk(KERN_INFO "RWB[0x%03X] %016llX " | ||
368 | "%016llX %016llX %016llX " | ||
369 | "---------------- %p", i, | ||
370 | (unsigned long long)le64_to_cpu(u1->a), | ||
371 | (unsigned long long)le64_to_cpu(u1->b), | ||
372 | (unsigned long long)le64_to_cpu(u1->c), | ||
373 | (unsigned long long)le64_to_cpu(u1->d), | ||
374 | buffer_info->skb); | ||
375 | } else { | ||
376 | printk(KERN_INFO "R [0x%03X] %016llX " | ||
377 | "%016llX %016llX %016llX %016llX %p", i, | ||
378 | (unsigned long long)le64_to_cpu(u1->a), | ||
379 | (unsigned long long)le64_to_cpu(u1->b), | ||
380 | (unsigned long long)le64_to_cpu(u1->c), | ||
381 | (unsigned long long)le64_to_cpu(u1->d), | ||
382 | (unsigned long long)buffer_info->dma, | ||
383 | buffer_info->skb); | ||
384 | |||
385 | if (netif_msg_pktdata(adapter)) | ||
386 | print_hex_dump(KERN_INFO, "", | ||
387 | DUMP_PREFIX_ADDRESS, 16, 1, | ||
388 | phys_to_virt(buffer_info->dma), | ||
389 | adapter->rx_ps_bsize0, true); | ||
390 | } | ||
391 | |||
392 | if (i == rx_ring->next_to_use) | ||
393 | printk(KERN_CONT " NTU\n"); | ||
394 | else if (i == rx_ring->next_to_clean) | ||
395 | printk(KERN_CONT " NTC\n"); | ||
396 | else | ||
397 | printk(KERN_CONT "\n"); | ||
398 | } | ||
399 | break; | ||
400 | default: | ||
401 | case 0: | ||
402 | /* Legacy Receive Descriptor Format | ||
403 | * | ||
404 | * +-----------------------------------------------------+ | ||
405 | * | Buffer Address [63:0] | | ||
406 | * +-----------------------------------------------------+ | ||
407 | * | VLAN Tag | Errors | Status 0 | Packet csum | Length | | ||
408 | * +-----------------------------------------------------+ | ||
409 | * 63 48 47 40 39 32 31 16 15 0 | ||
410 | */ | ||
411 | printk(KERN_INFO "Rl[desc] [address 63:0 ] " | ||
412 | "[vl er S cks ln] [bi->dma ] [bi->skb] " | ||
413 | "<-- Legacy format\n"); | ||
414 | for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { | ||
415 | rx_desc = E1000_RX_DESC(*rx_ring, i); | ||
416 | buffer_info = &rx_ring->buffer_info[i]; | ||
417 | u0 = (struct my_u0 *)rx_desc; | ||
418 | printk(KERN_INFO "Rl[0x%03X] %016llX %016llX " | ||
419 | "%016llX %p", i, | ||
420 | (unsigned long long)le64_to_cpu(u0->a), | ||
421 | (unsigned long long)le64_to_cpu(u0->b), | ||
422 | (unsigned long long)buffer_info->dma, | ||
423 | buffer_info->skb); | ||
424 | if (i == rx_ring->next_to_use) | ||
425 | printk(KERN_CONT " NTU\n"); | ||
426 | else if (i == rx_ring->next_to_clean) | ||
427 | printk(KERN_CONT " NTC\n"); | ||
428 | else | ||
429 | printk(KERN_CONT "\n"); | ||
430 | |||
431 | if (netif_msg_pktdata(adapter)) | ||
432 | print_hex_dump(KERN_INFO, "", | ||
433 | DUMP_PREFIX_ADDRESS, | ||
434 | 16, 1, | ||
435 | phys_to_virt(buffer_info->dma), | ||
436 | adapter->rx_buffer_len, true); | ||
437 | } | ||
438 | } | ||
439 | |||
440 | exit: | ||
441 | return; | ||
442 | } | ||
443 | |||
444 | /** | ||
445 | * e1000_desc_unused - calculate how many descriptors are unused | ||
446 | **/ | ||
447 | static int e1000_desc_unused(struct e1000_ring *ring) | ||
448 | { | ||
449 | if (ring->next_to_clean > ring->next_to_use) | ||
450 | return ring->next_to_clean - ring->next_to_use - 1; | ||
451 | |||
452 | return ring->count + ring->next_to_clean - ring->next_to_use - 1; | ||
453 | } | ||
454 | |||
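e1000_desc_unused() above is plain circular-buffer arithmetic: one slot is always kept free, so the unused count is the ring size minus one, minus the descriptors currently outstanding between next_to_clean and next_to_use. A small sketch with a couple of worked cases; the ring size of 256 is arbitrary:

#include <assert.h>

static int desc_unused(int count, int next_to_clean, int next_to_use)
{
	if (next_to_clean > next_to_use)
		return next_to_clean - next_to_use - 1;
	return count + next_to_clean - next_to_use - 1;
}

int main(void)
{
	assert(desc_unused(256, 0, 0) == 255);		/* empty ring: one slot stays unused */
	assert(desc_unused(256, 10, 250) == 15);	/* producer well ahead of consumer */
	assert(desc_unused(256, 250, 10) == 239);	/* producer has wrapped past index 0 */
	return 0;
}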
455 | /** | ||
456 | * e1000_receive_skb - helper function to handle Rx indications | ||
457 | * @adapter: board private structure | ||
458 | * @status: descriptor status field as written by hardware | ||
459 | * @vlan: descriptor vlan field as written by hardware (no le/be conversion) | ||
460 | * @skb: pointer to sk_buff to be indicated to stack | ||
461 | **/ | ||
462 | static void e1000_receive_skb(struct e1000_adapter *adapter, | ||
463 | struct net_device *netdev, struct sk_buff *skb, | ||
464 | u8 status, __le16 vlan) | ||
465 | { | ||
466 | u16 tag = le16_to_cpu(vlan); | ||
467 | skb->protocol = eth_type_trans(skb, netdev); | ||
468 | |||
469 | if (status & E1000_RXD_STAT_VP) | ||
470 | __vlan_hwaccel_put_tag(skb, tag); | ||
471 | |||
472 | napi_gro_receive(&adapter->napi, skb); | ||
473 | } | ||
474 | |||
475 | /** | ||
476 | * e1000_rx_checksum - Receive Checksum Offload | ||
477 | * @adapter: board private structure | ||
478 | * @status_err: receive descriptor status and error fields | ||
479 | * @csum: receive descriptor csum field | ||
480 | * @sk_buff: socket buffer with received data | ||
481 | **/ | ||
482 | static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, | ||
483 | u32 csum, struct sk_buff *skb) | ||
484 | { | ||
485 | u16 status = (u16)status_err; | ||
486 | u8 errors = (u8)(status_err >> 24); | ||
487 | |||
488 | skb_checksum_none_assert(skb); | ||
489 | |||
490 | /* Ignore Checksum bit is set */ | ||
491 | if (status & E1000_RXD_STAT_IXSM) | ||
492 | return; | ||
493 | /* TCP/UDP checksum error bit is set */ | ||
494 | if (errors & E1000_RXD_ERR_TCPE) { | ||
495 | /* let the stack verify checksum errors */ | ||
496 | adapter->hw_csum_err++; | ||
497 | return; | ||
498 | } | ||
499 | |||
500 | /* TCP/UDP Checksum has not been calculated */ | ||
501 | if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) | ||
502 | return; | ||
503 | |||
504 | /* It must be a TCP or UDP packet with a valid checksum */ | ||
505 | if (status & E1000_RXD_STAT_TCPCS) { | ||
506 | /* TCP checksum is good */ | ||
507 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
508 | } else { | ||
509 | /* | ||
510 | * IP fragment with UDP payload | ||
511 | * Hardware complements the payload checksum, so we undo it | ||
512 | * and then put the value in host order for further stack use. | ||
513 | */ | ||
514 | __sum16 sum = (__force __sum16)htons(csum); | ||
515 | skb->csum = csum_unfold(~sum); | ||
516 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
517 | } | ||
518 | adapter->hw_csum_good++; | ||
519 | } | ||
520 | |||
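In the UDP-over-IP-fragment branch above, the hardware reports the ones' complement of the payload checksum, and the driver simply re-complements it before handing it to the stack as CHECKSUM_COMPLETE. A user-space sketch of that complement/un-complement step using a plain Internet-checksum style fold; the real path additionally goes through htons() and csum_unfold(), and the payload bytes are made up:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 16-bit ones'-complement sum over a byte buffer */
static uint16_t csum16(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)((data[i] << 8) | data[i + 1]);
	if (len & 1)
		sum += (uint32_t)(data[len - 1] << 8);
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t payload[] = { 0xDE, 0xAD, 0xBE, 0xEF, 0x42 };
	uint16_t sum = csum16(payload, sizeof(payload));
	uint16_t hw_csum = (uint16_t)~sum;	/* what the descriptor would carry */

	/* the driver undoes the complement so the stack sees the raw sum */
	printf("recovered 0x%04X, expected 0x%04X\n", (uint16_t)~hw_csum, sum);
	return 0;
}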
521 | /** | ||
522 | * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended | ||
523 | * @adapter: address of board private structure | ||
524 | **/ | ||
525 | static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | ||
526 | int cleaned_count, gfp_t gfp) | ||
527 | { | ||
528 | struct net_device *netdev = adapter->netdev; | ||
529 | struct pci_dev *pdev = adapter->pdev; | ||
530 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
531 | struct e1000_rx_desc *rx_desc; | ||
532 | struct e1000_buffer *buffer_info; | ||
533 | struct sk_buff *skb; | ||
534 | unsigned int i; | ||
535 | unsigned int bufsz = adapter->rx_buffer_len; | ||
536 | |||
537 | i = rx_ring->next_to_use; | ||
538 | buffer_info = &rx_ring->buffer_info[i]; | ||
539 | |||
540 | while (cleaned_count--) { | ||
541 | skb = buffer_info->skb; | ||
542 | if (skb) { | ||
543 | skb_trim(skb, 0); | ||
544 | goto map_skb; | ||
545 | } | ||
546 | |||
547 | skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); | ||
548 | if (!skb) { | ||
549 | /* Better luck next round */ | ||
550 | adapter->alloc_rx_buff_failed++; | ||
551 | break; | ||
552 | } | ||
553 | |||
554 | buffer_info->skb = skb; | ||
555 | map_skb: | ||
556 | buffer_info->dma = dma_map_single(&pdev->dev, skb->data, | ||
557 | adapter->rx_buffer_len, | ||
558 | DMA_FROM_DEVICE); | ||
559 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | ||
560 | dev_err(&pdev->dev, "Rx DMA map failed\n"); | ||
561 | adapter->rx_dma_failed++; | ||
562 | break; | ||
563 | } | ||
564 | |||
565 | rx_desc = E1000_RX_DESC(*rx_ring, i); | ||
566 | rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | ||
567 | |||
568 | if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { | ||
569 | /* | ||
570 | * Force memory writes to complete before letting h/w | ||
571 | * know there are new descriptors to fetch. (Only | ||
572 | * applicable for weak-ordered memory model archs, | ||
573 | * such as IA-64). | ||
574 | */ | ||
575 | wmb(); | ||
576 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | ||
577 | } | ||
578 | i++; | ||
579 | if (i == rx_ring->count) | ||
580 | i = 0; | ||
581 | buffer_info = &rx_ring->buffer_info[i]; | ||
582 | } | ||
583 | |||
584 | rx_ring->next_to_use = i; | ||
585 | } | ||
586 | |||
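The !(i & (E1000_RX_BUFFER_WRITE - 1)) test above (and in the other allocation routines) is the usual power-of-two batching trick: for a power-of-two batch size N, i & (N - 1) equals i % N, so the tail register is only written once every N descriptors instead of once per buffer. A tiny sketch of the equivalence; the batch size of 16 here is illustrative:

#include <assert.h>

int main(void)
{
	const unsigned int batch = 16;	/* must be a power of two */
	unsigned int i;

	for (i = 0; i < 256; i++)
		assert(((i & (batch - 1)) == 0) == ((i % batch) == 0));
	return 0;
}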
587 | /** | ||
588 | * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split | ||
589 | * @adapter: address of board private structure | ||
590 | **/ | ||
591 | static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | ||
592 | int cleaned_count, gfp_t gfp) | ||
593 | { | ||
594 | struct net_device *netdev = adapter->netdev; | ||
595 | struct pci_dev *pdev = adapter->pdev; | ||
596 | union e1000_rx_desc_packet_split *rx_desc; | ||
597 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
598 | struct e1000_buffer *buffer_info; | ||
599 | struct e1000_ps_page *ps_page; | ||
600 | struct sk_buff *skb; | ||
601 | unsigned int i, j; | ||
602 | |||
603 | i = rx_ring->next_to_use; | ||
604 | buffer_info = &rx_ring->buffer_info[i]; | ||
605 | |||
606 | while (cleaned_count--) { | ||
607 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); | ||
608 | |||
609 | for (j = 0; j < PS_PAGE_BUFFERS; j++) { | ||
610 | ps_page = &buffer_info->ps_pages[j]; | ||
611 | if (j >= adapter->rx_ps_pages) { | ||
612 | /* all unused desc entries get hw null ptr */ | ||
613 | rx_desc->read.buffer_addr[j + 1] = | ||
614 | ~cpu_to_le64(0); | ||
615 | continue; | ||
616 | } | ||
617 | if (!ps_page->page) { | ||
618 | ps_page->page = alloc_page(gfp); | ||
619 | if (!ps_page->page) { | ||
620 | adapter->alloc_rx_buff_failed++; | ||
621 | goto no_buffers; | ||
622 | } | ||
623 | ps_page->dma = dma_map_page(&pdev->dev, | ||
624 | ps_page->page, | ||
625 | 0, PAGE_SIZE, | ||
626 | DMA_FROM_DEVICE); | ||
627 | if (dma_mapping_error(&pdev->dev, | ||
628 | ps_page->dma)) { | ||
629 | dev_err(&adapter->pdev->dev, | ||
630 | "Rx DMA page map failed\n"); | ||
631 | adapter->rx_dma_failed++; | ||
632 | goto no_buffers; | ||
633 | } | ||
634 | } | ||
635 | /* | ||
636 | * Refresh the desc even if buffer_addrs | ||
637 | * didn't change because each write-back | ||
638 | * erases this info. | ||
639 | */ | ||
640 | rx_desc->read.buffer_addr[j + 1] = | ||
641 | cpu_to_le64(ps_page->dma); | ||
642 | } | ||
643 | |||
644 | skb = __netdev_alloc_skb_ip_align(netdev, | ||
645 | adapter->rx_ps_bsize0, | ||
646 | gfp); | ||
647 | |||
648 | if (!skb) { | ||
649 | adapter->alloc_rx_buff_failed++; | ||
650 | break; | ||
651 | } | ||
652 | |||
653 | buffer_info->skb = skb; | ||
654 | buffer_info->dma = dma_map_single(&pdev->dev, skb->data, | ||
655 | adapter->rx_ps_bsize0, | ||
656 | DMA_FROM_DEVICE); | ||
657 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | ||
658 | dev_err(&pdev->dev, "Rx DMA map failed\n"); | ||
659 | adapter->rx_dma_failed++; | ||
660 | /* cleanup skb */ | ||
661 | dev_kfree_skb_any(skb); | ||
662 | buffer_info->skb = NULL; | ||
663 | break; | ||
664 | } | ||
665 | |||
666 | rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); | ||
667 | |||
668 | if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { | ||
669 | /* | ||
670 | * Force memory writes to complete before letting h/w | ||
671 | * know there are new descriptors to fetch. (Only | ||
672 | * applicable for weak-ordered memory model archs, | ||
673 | * such as IA-64). | ||
674 | */ | ||
675 | wmb(); | ||
676 | writel(i << 1, adapter->hw.hw_addr + rx_ring->tail); | ||
677 | } | ||
678 | |||
679 | i++; | ||
680 | if (i == rx_ring->count) | ||
681 | i = 0; | ||
682 | buffer_info = &rx_ring->buffer_info[i]; | ||
683 | } | ||
684 | |||
685 | no_buffers: | ||
686 | rx_ring->next_to_use = i; | ||
687 | } | ||
688 | |||
689 | /** | ||
690 | * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers | ||
691 | * @adapter: address of board private structure | ||
692 | * @cleaned_count: number of buffers to allocate this pass | ||
693 | **/ | ||
694 | |||
695 | static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, | ||
696 | int cleaned_count, gfp_t gfp) | ||
697 | { | ||
698 | struct net_device *netdev = adapter->netdev; | ||
699 | struct pci_dev *pdev = adapter->pdev; | ||
700 | struct e1000_rx_desc *rx_desc; | ||
701 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
702 | struct e1000_buffer *buffer_info; | ||
703 | struct sk_buff *skb; | ||
704 | unsigned int i; | ||
705 | unsigned int bufsz = 256 - 16 /* for skb_reserve */; | ||
706 | |||
707 | i = rx_ring->next_to_use; | ||
708 | buffer_info = &rx_ring->buffer_info[i]; | ||
709 | |||
710 | while (cleaned_count--) { | ||
711 | skb = buffer_info->skb; | ||
712 | if (skb) { | ||
713 | skb_trim(skb, 0); | ||
714 | goto check_page; | ||
715 | } | ||
716 | |||
717 | skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); | ||
718 | if (unlikely(!skb)) { | ||
719 | /* Better luck next round */ | ||
720 | adapter->alloc_rx_buff_failed++; | ||
721 | break; | ||
722 | } | ||
723 | |||
724 | buffer_info->skb = skb; | ||
725 | check_page: | ||
726 | /* allocate a new page if necessary */ | ||
727 | if (!buffer_info->page) { | ||
728 | buffer_info->page = alloc_page(gfp); | ||
729 | if (unlikely(!buffer_info->page)) { | ||
730 | adapter->alloc_rx_buff_failed++; | ||
731 | break; | ||
732 | } | ||
733 | } | ||
734 | |||
735 | if (!buffer_info->dma) | ||
736 | buffer_info->dma = dma_map_page(&pdev->dev, | ||
737 | buffer_info->page, 0, | ||
738 | PAGE_SIZE, | ||
739 | DMA_FROM_DEVICE); | ||
740 | |||
741 | rx_desc = E1000_RX_DESC(*rx_ring, i); | ||
742 | rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | ||
743 | |||
744 | if (unlikely(++i == rx_ring->count)) | ||
745 | i = 0; | ||
746 | buffer_info = &rx_ring->buffer_info[i]; | ||
747 | } | ||
748 | |||
749 | if (likely(rx_ring->next_to_use != i)) { | ||
750 | rx_ring->next_to_use = i; | ||
751 | if (unlikely(i-- == 0)) | ||
752 | i = (rx_ring->count - 1); | ||
753 | |||
754 | /* Force memory writes to complete before letting h/w | ||
755 | * know there are new descriptors to fetch. (Only | ||
756 | * applicable for weak-ordered memory model archs, | ||
757 | * such as IA-64). */ | ||
758 | wmb(); | ||
759 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | ||
760 | } | ||
761 | } | ||
762 | |||
763 | /** | ||
764 | * e1000_clean_rx_irq - Send received data up the network stack; legacy | ||
765 | * @adapter: board private structure | ||
766 | * | ||
767 | * the return value indicates whether actual cleaning was done; there | ||
768 | * is no guarantee that everything was cleaned | ||
769 | **/ | ||
770 | static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | ||
771 | int *work_done, int work_to_do) | ||
772 | { | ||
773 | struct net_device *netdev = adapter->netdev; | ||
774 | struct pci_dev *pdev = adapter->pdev; | ||
775 | struct e1000_hw *hw = &adapter->hw; | ||
776 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
777 | struct e1000_rx_desc *rx_desc, *next_rxd; | ||
778 | struct e1000_buffer *buffer_info, *next_buffer; | ||
779 | u32 length; | ||
780 | unsigned int i; | ||
781 | int cleaned_count = 0; | ||
782 | bool cleaned = 0; | ||
783 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; | ||
784 | |||
785 | i = rx_ring->next_to_clean; | ||
786 | rx_desc = E1000_RX_DESC(*rx_ring, i); | ||
787 | buffer_info = &rx_ring->buffer_info[i]; | ||
788 | |||
789 | while (rx_desc->status & E1000_RXD_STAT_DD) { | ||
790 | struct sk_buff *skb; | ||
791 | u8 status; | ||
792 | |||
793 | if (*work_done >= work_to_do) | ||
794 | break; | ||
795 | (*work_done)++; | ||
796 | rmb(); /* read descriptor and rx_buffer_info after status DD */ | ||
797 | |||
798 | status = rx_desc->status; | ||
799 | skb = buffer_info->skb; | ||
800 | buffer_info->skb = NULL; | ||
801 | |||
802 | prefetch(skb->data - NET_IP_ALIGN); | ||
803 | |||
804 | i++; | ||
805 | if (i == rx_ring->count) | ||
806 | i = 0; | ||
807 | next_rxd = E1000_RX_DESC(*rx_ring, i); | ||
808 | prefetch(next_rxd); | ||
809 | |||
810 | next_buffer = &rx_ring->buffer_info[i]; | ||
811 | |||
812 | cleaned = 1; | ||
813 | cleaned_count++; | ||
814 | dma_unmap_single(&pdev->dev, | ||
815 | buffer_info->dma, | ||
816 | adapter->rx_buffer_len, | ||
817 | DMA_FROM_DEVICE); | ||
818 | buffer_info->dma = 0; | ||
819 | |||
820 | length = le16_to_cpu(rx_desc->length); | ||
821 | |||
822 | /* | ||
823 | * !EOP means multiple descriptors were used to store a single | ||
824 | * packet, if that's the case we need to toss it. In fact, we | ||
825 | * need to toss every packet with the EOP bit clear and the | ||
826 | * next frame that _does_ have the EOP bit set, as it is by | ||
827 | * definition only a frame fragment | ||
828 | */ | ||
829 | if (unlikely(!(status & E1000_RXD_STAT_EOP))) | ||
830 | adapter->flags2 |= FLAG2_IS_DISCARDING; | ||
831 | |||
832 | if (adapter->flags2 & FLAG2_IS_DISCARDING) { | ||
833 | /* All receives must fit into a single buffer */ | ||
834 | e_dbg("Receive packet consumed multiple buffers\n"); | ||
835 | /* recycle */ | ||
836 | buffer_info->skb = skb; | ||
837 | if (status & E1000_RXD_STAT_EOP) | ||
838 | adapter->flags2 &= ~FLAG2_IS_DISCARDING; | ||
839 | goto next_desc; | ||
840 | } | ||
841 | |||
842 | if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) { | ||
843 | /* recycle */ | ||
844 | buffer_info->skb = skb; | ||
845 | goto next_desc; | ||
846 | } | ||
847 | |||
848 | /* adjust length to remove Ethernet CRC */ | ||
849 | if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) | ||
850 | length -= 4; | ||
851 | |||
852 | total_rx_bytes += length; | ||
853 | total_rx_packets++; | ||
854 | |||
855 | /* | ||
856 | * code added for copybreak; this should improve | ||
857 | * performance for small packets with large amounts | ||
858 | * of reassembly being done in the stack | ||
859 | */ | ||
860 | if (length < copybreak) { | ||
861 | struct sk_buff *new_skb = | ||
862 | netdev_alloc_skb_ip_align(netdev, length); | ||
863 | if (new_skb) { | ||
864 | skb_copy_to_linear_data_offset(new_skb, | ||
865 | -NET_IP_ALIGN, | ||
866 | (skb->data - | ||
867 | NET_IP_ALIGN), | ||
868 | (length + | ||
869 | NET_IP_ALIGN)); | ||
870 | /* save the skb in buffer_info as good */ | ||
871 | buffer_info->skb = skb; | ||
872 | skb = new_skb; | ||
873 | } | ||
874 | /* else just continue with the old one */ | ||
875 | } | ||
876 | /* end copybreak code */ | ||
877 | skb_put(skb, length); | ||
878 | |||
879 | /* Receive Checksum Offload */ | ||
880 | e1000_rx_checksum(adapter, | ||
881 | (u32)(status) | | ||
882 | ((u32)(rx_desc->errors) << 24), | ||
883 | le16_to_cpu(rx_desc->csum), skb); | ||
884 | |||
885 | e1000_receive_skb(adapter, netdev, skb, status, rx_desc->special); | ||
886 | |||
887 | next_desc: | ||
888 | rx_desc->status = 0; | ||
889 | |||
890 | /* return some buffers to hardware, one at a time is too slow */ | ||
891 | if (cleaned_count >= E1000_RX_BUFFER_WRITE) { | ||
892 | adapter->alloc_rx_buf(adapter, cleaned_count, | ||
893 | GFP_ATOMIC); | ||
894 | cleaned_count = 0; | ||
895 | } | ||
896 | |||
897 | /* use prefetched values */ | ||
898 | rx_desc = next_rxd; | ||
899 | buffer_info = next_buffer; | ||
900 | } | ||
901 | rx_ring->next_to_clean = i; | ||
902 | |||
903 | cleaned_count = e1000_desc_unused(rx_ring); | ||
904 | if (cleaned_count) | ||
905 | adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); | ||
906 | |||
907 | adapter->total_rx_bytes += total_rx_bytes; | ||
908 | adapter->total_rx_packets += total_rx_packets; | ||
909 | return cleaned; | ||
910 | } | ||
911 | |||
912 | static void e1000_put_txbuf(struct e1000_adapter *adapter, | ||
913 | struct e1000_buffer *buffer_info) | ||
914 | { | ||
915 | if (buffer_info->dma) { | ||
916 | if (buffer_info->mapped_as_page) | ||
917 | dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, | ||
918 | buffer_info->length, DMA_TO_DEVICE); | ||
919 | else | ||
920 | dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, | ||
921 | buffer_info->length, DMA_TO_DEVICE); | ||
922 | buffer_info->dma = 0; | ||
923 | } | ||
924 | if (buffer_info->skb) { | ||
925 | dev_kfree_skb_any(buffer_info->skb); | ||
926 | buffer_info->skb = NULL; | ||
927 | } | ||
928 | buffer_info->time_stamp = 0; | ||
929 | } | ||
930 | |||
931 | static void e1000_print_hw_hang(struct work_struct *work) | ||
932 | { | ||
933 | struct e1000_adapter *adapter = container_of(work, | ||
934 | struct e1000_adapter, | ||
935 | print_hang_task); | ||
936 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
937 | unsigned int i = tx_ring->next_to_clean; | ||
938 | unsigned int eop = tx_ring->buffer_info[i].next_to_watch; | ||
939 | struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); | ||
940 | struct e1000_hw *hw = &adapter->hw; | ||
941 | u16 phy_status, phy_1000t_status, phy_ext_status; | ||
942 | u16 pci_status; | ||
943 | |||
944 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
945 | return; | ||
946 | |||
947 | e1e_rphy(hw, PHY_STATUS, &phy_status); | ||
948 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); | ||
949 | e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); | ||
950 | |||
951 | pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); | ||
952 | |||
953 | /* detected Hardware unit hang */ | ||
954 | e_err("Detected Hardware Unit Hang:\n" | ||
955 | " TDH <%x>\n" | ||
956 | " TDT <%x>\n" | ||
957 | " next_to_use <%x>\n" | ||
958 | " next_to_clean <%x>\n" | ||
959 | "buffer_info[next_to_clean]:\n" | ||
960 | " time_stamp <%lx>\n" | ||
961 | " next_to_watch <%x>\n" | ||
962 | " jiffies <%lx>\n" | ||
963 | " next_to_watch.status <%x>\n" | ||
964 | "MAC Status <%x>\n" | ||
965 | "PHY Status <%x>\n" | ||
966 | "PHY 1000BASE-T Status <%x>\n" | ||
967 | "PHY Extended Status <%x>\n" | ||
968 | "PCI Status <%x>\n", | ||
969 | readl(adapter->hw.hw_addr + tx_ring->head), | ||
970 | readl(adapter->hw.hw_addr + tx_ring->tail), | ||
971 | tx_ring->next_to_use, | ||
972 | tx_ring->next_to_clean, | ||
973 | tx_ring->buffer_info[eop].time_stamp, | ||
974 | eop, | ||
975 | jiffies, | ||
976 | eop_desc->upper.fields.status, | ||
977 | er32(STATUS), | ||
978 | phy_status, | ||
979 | phy_1000t_status, | ||
980 | phy_ext_status, | ||
981 | pci_status); | ||
982 | } | ||
983 | |||
984 | /** | ||
985 | * e1000_clean_tx_irq - Reclaim resources after transmit completes | ||
986 | * @adapter: board private structure | ||
987 | * | ||
988 | * the return value indicates whether actual cleaning was done; there | ||
989 | * is no guarantee that everything was cleaned | ||
990 | **/ | ||
991 | static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | ||
992 | { | ||
993 | struct net_device *netdev = adapter->netdev; | ||
994 | struct e1000_hw *hw = &adapter->hw; | ||
995 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
996 | struct e1000_tx_desc *tx_desc, *eop_desc; | ||
997 | struct e1000_buffer *buffer_info; | ||
998 | unsigned int i, eop; | ||
999 | unsigned int count = 0; | ||
1000 | unsigned int total_tx_bytes = 0, total_tx_packets = 0; | ||
1001 | |||
1002 | i = tx_ring->next_to_clean; | ||
1003 | eop = tx_ring->buffer_info[i].next_to_watch; | ||
1004 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | ||
1005 | |||
1006 | while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && | ||
1007 | (count < tx_ring->count)) { | ||
1008 | bool cleaned = false; | ||
1009 | rmb(); /* read buffer_info after eop_desc */ | ||
1010 | for (; !cleaned; count++) { | ||
1011 | tx_desc = E1000_TX_DESC(*tx_ring, i); | ||
1012 | buffer_info = &tx_ring->buffer_info[i]; | ||
1013 | cleaned = (i == eop); | ||
1014 | |||
1015 | if (cleaned) { | ||
1016 | total_tx_packets += buffer_info->segs; | ||
1017 | total_tx_bytes += buffer_info->bytecount; | ||
1018 | } | ||
1019 | |||
1020 | e1000_put_txbuf(adapter, buffer_info); | ||
1021 | tx_desc->upper.data = 0; | ||
1022 | |||
1023 | i++; | ||
1024 | if (i == tx_ring->count) | ||
1025 | i = 0; | ||
1026 | } | ||
1027 | |||
1028 | if (i == tx_ring->next_to_use) | ||
1029 | break; | ||
1030 | eop = tx_ring->buffer_info[i].next_to_watch; | ||
1031 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | ||
1032 | } | ||
1033 | |||
1034 | tx_ring->next_to_clean = i; | ||
1035 | |||
1036 | #define TX_WAKE_THRESHOLD 32 | ||
1037 | if (count && netif_carrier_ok(netdev) && | ||
1038 | e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { | ||
1039 | /* Make sure that anybody stopping the queue after this | ||
1040 | * sees the new next_to_clean. | ||
1041 | */ | ||
1042 | smp_mb(); | ||
1043 | |||
1044 | if (netif_queue_stopped(netdev) && | ||
1045 | !(test_bit(__E1000_DOWN, &adapter->state))) { | ||
1046 | netif_wake_queue(netdev); | ||
1047 | ++adapter->restart_queue; | ||
1048 | } | ||
1049 | } | ||
1050 | |||
1051 | if (adapter->detect_tx_hung) { | ||
1052 | /* | ||
1053 | * Detect a transmit hang in hardware; this serializes the | ||
1054 | * check with the clearing of time_stamp and movement of i | ||
1055 | */ | ||
1056 | adapter->detect_tx_hung = 0; | ||
1057 | if (tx_ring->buffer_info[i].time_stamp && | ||
1058 | time_after(jiffies, tx_ring->buffer_info[i].time_stamp | ||
1059 | + (adapter->tx_timeout_factor * HZ)) && | ||
1060 | !(er32(STATUS) & E1000_STATUS_TXOFF)) { | ||
1061 | schedule_work(&adapter->print_hang_task); | ||
1062 | netif_stop_queue(netdev); | ||
1063 | } | ||
1064 | } | ||
1065 | adapter->total_tx_bytes += total_tx_bytes; | ||
1066 | adapter->total_tx_packets += total_tx_packets; | ||
1067 | return count < tx_ring->count; | ||
1068 | } | ||
1069 | |||
1070 | /** | ||
1071 | * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split | ||
1072 | * @adapter: board private structure | ||
1073 | * | ||
1074 | * the return value indicates whether actual cleaning was done; there | ||
1075 | * is no guarantee that everything was cleaned | ||
1076 | **/ | ||
1077 | static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | ||
1078 | int *work_done, int work_to_do) | ||
1079 | { | ||
1080 | struct e1000_hw *hw = &adapter->hw; | ||
1081 | union e1000_rx_desc_packet_split *rx_desc, *next_rxd; | ||
1082 | struct net_device *netdev = adapter->netdev; | ||
1083 | struct pci_dev *pdev = adapter->pdev; | ||
1084 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
1085 | struct e1000_buffer *buffer_info, *next_buffer; | ||
1086 | struct e1000_ps_page *ps_page; | ||
1087 | struct sk_buff *skb; | ||
1088 | unsigned int i, j; | ||
1089 | u32 length, staterr; | ||
1090 | int cleaned_count = 0; | ||
1091 | bool cleaned = 0; | ||
1092 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; | ||
1093 | |||
1094 | i = rx_ring->next_to_clean; | ||
1095 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); | ||
1096 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); | ||
1097 | buffer_info = &rx_ring->buffer_info[i]; | ||
1098 | |||
1099 | while (staterr & E1000_RXD_STAT_DD) { | ||
1100 | if (*work_done >= work_to_do) | ||
1101 | break; | ||
1102 | (*work_done)++; | ||
1103 | skb = buffer_info->skb; | ||
1104 | rmb(); /* read descriptor and rx_buffer_info after status DD */ | ||
1105 | |||
1106 | /* in the packet split case this is header only */ | ||
1107 | prefetch(skb->data - NET_IP_ALIGN); | ||
1108 | |||
1109 | i++; | ||
1110 | if (i == rx_ring->count) | ||
1111 | i = 0; | ||
1112 | next_rxd = E1000_RX_DESC_PS(*rx_ring, i); | ||
1113 | prefetch(next_rxd); | ||
1114 | |||
1115 | next_buffer = &rx_ring->buffer_info[i]; | ||
1116 | |||
1117 | cleaned = 1; | ||
1118 | cleaned_count++; | ||
1119 | dma_unmap_single(&pdev->dev, buffer_info->dma, | ||
1120 | adapter->rx_ps_bsize0, DMA_FROM_DEVICE); | ||
1121 | buffer_info->dma = 0; | ||
1122 | |||
1123 | /* see !EOP comment in other Rx routine */ | ||
1124 | if (!(staterr & E1000_RXD_STAT_EOP)) | ||
1125 | adapter->flags2 |= FLAG2_IS_DISCARDING; | ||
1126 | |||
1127 | if (adapter->flags2 & FLAG2_IS_DISCARDING) { | ||
1128 | e_dbg("Packet Split buffers didn't pick up the full " | ||
1129 | "packet\n"); | ||
1130 | dev_kfree_skb_irq(skb); | ||
1131 | if (staterr & E1000_RXD_STAT_EOP) | ||
1132 | adapter->flags2 &= ~FLAG2_IS_DISCARDING; | ||
1133 | goto next_desc; | ||
1134 | } | ||
1135 | |||
1136 | if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { | ||
1137 | dev_kfree_skb_irq(skb); | ||
1138 | goto next_desc; | ||
1139 | } | ||
1140 | |||
1141 | length = le16_to_cpu(rx_desc->wb.middle.length0); | ||
1142 | |||
1143 | if (!length) { | ||
1144 | e_dbg("Last part of the packet spanning multiple " | ||
1145 | "descriptors\n"); | ||
1146 | dev_kfree_skb_irq(skb); | ||
1147 | goto next_desc; | ||
1148 | } | ||
1149 | |||
1150 | /* Good Receive */ | ||
1151 | skb_put(skb, length); | ||
1152 | |||
1153 | { | ||
1154 | /* | ||
1155 | * this looks ugly, but it seems compiler issues make it | ||
1156 | * more efficient than reusing j | ||
1157 | */ | ||
1158 | int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); | ||
1159 | |||
1160 | /* | ||
1161 | * page alloc/put takes too long and affects small packet | ||
1162 | * throughput, so unsplit small packets and save the alloc/put; | ||
1163 | * it is only valid in softirq (napi) context to call kmap_* | ||
1164 | */ | ||
1165 | if (l1 && (l1 <= copybreak) && | ||
1166 | ((length + l1) <= adapter->rx_ps_bsize0)) { | ||
1167 | u8 *vaddr; | ||
1168 | |||
1169 | ps_page = &buffer_info->ps_pages[0]; | ||
1170 | |||
1171 | /* | ||
1172 | * there is no documentation about how to call | ||
1173 | * kmap_atomic, so we can't hold the mapping | ||
1174 | * very long | ||
1175 | */ | ||
1176 | dma_sync_single_for_cpu(&pdev->dev, ps_page->dma, | ||
1177 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
1178 | vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ); | ||
1179 | memcpy(skb_tail_pointer(skb), vaddr, l1); | ||
1180 | kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); | ||
1181 | dma_sync_single_for_device(&pdev->dev, ps_page->dma, | ||
1182 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
1183 | |||
1184 | /* remove the CRC */ | ||
1185 | if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) | ||
1186 | l1 -= 4; | ||
1187 | |||
1188 | skb_put(skb, l1); | ||
1189 | goto copydone; | ||
1190 | } /* if */ | ||
1191 | } | ||
1192 | |||
1193 | for (j = 0; j < PS_PAGE_BUFFERS; j++) { | ||
1194 | length = le16_to_cpu(rx_desc->wb.upper.length[j]); | ||
1195 | if (!length) | ||
1196 | break; | ||
1197 | |||
1198 | ps_page = &buffer_info->ps_pages[j]; | ||
1199 | dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, | ||
1200 | DMA_FROM_DEVICE); | ||
1201 | ps_page->dma = 0; | ||
1202 | skb_fill_page_desc(skb, j, ps_page->page, 0, length); | ||
1203 | ps_page->page = NULL; | ||
1204 | skb->len += length; | ||
1205 | skb->data_len += length; | ||
1206 | skb->truesize += length; | ||
1207 | } | ||
1208 | |||
1209 | /* strip the ethernet CRC; the problem is we're using pages now so | ||
1210 | * this whole operation can get a little cpu intensive | ||
1211 | */ | ||
1212 | if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) | ||
1213 | pskb_trim(skb, skb->len - 4); | ||
1214 | |||
1215 | copydone: | ||
1216 | total_rx_bytes += skb->len; | ||
1217 | total_rx_packets++; | ||
1218 | |||
1219 | e1000_rx_checksum(adapter, staterr, le16_to_cpu( | ||
1220 | rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); | ||
1221 | |||
1222 | if (rx_desc->wb.upper.header_status & | ||
1223 | cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) | ||
1224 | adapter->rx_hdr_split++; | ||
1225 | |||
1226 | e1000_receive_skb(adapter, netdev, skb, | ||
1227 | staterr, rx_desc->wb.middle.vlan); | ||
1228 | |||
1229 | next_desc: | ||
1230 | rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); | ||
1231 | buffer_info->skb = NULL; | ||
1232 | |||
1233 | /* return some buffers to hardware, one at a time is too slow */ | ||
1234 | if (cleaned_count >= E1000_RX_BUFFER_WRITE) { | ||
1235 | adapter->alloc_rx_buf(adapter, cleaned_count, | ||
1236 | GFP_ATOMIC); | ||
1237 | cleaned_count = 0; | ||
1238 | } | ||
1239 | |||
1240 | /* use prefetched values */ | ||
1241 | rx_desc = next_rxd; | ||
1242 | buffer_info = next_buffer; | ||
1243 | |||
1244 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); | ||
1245 | } | ||
1246 | rx_ring->next_to_clean = i; | ||
1247 | |||
1248 | cleaned_count = e1000_desc_unused(rx_ring); | ||
1249 | if (cleaned_count) | ||
1250 | adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); | ||
1251 | |||
1252 | adapter->total_rx_bytes += total_rx_bytes; | ||
1253 | adapter->total_rx_packets += total_rx_packets; | ||
1254 | return cleaned; | ||
1255 | } | ||
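/*
 * A minimal sketch (not from the driver source) of the copybreak test used
 * in the packet-split receive path above: a frame whose first data segment
 * is at or below the copybreak threshold, and whose header plus data still
 * fit in the rx_ps_bsize0 header buffer, is memcpy'd into the skb linear
 * area so its page can be reused; anything larger keeps the page as a
 * fragment.  The values in the example comment are assumptions chosen only
 * for illustration.
 */
static inline bool ps_copybreak_example(unsigned int hdr_len, unsigned int l1,
					unsigned int copybreak_thresh,
					unsigned int bsize0)
{
	/* e.g. hdr_len = 90, l1 = 120, copybreak_thresh = 256, bsize0 = 128:
	 * 120 <= 256 but 90 + 120 > 128, so the page stays a fragment.
	 */
	return l1 && l1 <= copybreak_thresh && (hdr_len + l1) <= bsize0;
}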
1256 | |||
1257 | /** | ||
1258 | * e1000_consume_page - account a used page's data against an skb ||
1259 | **/ | ||
1260 | static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, | ||
1261 | u16 length) | ||
1262 | { | ||
1263 | bi->page = NULL; | ||
1264 | skb->len += length; | ||
1265 | skb->data_len += length; | ||
1266 | skb->truesize += length; | ||
1267 | } | ||
1268 | |||
1269 | /** | ||
1270 | * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy | ||
1271 | * @adapter: board private structure | ||
1272 | * | ||
1273 | * the return value indicates whether actual cleaning was done, there | ||
1274 | * is no guarantee that everything was cleaned | ||
1275 | **/ | ||
1276 | |||
1277 | static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, | ||
1278 | int *work_done, int work_to_do) | ||
1279 | { | ||
1280 | struct net_device *netdev = adapter->netdev; | ||
1281 | struct pci_dev *pdev = adapter->pdev; | ||
1282 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
1283 | struct e1000_rx_desc *rx_desc, *next_rxd; | ||
1284 | struct e1000_buffer *buffer_info, *next_buffer; | ||
1285 | u32 length; | ||
1286 | unsigned int i; | ||
1287 | int cleaned_count = 0; | ||
1288 | bool cleaned = false; | ||
1289 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; ||
1290 | |||
1291 | i = rx_ring->next_to_clean; | ||
1292 | rx_desc = E1000_RX_DESC(*rx_ring, i); | ||
1293 | buffer_info = &rx_ring->buffer_info[i]; | ||
1294 | |||
1295 | while (rx_desc->status & E1000_RXD_STAT_DD) { | ||
1296 | struct sk_buff *skb; | ||
1297 | u8 status; | ||
1298 | |||
1299 | if (*work_done >= work_to_do) | ||
1300 | break; | ||
1301 | (*work_done)++; | ||
1302 | rmb(); /* read descriptor and rx_buffer_info after status DD */ | ||
1303 | |||
1304 | status = rx_desc->status; | ||
1305 | skb = buffer_info->skb; | ||
1306 | buffer_info->skb = NULL; | ||
1307 | |||
1308 | ++i; | ||
1309 | if (i == rx_ring->count) | ||
1310 | i = 0; | ||
1311 | next_rxd = E1000_RX_DESC(*rx_ring, i); | ||
1312 | prefetch(next_rxd); | ||
1313 | |||
1314 | next_buffer = &rx_ring->buffer_info[i]; | ||
1315 | |||
1316 | cleaned = true; | ||
1317 | cleaned_count++; | ||
1318 | dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, | ||
1319 | DMA_FROM_DEVICE); | ||
1320 | buffer_info->dma = 0; | ||
1321 | |||
1322 | length = le16_to_cpu(rx_desc->length); | ||
1323 | |||
1324 | /* errors is only valid for DD + EOP descriptors */ | ||
1325 | if (unlikely((status & E1000_RXD_STAT_EOP) && | ||
1326 | (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { | ||
1327 | /* recycle both page and skb */ | ||
1328 | buffer_info->skb = skb; | ||
1329 | /* an error means any chain goes out the window | ||
1330 | * too */ | ||
1331 | if (rx_ring->rx_skb_top) | ||
1332 | dev_kfree_skb_irq(rx_ring->rx_skb_top); | ||
1333 | rx_ring->rx_skb_top = NULL; | ||
1334 | goto next_desc; | ||
1335 | } | ||
1336 | |||
1337 | #define rxtop (rx_ring->rx_skb_top) | ||
1338 | if (!(status & E1000_RXD_STAT_EOP)) { | ||
1339 | /* this descriptor is only the beginning (or middle) */ | ||
1340 | if (!rxtop) { | ||
1341 | /* this is the beginning of a chain */ | ||
1342 | rxtop = skb; | ||
1343 | skb_fill_page_desc(rxtop, 0, buffer_info->page, | ||
1344 | 0, length); | ||
1345 | } else { | ||
1346 | /* this is the middle of a chain */ | ||
1347 | skb_fill_page_desc(rxtop, | ||
1348 | skb_shinfo(rxtop)->nr_frags, | ||
1349 | buffer_info->page, 0, length); | ||
1350 | /* re-use the skb, only consumed the page */ | ||
1351 | buffer_info->skb = skb; | ||
1352 | } | ||
1353 | e1000_consume_page(buffer_info, rxtop, length); | ||
1354 | goto next_desc; | ||
1355 | } else { | ||
1356 | if (rxtop) { | ||
1357 | /* end of the chain */ | ||
1358 | skb_fill_page_desc(rxtop, | ||
1359 | skb_shinfo(rxtop)->nr_frags, | ||
1360 | buffer_info->page, 0, length); | ||
1361 | /* re-use the current skb, we only consumed the | ||
1362 | * page */ | ||
1363 | buffer_info->skb = skb; | ||
1364 | skb = rxtop; | ||
1365 | rxtop = NULL; | ||
1366 | e1000_consume_page(buffer_info, skb, length); | ||
1367 | } else { | ||
1368 | /* no chain and got EOP: this buffer is the whole ||
1369 | * packet; copybreak to save the put_page/alloc_page */ ||
1370 | if (length <= copybreak && | ||
1371 | skb_tailroom(skb) >= length) { | ||
1372 | u8 *vaddr; | ||
1373 | vaddr = kmap_atomic(buffer_info->page, | ||
1374 | KM_SKB_DATA_SOFTIRQ); | ||
1375 | memcpy(skb_tail_pointer(skb), vaddr, | ||
1376 | length); | ||
1377 | kunmap_atomic(vaddr, | ||
1378 | KM_SKB_DATA_SOFTIRQ); | ||
1379 | /* re-use the page, so don't erase | ||
1380 | * buffer_info->page */ | ||
1381 | skb_put(skb, length); | ||
1382 | } else { | ||
1383 | skb_fill_page_desc(skb, 0, | ||
1384 | buffer_info->page, 0, | ||
1385 | length); | ||
1386 | e1000_consume_page(buffer_info, skb, | ||
1387 | length); | ||
1388 | } | ||
1389 | } | ||
1390 | } | ||
1391 | |||
1392 | /* Receive Checksum Offload XXX recompute due to CRC strip? */ | ||
1393 | e1000_rx_checksum(adapter, | ||
1394 | (u32)(status) | | ||
1395 | ((u32)(rx_desc->errors) << 24), | ||
1396 | le16_to_cpu(rx_desc->csum), skb); | ||
1397 | |||
1398 | /* probably a little skewed due to removing CRC */ | ||
1399 | total_rx_bytes += skb->len; | ||
1400 | total_rx_packets++; | ||
1401 | |||
1402 | /* eth type trans needs skb->data to point to something */ | ||
1403 | if (!pskb_may_pull(skb, ETH_HLEN)) { | ||
1404 | e_err("pskb_may_pull failed.\n"); | ||
1405 | dev_kfree_skb_irq(skb); | ||
1406 | goto next_desc; | ||
1407 | } | ||
1408 | |||
1409 | e1000_receive_skb(adapter, netdev, skb, status, | ||
1410 | rx_desc->special); | ||
1411 | |||
1412 | next_desc: | ||
1413 | rx_desc->status = 0; | ||
1414 | |||
1415 | /* return some buffers to hardware, one at a time is too slow */ | ||
1416 | if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { | ||
1417 | adapter->alloc_rx_buf(adapter, cleaned_count, | ||
1418 | GFP_ATOMIC); | ||
1419 | cleaned_count = 0; | ||
1420 | } | ||
1421 | |||
1422 | /* use prefetched values */ | ||
1423 | rx_desc = next_rxd; | ||
1424 | buffer_info = next_buffer; | ||
1425 | } | ||
1426 | rx_ring->next_to_clean = i; | ||
1427 | |||
1428 | cleaned_count = e1000_desc_unused(rx_ring); | ||
1429 | if (cleaned_count) | ||
1430 | adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); | ||
1431 | |||
1432 | adapter->total_rx_bytes += total_rx_bytes; | ||
1433 | adapter->total_rx_packets += total_rx_packets; | ||
1434 | return cleaned; | ||
1435 | } | ||
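/*
 * A minimal sketch (not from the driver source) of the descriptor
 * accounting behind the chained jumbo receive above: with PAGE_SIZE
 * receive buffers a frame occupies ceil(frame_len / buf_len) descriptors,
 * and only the last descriptor carries the EOP bit.  For example, a
 * 9000-byte frame on 4096-byte pages spans three descriptors holding
 * 4096 + 4096 + 808 bytes.
 */
static inline unsigned int jumbo_rx_descs_example(unsigned int frame_len,
						  unsigned int buf_len)
{
	return (frame_len + buf_len - 1) / buf_len;	/* e.g. 9000, 4096 -> 3 */
}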
1436 | |||
1437 | /** | ||
1438 | * e1000_clean_rx_ring - Free Rx Buffers per Queue | ||
1439 | * @adapter: board private structure | ||
1440 | **/ | ||
1441 | static void e1000_clean_rx_ring(struct e1000_adapter *adapter) | ||
1442 | { | ||
1443 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
1444 | struct e1000_buffer *buffer_info; | ||
1445 | struct e1000_ps_page *ps_page; | ||
1446 | struct pci_dev *pdev = adapter->pdev; | ||
1447 | unsigned int i, j; | ||
1448 | |||
1449 | /* Free all the Rx ring sk_buffs */ | ||
1450 | for (i = 0; i < rx_ring->count; i++) { | ||
1451 | buffer_info = &rx_ring->buffer_info[i]; | ||
1452 | if (buffer_info->dma) { | ||
1453 | if (adapter->clean_rx == e1000_clean_rx_irq) | ||
1454 | dma_unmap_single(&pdev->dev, buffer_info->dma, | ||
1455 | adapter->rx_buffer_len, | ||
1456 | DMA_FROM_DEVICE); | ||
1457 | else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) | ||
1458 | dma_unmap_page(&pdev->dev, buffer_info->dma, | ||
1459 | PAGE_SIZE, | ||
1460 | DMA_FROM_DEVICE); | ||
1461 | else if (adapter->clean_rx == e1000_clean_rx_irq_ps) | ||
1462 | dma_unmap_single(&pdev->dev, buffer_info->dma, | ||
1463 | adapter->rx_ps_bsize0, | ||
1464 | DMA_FROM_DEVICE); | ||
1465 | buffer_info->dma = 0; | ||
1466 | } | ||
1467 | |||
1468 | if (buffer_info->page) { | ||
1469 | put_page(buffer_info->page); | ||
1470 | buffer_info->page = NULL; | ||
1471 | } | ||
1472 | |||
1473 | if (buffer_info->skb) { | ||
1474 | dev_kfree_skb(buffer_info->skb); | ||
1475 | buffer_info->skb = NULL; | ||
1476 | } | ||
1477 | |||
1478 | for (j = 0; j < PS_PAGE_BUFFERS; j++) { | ||
1479 | ps_page = &buffer_info->ps_pages[j]; | ||
1480 | if (!ps_page->page) | ||
1481 | break; | ||
1482 | dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, | ||
1483 | DMA_FROM_DEVICE); | ||
1484 | ps_page->dma = 0; | ||
1485 | put_page(ps_page->page); | ||
1486 | ps_page->page = NULL; | ||
1487 | } | ||
1488 | } | ||
1489 | |||
1490 | /* there also may be some cached data from a chained receive */ | ||
1491 | if (rx_ring->rx_skb_top) { | ||
1492 | dev_kfree_skb(rx_ring->rx_skb_top); | ||
1493 | rx_ring->rx_skb_top = NULL; | ||
1494 | } | ||
1495 | |||
1496 | /* Zero out the descriptor ring */ | ||
1497 | memset(rx_ring->desc, 0, rx_ring->size); | ||
1498 | |||
1499 | rx_ring->next_to_clean = 0; | ||
1500 | rx_ring->next_to_use = 0; | ||
1501 | adapter->flags2 &= ~FLAG2_IS_DISCARDING; | ||
1502 | |||
1503 | writel(0, adapter->hw.hw_addr + rx_ring->head); | ||
1504 | writel(0, adapter->hw.hw_addr + rx_ring->tail); | ||
1505 | } | ||
1506 | |||
1507 | static void e1000e_downshift_workaround(struct work_struct *work) | ||
1508 | { | ||
1509 | struct e1000_adapter *adapter = container_of(work, | ||
1510 | struct e1000_adapter, downshift_task); | ||
1511 | |||
1512 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
1513 | return; | ||
1514 | |||
1515 | e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); | ||
1516 | } | ||
1517 | |||
1518 | /** | ||
1519 | * e1000_intr_msi - Interrupt Handler | ||
1520 | * @irq: interrupt number | ||
1521 | * @data: pointer to a network interface device structure | ||
1522 | **/ | ||
1523 | static irqreturn_t e1000_intr_msi(int irq, void *data) | ||
1524 | { | ||
1525 | struct net_device *netdev = data; | ||
1526 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1527 | struct e1000_hw *hw = &adapter->hw; | ||
1528 | u32 icr = er32(ICR); | ||
1529 | |||
1530 | /* | ||
1531 | * read ICR disables interrupts using IAM | ||
1532 | */ | ||
1533 | |||
1534 | if (icr & E1000_ICR_LSC) { | ||
1535 | hw->mac.get_link_status = 1; | ||
1536 | /* | ||
1537 | * ICH8 workaround-- Call gig speed drop workaround on cable | ||
1538 | * disconnect (LSC) before accessing any PHY registers | ||
1539 | */ | ||
1540 | if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && | ||
1541 | (!(er32(STATUS) & E1000_STATUS_LU))) | ||
1542 | schedule_work(&adapter->downshift_task); | ||
1543 | |||
1544 | /* | ||
1545 | * 80003ES2LAN workaround-- For packet buffer work-around on | ||
1546 | * link down event; disable receives here in the ISR and reset | ||
1547 | * adapter in watchdog | ||
1548 | */ | ||
1549 | if (netif_carrier_ok(netdev) && | ||
1550 | adapter->flags & FLAG_RX_NEEDS_RESTART) { | ||
1551 | /* disable receives */ | ||
1552 | u32 rctl = er32(RCTL); | ||
1553 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | ||
1554 | adapter->flags |= FLAG_RX_RESTART_NOW; | ||
1555 | } | ||
1556 | /* guard against interrupt when we're going down */ | ||
1557 | if (!test_bit(__E1000_DOWN, &adapter->state)) | ||
1558 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | ||
1559 | } | ||
1560 | |||
1561 | if (napi_schedule_prep(&adapter->napi)) { | ||
1562 | adapter->total_tx_bytes = 0; | ||
1563 | adapter->total_tx_packets = 0; | ||
1564 | adapter->total_rx_bytes = 0; | ||
1565 | adapter->total_rx_packets = 0; | ||
1566 | __napi_schedule(&adapter->napi); | ||
1567 | } | ||
1568 | |||
1569 | return IRQ_HANDLED; | ||
1570 | } | ||
1571 | |||
1572 | /** | ||
1573 | * e1000_intr - Interrupt Handler | ||
1574 | * @irq: interrupt number | ||
1575 | * @data: pointer to a network interface device structure | ||
1576 | **/ | ||
1577 | static irqreturn_t e1000_intr(int irq, void *data) | ||
1578 | { | ||
1579 | struct net_device *netdev = data; | ||
1580 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1581 | struct e1000_hw *hw = &adapter->hw; | ||
1582 | u32 rctl, icr = er32(ICR); | ||
1583 | |||
1584 | if (!icr || test_bit(__E1000_DOWN, &adapter->state)) | ||
1585 | return IRQ_NONE; /* Not our interrupt */ | ||
1586 | |||
1587 | /* | ||
1588 | * IMS will not auto-mask if INT_ASSERTED is not set, and if it is | ||
1589 | * not set, then the adapter didn't send an interrupt | ||
1590 | */ | ||
1591 | if (!(icr & E1000_ICR_INT_ASSERTED)) | ||
1592 | return IRQ_NONE; | ||
1593 | |||
1594 | /* | ||
1595 | * Interrupt Auto-Mask...upon reading ICR, | ||
1596 | * interrupts are masked. No need for the | ||
1597 | * IMC write | ||
1598 | */ | ||
1599 | |||
1600 | if (icr & E1000_ICR_LSC) { | ||
1601 | hw->mac.get_link_status = 1; | ||
1602 | /* | ||
1603 | * ICH8 workaround-- Call gig speed drop workaround on cable | ||
1604 | * disconnect (LSC) before accessing any PHY registers | ||
1605 | */ | ||
1606 | if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && | ||
1607 | (!(er32(STATUS) & E1000_STATUS_LU))) | ||
1608 | schedule_work(&adapter->downshift_task); | ||
1609 | |||
1610 | /* | ||
1611 | * 80003ES2LAN workaround-- | ||
1612 | * For packet buffer work-around on link down event; | ||
1613 | * disable receives here in the ISR and | ||
1614 | * reset adapter in watchdog | ||
1615 | */ | ||
1616 | if (netif_carrier_ok(netdev) && | ||
1617 | (adapter->flags & FLAG_RX_NEEDS_RESTART)) { | ||
1618 | /* disable receives */ | ||
1619 | rctl = er32(RCTL); | ||
1620 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | ||
1621 | adapter->flags |= FLAG_RX_RESTART_NOW; | ||
1622 | } | ||
1623 | /* guard against interrupt when we're going down */ | ||
1624 | if (!test_bit(__E1000_DOWN, &adapter->state)) | ||
1625 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | ||
1626 | } | ||
1627 | |||
1628 | if (napi_schedule_prep(&adapter->napi)) { | ||
1629 | adapter->total_tx_bytes = 0; | ||
1630 | adapter->total_tx_packets = 0; | ||
1631 | adapter->total_rx_bytes = 0; | ||
1632 | adapter->total_rx_packets = 0; | ||
1633 | __napi_schedule(&adapter->napi); | ||
1634 | } | ||
1635 | |||
1636 | return IRQ_HANDLED; | ||
1637 | } | ||
1638 | |||
1639 | static irqreturn_t e1000_msix_other(int irq, void *data) | ||
1640 | { | ||
1641 | struct net_device *netdev = data; | ||
1642 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1643 | struct e1000_hw *hw = &adapter->hw; | ||
1644 | u32 icr = er32(ICR); | ||
1645 | |||
1646 | if (!(icr & E1000_ICR_INT_ASSERTED)) { | ||
1647 | if (!test_bit(__E1000_DOWN, &adapter->state)) | ||
1648 | ew32(IMS, E1000_IMS_OTHER); | ||
1649 | return IRQ_NONE; | ||
1650 | } | ||
1651 | |||
1652 | if (icr & adapter->eiac_mask) | ||
1653 | ew32(ICS, (icr & adapter->eiac_mask)); | ||
1654 | |||
1655 | if (icr & E1000_ICR_OTHER) { | ||
1656 | if (!(icr & E1000_ICR_LSC)) | ||
1657 | goto no_link_interrupt; | ||
1658 | hw->mac.get_link_status = 1; | ||
1659 | /* guard against interrupt when we're going down */ | ||
1660 | if (!test_bit(__E1000_DOWN, &adapter->state)) | ||
1661 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | ||
1662 | } | ||
1663 | |||
1664 | no_link_interrupt: | ||
1665 | if (!test_bit(__E1000_DOWN, &adapter->state)) | ||
1666 | ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER); | ||
1667 | |||
1668 | return IRQ_HANDLED; | ||
1669 | } | ||
1670 | |||
1671 | |||
1672 | static irqreturn_t e1000_intr_msix_tx(int irq, void *data) | ||
1673 | { | ||
1674 | struct net_device *netdev = data; | ||
1675 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1676 | struct e1000_hw *hw = &adapter->hw; | ||
1677 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
1678 | |||
1679 | |||
1680 | adapter->total_tx_bytes = 0; | ||
1681 | adapter->total_tx_packets = 0; | ||
1682 | |||
1683 | if (!e1000_clean_tx_irq(adapter)) | ||
1684 | /* Ring was not completely cleaned, so fire another interrupt */ | ||
1685 | ew32(ICS, tx_ring->ims_val); | ||
1686 | |||
1687 | return IRQ_HANDLED; | ||
1688 | } | ||
1689 | |||
1690 | static irqreturn_t e1000_intr_msix_rx(int irq, void *data) | ||
1691 | { | ||
1692 | struct net_device *netdev = data; | ||
1693 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1694 | |||
1695 | /* Write the ITR value calculated at the end of the | ||
1696 | * previous interrupt. | ||
1697 | */ | ||
1698 | if (adapter->rx_ring->set_itr) { | ||
1699 | writel(1000000000 / (adapter->rx_ring->itr_val * 256), | ||
1700 | adapter->hw.hw_addr + adapter->rx_ring->itr_register); | ||
1701 | adapter->rx_ring->set_itr = 0; | ||
1702 | } | ||
1703 | |||
1704 | if (napi_schedule_prep(&adapter->napi)) { | ||
1705 | adapter->total_rx_bytes = 0; | ||
1706 | adapter->total_rx_packets = 0; | ||
1707 | __napi_schedule(&adapter->napi); | ||
1708 | } | ||
1709 | return IRQ_HANDLED; | ||
1710 | } | ||
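/*
 * A minimal sketch (not from the driver source) of the ITR conversion done
 * by the writel() above: the itr_val kept by the driver is a rate in
 * interrupts/second, while the ITR/EITR registers are programmed with the
 * interval between interrupts in 256 ns units, hence 10^9 / (rate * 256).
 * Example: 20000 ints/s -> 1000000000 / (20000 * 256) = 195, i.e. roughly
 * 195 * 256 ns ~= 50 us between interrupts.
 */
static inline u32 itr_rate_to_reg_example(u32 ints_per_sec)
{
	return 1000000000 / (ints_per_sec * 256);
}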
1711 | |||
1712 | /** | ||
1713 | * e1000_configure_msix - Configure MSI-X hardware | ||
1714 | * | ||
1715 | * e1000_configure_msix sets up the hardware to properly | ||
1716 | * generate MSI-X interrupts. | ||
1717 | **/ | ||
1718 | static void e1000_configure_msix(struct e1000_adapter *adapter) | ||
1719 | { | ||
1720 | struct e1000_hw *hw = &adapter->hw; | ||
1721 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
1722 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
1723 | int vector = 0; | ||
1724 | u32 ctrl_ext, ivar = 0; | ||
1725 | |||
1726 | adapter->eiac_mask = 0; | ||
1727 | |||
1728 | /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ | ||
1729 | if (hw->mac.type == e1000_82574) { | ||
1730 | u32 rfctl = er32(RFCTL); | ||
1731 | rfctl |= E1000_RFCTL_ACK_DIS; | ||
1732 | ew32(RFCTL, rfctl); | ||
1733 | } | ||
1734 | |||
1735 | #define E1000_IVAR_INT_ALLOC_VALID 0x8 | ||
1736 | /* Configure Rx vector */ | ||
1737 | rx_ring->ims_val = E1000_IMS_RXQ0; | ||
1738 | adapter->eiac_mask |= rx_ring->ims_val; | ||
1739 | if (rx_ring->itr_val) | ||
1740 | writel(1000000000 / (rx_ring->itr_val * 256), | ||
1741 | hw->hw_addr + rx_ring->itr_register); | ||
1742 | else | ||
1743 | writel(1, hw->hw_addr + rx_ring->itr_register); | ||
1744 | ivar = E1000_IVAR_INT_ALLOC_VALID | vector; | ||
1745 | |||
1746 | /* Configure Tx vector */ | ||
1747 | tx_ring->ims_val = E1000_IMS_TXQ0; | ||
1748 | vector++; | ||
1749 | if (tx_ring->itr_val) | ||
1750 | writel(1000000000 / (tx_ring->itr_val * 256), | ||
1751 | hw->hw_addr + tx_ring->itr_register); | ||
1752 | else | ||
1753 | writel(1, hw->hw_addr + tx_ring->itr_register); | ||
1754 | adapter->eiac_mask |= tx_ring->ims_val; | ||
1755 | ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); | ||
1756 | |||
1757 | /* set vector for Other Causes, e.g. link changes */ | ||
1758 | vector++; | ||
1759 | ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); | ||
1760 | if (rx_ring->itr_val) | ||
1761 | writel(1000000000 / (rx_ring->itr_val * 256), | ||
1762 | hw->hw_addr + E1000_EITR_82574(vector)); | ||
1763 | else | ||
1764 | writel(1, hw->hw_addr + E1000_EITR_82574(vector)); | ||
1765 | |||
1766 | /* Cause Tx interrupts on every write back */ | ||
1767 | ivar |= (1 << 31); | ||
1768 | |||
1769 | ew32(IVAR, ivar); | ||
1770 | |||
1771 | /* enable MSI-X PBA support */ | ||
1772 | ctrl_ext = er32(CTRL_EXT); | ||
1773 | ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; | ||
1774 | |||
1775 | /* Auto-Mask Other interrupts upon ICR read */ | ||
1776 | #define E1000_EIAC_MASK_82574 0x01F00000 | ||
1777 | ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); | ||
1778 | ctrl_ext |= E1000_CTRL_EXT_EIAME; | ||
1779 | ew32(CTRL_EXT, ctrl_ext); | ||
1780 | e1e_flush(); | ||
1781 | } | ||
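/*
 * A worked example (not from the driver source) of the IVAR value built
 * above for the three-vector layout (RxQ0 -> vector 0, TxQ0 -> vector 1,
 * Other causes -> vector 2).  Each cause occupies an 8-bit field whose low
 * bits hold the vector number and whose bit 3 is the "allocation valid"
 * flag (0x8); bit 31 requests a Tx interrupt on every descriptor
 * write-back:
 *
 *   ivar = (1 << 31)            bit 31: Tx interrupt per write-back
 *        | ((0x8 | 2) << 16)    Other causes on vector 2
 *        | ((0x8 | 1) << 8)     TxQ0 on vector 1
 *        |  (0x8 | 0)           RxQ0 on vector 0
 *        = 0x800A0908
 */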
1782 | |||
1783 | void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) | ||
1784 | { | ||
1785 | if (adapter->msix_entries) { | ||
1786 | pci_disable_msix(adapter->pdev); | ||
1787 | kfree(adapter->msix_entries); | ||
1788 | adapter->msix_entries = NULL; | ||
1789 | } else if (adapter->flags & FLAG_MSI_ENABLED) { | ||
1790 | pci_disable_msi(adapter->pdev); | ||
1791 | adapter->flags &= ~FLAG_MSI_ENABLED; | ||
1792 | } | ||
1793 | } | ||
1794 | |||
1795 | /** | ||
1796 | * e1000e_set_interrupt_capability - set MSI or MSI-X if supported | ||
1797 | * | ||
1798 | * Attempt to configure interrupts using the best available | ||
1799 | * capabilities of the hardware and kernel. | ||
1800 | **/ | ||
1801 | void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) | ||
1802 | { | ||
1803 | int err; | ||
1804 | int i; | ||
1805 | |||
1806 | switch (adapter->int_mode) { | ||
1807 | case E1000E_INT_MODE_MSIX: | ||
1808 | if (adapter->flags & FLAG_HAS_MSIX) { | ||
1809 | adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ | ||
1810 | adapter->msix_entries = kcalloc(adapter->num_vectors, | ||
1811 | sizeof(struct msix_entry), | ||
1812 | GFP_KERNEL); | ||
1813 | if (adapter->msix_entries) { | ||
1814 | for (i = 0; i < adapter->num_vectors; i++) | ||
1815 | adapter->msix_entries[i].entry = i; | ||
1816 | |||
1817 | err = pci_enable_msix(adapter->pdev, | ||
1818 | adapter->msix_entries, | ||
1819 | adapter->num_vectors); | ||
1820 | if (err == 0) | ||
1821 | return; | ||
1822 | } | ||
1823 | /* MSI-X failed, so fall through and try MSI */ | ||
1824 | e_err("Failed to initialize MSI-X interrupts. " | ||
1825 | "Falling back to MSI interrupts.\n"); | ||
1826 | e1000e_reset_interrupt_capability(adapter); | ||
1827 | } | ||
1828 | adapter->int_mode = E1000E_INT_MODE_MSI; | ||
1829 | /* Fall through */ | ||
1830 | case E1000E_INT_MODE_MSI: | ||
1831 | if (!pci_enable_msi(adapter->pdev)) { | ||
1832 | adapter->flags |= FLAG_MSI_ENABLED; | ||
1833 | } else { | ||
1834 | adapter->int_mode = E1000E_INT_MODE_LEGACY; | ||
1835 | e_err("Failed to initialize MSI interrupts. Falling " | ||
1836 | "back to legacy interrupts.\n"); | ||
1837 | } | ||
1838 | /* Fall through */ | ||
1839 | case E1000E_INT_MODE_LEGACY: | ||
1840 | /* Don't do anything; this is the system default */ | ||
1841 | break; | ||
1842 | } | ||
1843 | |||
1844 | /* store the number of vectors being used */ | ||
1845 | adapter->num_vectors = 1; | ||
1846 | } | ||
1847 | |||
1848 | /** | ||
1849 | * e1000_request_msix - Initialize MSI-X interrupts | ||
1850 | * | ||
1851 | * e1000_request_msix allocates MSI-X vectors and requests interrupts from the | ||
1852 | * kernel. | ||
1853 | **/ | ||
1854 | static int e1000_request_msix(struct e1000_adapter *adapter) | ||
1855 | { | ||
1856 | struct net_device *netdev = adapter->netdev; | ||
1857 | int err = 0, vector = 0; | ||
1858 | |||
1859 | if (strlen(netdev->name) < (IFNAMSIZ - 5)) | ||
1860 | snprintf(adapter->rx_ring->name, | ||
1861 | sizeof(adapter->rx_ring->name) - 1, | ||
1862 | "%s-rx-0", netdev->name); | ||
1863 | else | ||
1864 | memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); | ||
1865 | err = request_irq(adapter->msix_entries[vector].vector, | ||
1866 | e1000_intr_msix_rx, 0, adapter->rx_ring->name, | ||
1867 | netdev); | ||
1868 | if (err) | ||
1869 | goto out; | ||
1870 | adapter->rx_ring->itr_register = E1000_EITR_82574(vector); | ||
1871 | adapter->rx_ring->itr_val = adapter->itr; | ||
1872 | vector++; | ||
1873 | |||
1874 | if (strlen(netdev->name) < (IFNAMSIZ - 5)) | ||
1875 | snprintf(adapter->tx_ring->name, | ||
1876 | sizeof(adapter->tx_ring->name) - 1, | ||
1877 | "%s-tx-0", netdev->name); | ||
1878 | else | ||
1879 | memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); | ||
1880 | err = request_irq(adapter->msix_entries[vector].vector, | ||
1881 | e1000_intr_msix_tx, 0, adapter->tx_ring->name, | ||
1882 | netdev); | ||
1883 | if (err) | ||
1884 | goto out; | ||
1885 | adapter->tx_ring->itr_register = E1000_EITR_82574(vector); | ||
1886 | adapter->tx_ring->itr_val = adapter->itr; | ||
1887 | vector++; | ||
1888 | |||
1889 | err = request_irq(adapter->msix_entries[vector].vector, | ||
1890 | e1000_msix_other, 0, netdev->name, netdev); | ||
1891 | if (err) | ||
1892 | goto out; | ||
1893 | |||
1894 | e1000_configure_msix(adapter); | ||
1895 | return 0; | ||
1896 | out: | ||
1897 | return err; | ||
1898 | } | ||
1899 | |||
1900 | /** | ||
1901 | * e1000_request_irq - initialize interrupts | ||
1902 | * | ||
1903 | * Attempts to configure interrupts using the best available | ||
1904 | * capabilities of the hardware and kernel. | ||
1905 | **/ | ||
1906 | static int e1000_request_irq(struct e1000_adapter *adapter) | ||
1907 | { | ||
1908 | struct net_device *netdev = adapter->netdev; | ||
1909 | int err; | ||
1910 | |||
1911 | if (adapter->msix_entries) { | ||
1912 | err = e1000_request_msix(adapter); | ||
1913 | if (!err) | ||
1914 | return err; | ||
1915 | /* fall back to MSI */ | ||
1916 | e1000e_reset_interrupt_capability(adapter); | ||
1917 | adapter->int_mode = E1000E_INT_MODE_MSI; | ||
1918 | e1000e_set_interrupt_capability(adapter); | ||
1919 | } | ||
1920 | if (adapter->flags & FLAG_MSI_ENABLED) { | ||
1921 | err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, | ||
1922 | netdev->name, netdev); | ||
1923 | if (!err) | ||
1924 | return err; | ||
1925 | |||
1926 | /* fall back to legacy interrupt */ | ||
1927 | e1000e_reset_interrupt_capability(adapter); | ||
1928 | adapter->int_mode = E1000E_INT_MODE_LEGACY; | ||
1929 | } | ||
1930 | |||
1931 | err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, | ||
1932 | netdev->name, netdev); | ||
1933 | if (err) | ||
1934 | e_err("Unable to allocate interrupt, Error: %d\n", err); | ||
1935 | |||
1936 | return err; | ||
1937 | } | ||
1938 | |||
1939 | static void e1000_free_irq(struct e1000_adapter *adapter) | ||
1940 | { | ||
1941 | struct net_device *netdev = adapter->netdev; | ||
1942 | |||
1943 | if (adapter->msix_entries) { | ||
1944 | int vector = 0; | ||
1945 | |||
1946 | free_irq(adapter->msix_entries[vector].vector, netdev); | ||
1947 | vector++; | ||
1948 | |||
1949 | free_irq(adapter->msix_entries[vector].vector, netdev); | ||
1950 | vector++; | ||
1951 | |||
1952 | /* Other Causes interrupt vector */ | ||
1953 | free_irq(adapter->msix_entries[vector].vector, netdev); | ||
1954 | return; | ||
1955 | } | ||
1956 | |||
1957 | free_irq(adapter->pdev->irq, netdev); | ||
1958 | } | ||
1959 | |||
1960 | /** | ||
1961 | * e1000_irq_disable - Mask off interrupt generation on the NIC | ||
1962 | **/ | ||
1963 | static void e1000_irq_disable(struct e1000_adapter *adapter) | ||
1964 | { | ||
1965 | struct e1000_hw *hw = &adapter->hw; | ||
1966 | |||
1967 | ew32(IMC, ~0); | ||
1968 | if (adapter->msix_entries) | ||
1969 | ew32(EIAC_82574, 0); | ||
1970 | e1e_flush(); | ||
1971 | |||
1972 | if (adapter->msix_entries) { | ||
1973 | int i; | ||
1974 | for (i = 0; i < adapter->num_vectors; i++) | ||
1975 | synchronize_irq(adapter->msix_entries[i].vector); | ||
1976 | } else { | ||
1977 | synchronize_irq(adapter->pdev->irq); | ||
1978 | } | ||
1979 | } | ||
1980 | |||
1981 | /** | ||
1982 | * e1000_irq_enable - Enable default interrupt generation settings | ||
1983 | **/ | ||
1984 | static void e1000_irq_enable(struct e1000_adapter *adapter) | ||
1985 | { | ||
1986 | struct e1000_hw *hw = &adapter->hw; | ||
1987 | |||
1988 | if (adapter->msix_entries) { | ||
1989 | ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); | ||
1990 | ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); | ||
1991 | } else { | ||
1992 | ew32(IMS, IMS_ENABLE_MASK); | ||
1993 | } | ||
1994 | e1e_flush(); | ||
1995 | } | ||
1996 | |||
1997 | /** | ||
1998 | * e1000e_get_hw_control - get control of the h/w from f/w | ||
1999 | * @adapter: address of board private structure | ||
2000 | * | ||
2001 | * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. | ||
2002 | * For ASF and Pass Through versions of f/w this means that | ||
2003 | * the driver is loaded. For AMT version (only with 82573) | ||
2004 | * of the f/w this means that the network i/f is open. | ||
2005 | **/ | ||
2006 | void e1000e_get_hw_control(struct e1000_adapter *adapter) | ||
2007 | { | ||
2008 | struct e1000_hw *hw = &adapter->hw; | ||
2009 | u32 ctrl_ext; | ||
2010 | u32 swsm; | ||
2011 | |||
2012 | /* Let firmware know the driver has taken over */ | ||
2013 | if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { | ||
2014 | swsm = er32(SWSM); | ||
2015 | ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); | ||
2016 | } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { | ||
2017 | ctrl_ext = er32(CTRL_EXT); | ||
2018 | ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); | ||
2019 | } | ||
2020 | } | ||
2021 | |||
2022 | /** | ||
2023 | * e1000e_release_hw_control - release control of the h/w to f/w | ||
2024 | * @adapter: address of board private structure | ||
2025 | * | ||
2026 | * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. | ||
2027 | * For ASF and Pass Through versions of f/w this means that the | ||
2028 | * driver is no longer loaded. For AMT version (only with 82573) ||
2029 | * of the f/w this means that the network i/f is closed. | ||
2030 | * | ||
2031 | **/ | ||
2032 | void e1000e_release_hw_control(struct e1000_adapter *adapter) | ||
2033 | { | ||
2034 | struct e1000_hw *hw = &adapter->hw; | ||
2035 | u32 ctrl_ext; | ||
2036 | u32 swsm; | ||
2037 | |||
2038 | /* Let firmware take over control of h/w */ ||
2039 | if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { | ||
2040 | swsm = er32(SWSM); | ||
2041 | ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); | ||
2042 | } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { | ||
2043 | ctrl_ext = er32(CTRL_EXT); | ||
2044 | ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); | ||
2045 | } | ||
2046 | } | ||
2047 | |||
2048 | /** | ||
2049 | * e1000_alloc_ring_dma - allocate descriptor memory for a ring structure ||
2050 | **/ | ||
2051 | static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, | ||
2052 | struct e1000_ring *ring) | ||
2053 | { | ||
2054 | struct pci_dev *pdev = adapter->pdev; | ||
2055 | |||
2056 | ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, | ||
2057 | GFP_KERNEL); | ||
2058 | if (!ring->desc) | ||
2059 | return -ENOMEM; | ||
2060 | |||
2061 | return 0; | ||
2062 | } | ||
2063 | |||
2064 | /** | ||
2065 | * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) | ||
2066 | * @adapter: board private structure | ||
2067 | * | ||
2068 | * Return 0 on success, negative on failure | ||
2069 | **/ | ||
2070 | int e1000e_setup_tx_resources(struct e1000_adapter *adapter) | ||
2071 | { | ||
2072 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
2073 | int err = -ENOMEM, size; | ||
2074 | |||
2075 | size = sizeof(struct e1000_buffer) * tx_ring->count; | ||
2076 | tx_ring->buffer_info = vzalloc(size); | ||
2077 | if (!tx_ring->buffer_info) | ||
2078 | goto err; | ||
2079 | |||
2080 | /* round up to nearest 4K */ | ||
2081 | tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); | ||
2082 | tx_ring->size = ALIGN(tx_ring->size, 4096); | ||
2083 | |||
2084 | err = e1000_alloc_ring_dma(adapter, tx_ring); | ||
2085 | if (err) | ||
2086 | goto err; | ||
2087 | |||
2088 | tx_ring->next_to_use = 0; | ||
2089 | tx_ring->next_to_clean = 0; | ||
2090 | |||
2091 | return 0; | ||
2092 | err: | ||
2093 | vfree(tx_ring->buffer_info); | ||
2094 | e_err("Unable to allocate memory for the transmit descriptor ring\n"); | ||
2095 | return err; | ||
2096 | } | ||
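/*
 * A minimal sketch (not from the driver source) of the descriptor memory
 * sizing above.  ALIGN(size, 4096) rounds the ring up to whole 4 KiB pages:
 * ALIGN(x, a) == (x + a - 1) & ~(a - 1).  Assuming the 16-byte legacy Tx
 * descriptor, a 256-entry ring needs exactly 4096 bytes, while an 80-entry
 * ring (1280 bytes) is still rounded up to 4096.
 */
static inline unsigned long ring_bytes_example(unsigned int count,
					       unsigned int desc_size)
{
	unsigned long size = (unsigned long)count * desc_size;

	return (size + 4095) & ~4095UL;	/* equivalent to ALIGN(size, 4096) */
}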
2097 | |||
2098 | /** | ||
2099 | * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) | ||
2100 | * @adapter: board private structure | ||
2101 | * | ||
2102 | * Returns 0 on success, negative on failure | ||
2103 | **/ | ||
2104 | int e1000e_setup_rx_resources(struct e1000_adapter *adapter) | ||
2105 | { | ||
2106 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
2107 | struct e1000_buffer *buffer_info; | ||
2108 | int i, size, desc_len, err = -ENOMEM; | ||
2109 | |||
2110 | size = sizeof(struct e1000_buffer) * rx_ring->count; | ||
2111 | rx_ring->buffer_info = vzalloc(size); | ||
2112 | if (!rx_ring->buffer_info) | ||
2113 | goto err; | ||
2114 | |||
2115 | for (i = 0; i < rx_ring->count; i++) { | ||
2116 | buffer_info = &rx_ring->buffer_info[i]; | ||
2117 | buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, | ||
2118 | sizeof(struct e1000_ps_page), | ||
2119 | GFP_KERNEL); | ||
2120 | if (!buffer_info->ps_pages) | ||
2121 | goto err_pages; | ||
2122 | } | ||
2123 | |||
2124 | desc_len = sizeof(union e1000_rx_desc_packet_split); | ||
2125 | |||
2126 | /* Round up to nearest 4K */ | ||
2127 | rx_ring->size = rx_ring->count * desc_len; | ||
2128 | rx_ring->size = ALIGN(rx_ring->size, 4096); | ||
2129 | |||
2130 | err = e1000_alloc_ring_dma(adapter, rx_ring); | ||
2131 | if (err) | ||
2132 | goto err_pages; | ||
2133 | |||
2134 | rx_ring->next_to_clean = 0; | ||
2135 | rx_ring->next_to_use = 0; | ||
2136 | rx_ring->rx_skb_top = NULL; | ||
2137 | |||
2138 | return 0; | ||
2139 | |||
2140 | err_pages: | ||
2141 | for (i = 0; i < rx_ring->count; i++) { | ||
2142 | buffer_info = &rx_ring->buffer_info[i]; | ||
2143 | kfree(buffer_info->ps_pages); | ||
2144 | } | ||
2145 | err: | ||
2146 | vfree(rx_ring->buffer_info); | ||
2147 | e_err("Unable to allocate memory for the receive descriptor ring\n"); | ||
2148 | return err; | ||
2149 | } | ||
2150 | |||
2151 | /** | ||
2152 | * e1000_clean_tx_ring - Free Tx Buffers | ||
2153 | * @adapter: board private structure | ||
2154 | **/ | ||
2155 | static void e1000_clean_tx_ring(struct e1000_adapter *adapter) | ||
2156 | { | ||
2157 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
2158 | struct e1000_buffer *buffer_info; | ||
2159 | unsigned long size; | ||
2160 | unsigned int i; | ||
2161 | |||
2162 | for (i = 0; i < tx_ring->count; i++) { | ||
2163 | buffer_info = &tx_ring->buffer_info[i]; | ||
2164 | e1000_put_txbuf(adapter, buffer_info); | ||
2165 | } | ||
2166 | |||
2167 | size = sizeof(struct e1000_buffer) * tx_ring->count; | ||
2168 | memset(tx_ring->buffer_info, 0, size); | ||
2169 | |||
2170 | memset(tx_ring->desc, 0, tx_ring->size); | ||
2171 | |||
2172 | tx_ring->next_to_use = 0; | ||
2173 | tx_ring->next_to_clean = 0; | ||
2174 | |||
2175 | writel(0, adapter->hw.hw_addr + tx_ring->head); | ||
2176 | writel(0, adapter->hw.hw_addr + tx_ring->tail); | ||
2177 | } | ||
2178 | |||
2179 | /** | ||
2180 | * e1000e_free_tx_resources - Free Tx Resources per Queue | ||
2181 | * @adapter: board private structure | ||
2182 | * | ||
2183 | * Free all transmit software resources | ||
2184 | **/ | ||
2185 | void e1000e_free_tx_resources(struct e1000_adapter *adapter) | ||
2186 | { | ||
2187 | struct pci_dev *pdev = adapter->pdev; | ||
2188 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
2189 | |||
2190 | e1000_clean_tx_ring(adapter); | ||
2191 | |||
2192 | vfree(tx_ring->buffer_info); | ||
2193 | tx_ring->buffer_info = NULL; | ||
2194 | |||
2195 | dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, | ||
2196 | tx_ring->dma); | ||
2197 | tx_ring->desc = NULL; | ||
2198 | } | ||
2199 | |||
2200 | /** | ||
2201 | * e1000e_free_rx_resources - Free Rx Resources | ||
2202 | * @adapter: board private structure | ||
2203 | * | ||
2204 | * Free all receive software resources | ||
2205 | **/ | ||
2206 | |||
2207 | void e1000e_free_rx_resources(struct e1000_adapter *adapter) | ||
2208 | { | ||
2209 | struct pci_dev *pdev = adapter->pdev; | ||
2210 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
2211 | int i; | ||
2212 | |||
2213 | e1000_clean_rx_ring(adapter); | ||
2214 | |||
2215 | for (i = 0; i < rx_ring->count; i++) | ||
2216 | kfree(rx_ring->buffer_info[i].ps_pages); | ||
2217 | |||
2218 | vfree(rx_ring->buffer_info); | ||
2219 | rx_ring->buffer_info = NULL; | ||
2220 | |||
2221 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, | ||
2222 | rx_ring->dma); | ||
2223 | rx_ring->desc = NULL; | ||
2224 | } | ||
2225 | |||
2226 | /** | ||
2227 | * e1000_update_itr - update the dynamic ITR value based on statistics | ||
2228 | * @adapter: pointer to adapter | ||
2229 | * @itr_setting: current adapter->itr | ||
2230 | * @packets: the number of packets during this measurement interval | ||
2231 | * @bytes: the number of bytes during this measurement interval | ||
2232 | * | ||
2233 | * Stores a new ITR value based on packets and byte | ||
2234 | * counts during the last interrupt. The advantage of per interrupt | ||
2235 | * computation is faster updates and more accurate ITR for the current | ||
2236 | * traffic pattern. Constants in this function were computed | ||
2237 | * based on theoretical maximum wire speed and thresholds were set based | ||
2238 | * on testing data as well as attempting to minimize response time | ||
2239 | * while increasing bulk throughput. This functionality is controlled | ||
2240 | * by the InterruptThrottleRate module parameter. | ||
2241 | **/ | ||
2242 | static unsigned int e1000_update_itr(struct e1000_adapter *adapter, | ||
2243 | u16 itr_setting, int packets, | ||
2244 | int bytes) | ||
2245 | { | ||
2246 | unsigned int retval = itr_setting; | ||
2247 | |||
2248 | if (packets == 0) | ||
2249 | goto update_itr_done; | ||
2250 | |||
2251 | switch (itr_setting) { | ||
2252 | case lowest_latency: | ||
2253 | /* handle TSO and jumbo frames */ | ||
2254 | if (bytes/packets > 8000) | ||
2255 | retval = bulk_latency; | ||
2256 | else if ((packets < 5) && (bytes > 512)) | ||
2257 | retval = low_latency; | ||
2258 | break; | ||
2259 | case low_latency: /* 50 usec aka 20000 ints/s */ | ||
2260 | if (bytes > 10000) { | ||
2261 | /* this if handles the TSO accounting */ | ||
2262 | if (bytes/packets > 8000) | ||
2263 | retval = bulk_latency; | ||
2264 | else if ((packets < 10) || ((bytes/packets) > 1200)) | ||
2265 | retval = bulk_latency; | ||
2266 | else if ((packets > 35)) | ||
2267 | retval = lowest_latency; | ||
2268 | } else if (bytes/packets > 2000) { | ||
2269 | retval = bulk_latency; | ||
2270 | } else if (packets <= 2 && bytes < 512) { | ||
2271 | retval = lowest_latency; | ||
2272 | } | ||
2273 | break; | ||
2274 | case bulk_latency: /* 250 usec aka 4000 ints/s */ | ||
2275 | if (bytes > 25000) { | ||
2276 | if (packets > 35) | ||
2277 | retval = low_latency; | ||
2278 | } else if (bytes < 6000) { | ||
2279 | retval = low_latency; | ||
2280 | } | ||
2281 | break; | ||
2282 | } | ||
2283 | |||
2284 | update_itr_done: | ||
2285 | return retval; | ||
2286 | } | ||
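/*
 * A worked example (not from the driver source) of the heuristic above,
 * starting from the low_latency class:
 *
 *   30 packets / 45000 bytes:  bytes > 10000 and bytes/packets = 1500,
 *     which is > 1200, so the class moves to bulk_latency (fewer, larger
 *     interrupts for a bulk stream).
 *   40 packets / 24000 bytes:  bytes > 10000, bytes/packets = 600 and
 *     packets > 35, so the class moves to lowest_latency (many small
 *     packets, favour response time).
 */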
2287 | |||
2288 | static void e1000_set_itr(struct e1000_adapter *adapter) | ||
2289 | { | ||
2290 | struct e1000_hw *hw = &adapter->hw; | ||
2291 | u16 current_itr; | ||
2292 | u32 new_itr = adapter->itr; | ||
2293 | |||
2294 | /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ | ||
2295 | if (adapter->link_speed != SPEED_1000) { | ||
2296 | current_itr = 0; | ||
2297 | new_itr = 4000; | ||
2298 | goto set_itr_now; | ||
2299 | } | ||
2300 | |||
2301 | if (adapter->flags2 & FLAG2_DISABLE_AIM) { | ||
2302 | new_itr = 0; | ||
2303 | goto set_itr_now; | ||
2304 | } | ||
2305 | |||
2306 | adapter->tx_itr = e1000_update_itr(adapter, | ||
2307 | adapter->tx_itr, | ||
2308 | adapter->total_tx_packets, | ||
2309 | adapter->total_tx_bytes); | ||
2310 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ | ||
2311 | if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) | ||
2312 | adapter->tx_itr = low_latency; | ||
2313 | |||
2314 | adapter->rx_itr = e1000_update_itr(adapter, | ||
2315 | adapter->rx_itr, | ||
2316 | adapter->total_rx_packets, | ||
2317 | adapter->total_rx_bytes); | ||
2318 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ | ||
2319 | if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) | ||
2320 | adapter->rx_itr = low_latency; | ||
2321 | |||
2322 | current_itr = max(adapter->rx_itr, adapter->tx_itr); | ||
2323 | |||
2324 | switch (current_itr) { | ||
2325 | /* counts and packets in update_itr are dependent on these numbers */ | ||
2326 | case lowest_latency: | ||
2327 | new_itr = 70000; | ||
2328 | break; | ||
2329 | case low_latency: | ||
2330 | new_itr = 20000; /* aka hwitr = ~200 */ | ||
2331 | break; | ||
2332 | case bulk_latency: | ||
2333 | new_itr = 4000; | ||
2334 | break; | ||
2335 | default: | ||
2336 | break; | ||
2337 | } | ||
2338 | |||
2339 | set_itr_now: | ||
2340 | if (new_itr != adapter->itr) { | ||
2341 | /* | ||
2342 | * this attempts to bias the interrupt rate towards Bulk | ||
2343 | * by adding intermediate steps when interrupt rate is | ||
2344 | * increasing | ||
2345 | */ | ||
2346 | new_itr = new_itr > adapter->itr ? | ||
2347 | min(adapter->itr + (new_itr >> 2), new_itr) : | ||
2348 | new_itr; | ||
2349 | adapter->itr = new_itr; | ||
2350 | adapter->rx_ring->itr_val = new_itr; | ||
2351 | if (adapter->msix_entries) | ||
2352 | adapter->rx_ring->set_itr = 1; | ||
2353 | else | ||
2354 | if (new_itr) | ||
2355 | ew32(ITR, 1000000000 / (new_itr * 256)); | ||
2356 | else | ||
2357 | ew32(ITR, 0); | ||
2358 | } | ||
2359 | } | ||
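/*
 * A worked example (not from the driver source) of the smoothing step
 * above: when the target rate is higher than the current one, the driver
 * only adds a quarter of the target per adjustment, so stepping from
 * bulk_latency (4000) towards low_latency (20000) goes
 * 4000 -> 9000 -> 14000 -> 19000 -> 20000 rather than jumping directly,
 * biasing the rate towards bulk when traffic oscillates.
 */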
2360 | |||
2361 | /** | ||
2362 | * e1000_alloc_queues - Allocate memory for all rings | ||
2363 | * @adapter: board private structure to initialize | ||
2364 | **/ | ||
2365 | static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) | ||
2366 | { | ||
2367 | adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); | ||
2368 | if (!adapter->tx_ring) | ||
2369 | goto err; | ||
2370 | |||
2371 | adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); | ||
2372 | if (!adapter->rx_ring) | ||
2373 | goto err; | ||
2374 | |||
2375 | return 0; | ||
2376 | err: | ||
2377 | e_err("Unable to allocate memory for queues\n"); | ||
2378 | kfree(adapter->rx_ring); | ||
2379 | kfree(adapter->tx_ring); | ||
2380 | return -ENOMEM; | ||
2381 | } | ||
2382 | |||
2383 | /** | ||
2384 | * e1000_clean - NAPI Rx polling callback | ||
2385 | * @napi: struct associated with this polling callback | ||
2386 | * @budget: amount of packets driver is allowed to process this poll | ||
2387 | **/ | ||
2388 | static int e1000_clean(struct napi_struct *napi, int budget) | ||
2389 | { | ||
2390 | struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); | ||
2391 | struct e1000_hw *hw = &adapter->hw; | ||
2392 | struct net_device *poll_dev = adapter->netdev; | ||
2393 | int tx_cleaned = 1, work_done = 0; | ||
2394 | |||
2395 | adapter = netdev_priv(poll_dev); | ||
2396 | |||
2397 | if (adapter->msix_entries && | ||
2398 | !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) | ||
2399 | goto clean_rx; | ||
2400 | |||
2401 | tx_cleaned = e1000_clean_tx_irq(adapter); | ||
2402 | |||
2403 | clean_rx: | ||
2404 | adapter->clean_rx(adapter, &work_done, budget); | ||
2405 | |||
2406 | if (!tx_cleaned) | ||
2407 | work_done = budget; | ||
2408 | |||
2409 | /* If budget not fully consumed, exit the polling mode */ | ||
2410 | if (work_done < budget) { | ||
2411 | if (adapter->itr_setting & 3) | ||
2412 | e1000_set_itr(adapter); | ||
2413 | napi_complete(napi); | ||
2414 | if (!test_bit(__E1000_DOWN, &adapter->state)) { | ||
2415 | if (adapter->msix_entries) | ||
2416 | ew32(IMS, adapter->rx_ring->ims_val); | ||
2417 | else | ||
2418 | e1000_irq_enable(adapter); | ||
2419 | } | ||
2420 | } | ||
2421 | |||
2422 | return work_done; | ||
2423 | } | ||
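/*
 * A minimal sketch (not from the driver source) of the NAPI return-value
 * handling above: an incompletely cleaned Tx ring forces work_done up to
 * the full budget so the device stays on the poll list; only when less
 * than the budget is consumed does the poll complete and interrupts get
 * re-enabled.
 */
static inline int napi_poll_return_example(int work_done, int budget,
					   bool tx_fully_cleaned)
{
	return tx_fully_cleaned ? work_done : budget;
}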
2424 | |||
2425 | static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | ||
2426 | { | ||
2427 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
2428 | struct e1000_hw *hw = &adapter->hw; | ||
2429 | u32 vfta, index; | ||
2430 | |||
2431 | /* don't update vlan cookie if already programmed */ | ||
2432 | if ((adapter->hw.mng_cookie.status & | ||
2433 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && | ||
2434 | (vid == adapter->mng_vlan_id)) | ||
2435 | return; | ||
2436 | |||
2437 | /* add VID to filter table */ | ||
2438 | if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { | ||
2439 | index = (vid >> 5) & 0x7F; | ||
2440 | vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); | ||
2441 | vfta |= (1 << (vid & 0x1F)); | ||
2442 | hw->mac.ops.write_vfta(hw, index, vfta); | ||
2443 | } | ||
2444 | |||
2445 | set_bit(vid, adapter->active_vlans); | ||
2446 | } | ||
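/*
 * A minimal sketch (not from the driver source) of the VLAN filter table
 * indexing used above: 4096 possible VIDs map onto 128 32-bit VFTA
 * registers, so bits [11:5] of the VID select the register and bits [4:0]
 * select the bit within it.  Example: VID 100 -> register 3, bit 4.
 */
static inline void vfta_locate_example(u16 vid, u32 *reg_index, u32 *bit_mask)
{
	*reg_index = (vid >> 5) & 0x7F;
	*bit_mask = 1U << (vid & 0x1F);
}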
2447 | |||
2448 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | ||
2449 | { | ||
2450 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
2451 | struct e1000_hw *hw = &adapter->hw; | ||
2452 | u32 vfta, index; | ||
2453 | |||
2454 | if ((adapter->hw.mng_cookie.status & | ||
2455 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && | ||
2456 | (vid == adapter->mng_vlan_id)) { | ||
2457 | /* release control to f/w */ | ||
2458 | e1000e_release_hw_control(adapter); | ||
2459 | return; | ||
2460 | } | ||
2461 | |||
2462 | /* remove VID from filter table */ | ||
2463 | if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { | ||
2464 | index = (vid >> 5) & 0x7F; | ||
2465 | vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); | ||
2466 | vfta &= ~(1 << (vid & 0x1F)); | ||
2467 | hw->mac.ops.write_vfta(hw, index, vfta); | ||
2468 | } | ||
2469 | |||
2470 | clear_bit(vid, adapter->active_vlans); | ||
2471 | } | ||
2472 | |||
2473 | /** | ||
2474 | * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering | ||
2475 | * @adapter: board private structure to initialize | ||
2476 | **/ | ||
2477 | static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) | ||
2478 | { | ||
2479 | struct net_device *netdev = adapter->netdev; | ||
2480 | struct e1000_hw *hw = &adapter->hw; | ||
2481 | u32 rctl; | ||
2482 | |||
2483 | if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { | ||
2484 | /* disable VLAN receive filtering */ | ||
2485 | rctl = er32(RCTL); | ||
2486 | rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN); | ||
2487 | ew32(RCTL, rctl); | ||
2488 | |||
2489 | if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { | ||
2490 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | ||
2491 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | ||
2492 | } | ||
2493 | } | ||
2494 | } | ||
2495 | |||
2496 | /** | ||
2497 | * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering | ||
2498 | * @adapter: board private structure to initialize | ||
2499 | **/ | ||
2500 | static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter) | ||
2501 | { | ||
2502 | struct e1000_hw *hw = &adapter->hw; | ||
2503 | u32 rctl; | ||
2504 | |||
2505 | if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { | ||
2506 | /* enable VLAN receive filtering */ | ||
2507 | rctl = er32(RCTL); | ||
2508 | rctl |= E1000_RCTL_VFE; | ||
2509 | rctl &= ~E1000_RCTL_CFIEN; | ||
2510 | ew32(RCTL, rctl); | ||
2511 | } | ||
2512 | } | ||
2513 | |||
2514 | /** | ||
2515 | * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping ||
2516 | * @adapter: board private structure to initialize | ||
2517 | **/ | ||
2518 | static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) | ||
2519 | { | ||
2520 | struct e1000_hw *hw = &adapter->hw; | ||
2521 | u32 ctrl; | ||
2522 | |||
2523 | /* disable VLAN tag insert/strip */ | ||
2524 | ctrl = er32(CTRL); | ||
2525 | ctrl &= ~E1000_CTRL_VME; | ||
2526 | ew32(CTRL, ctrl); | ||
2527 | } | ||
2528 | |||
2529 | /** | ||
2530 | * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping | ||
2531 | * @adapter: board private structure to initialize | ||
2532 | **/ | ||
2533 | static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter) | ||
2534 | { | ||
2535 | struct e1000_hw *hw = &adapter->hw; | ||
2536 | u32 ctrl; | ||
2537 | |||
2538 | /* enable VLAN tag insert/strip */ | ||
2539 | ctrl = er32(CTRL); | ||
2540 | ctrl |= E1000_CTRL_VME; | ||
2541 | ew32(CTRL, ctrl); | ||
2542 | } | ||
2543 | |||
2544 | static void e1000_update_mng_vlan(struct e1000_adapter *adapter) | ||
2545 | { | ||
2546 | struct net_device *netdev = adapter->netdev; | ||
2547 | u16 vid = adapter->hw.mng_cookie.vlan_id; | ||
2548 | u16 old_vid = adapter->mng_vlan_id; | ||
2549 | |||
2550 | if (adapter->hw.mng_cookie.status & | ||
2551 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { | ||
2552 | e1000_vlan_rx_add_vid(netdev, vid); | ||
2553 | adapter->mng_vlan_id = vid; | ||
2554 | } | ||
2555 | |||
2556 | if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) | ||
2557 | e1000_vlan_rx_kill_vid(netdev, old_vid); | ||
2558 | } | ||
2559 | |||
2560 | static void e1000_restore_vlan(struct e1000_adapter *adapter) | ||
2561 | { | ||
2562 | u16 vid; | ||
2563 | |||
2564 | e1000_vlan_rx_add_vid(adapter->netdev, 0); | ||
2565 | |||
2566 | for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) | ||
2567 | e1000_vlan_rx_add_vid(adapter->netdev, vid); | ||
2568 | } | ||
2569 | |||
2570 | static void e1000_init_manageability_pt(struct e1000_adapter *adapter) | ||
2571 | { | ||
2572 | struct e1000_hw *hw = &adapter->hw; | ||
2573 | u32 manc, manc2h, mdef, i, j; | ||
2574 | |||
2575 | if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) | ||
2576 | return; | ||
2577 | |||
2578 | manc = er32(MANC); | ||
2579 | |||
2580 | /* | ||
2581 | * enable receiving management packets to the host. this will probably | ||
2582 | * generate destination unreachable messages from the host OS, but | ||
2583 | * the packets will be handled on SMBUS | ||
2584 | */ | ||
2585 | manc |= E1000_MANC_EN_MNG2HOST; | ||
2586 | manc2h = er32(MANC2H); | ||
2587 | |||
2588 | switch (hw->mac.type) { | ||
2589 | default: | ||
2590 | manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664); | ||
2591 | break; | ||
2592 | case e1000_82574: | ||
2593 | case e1000_82583: | ||
2594 | /* | ||
2595 | * Check if IPMI pass-through decision filter already exists; | ||
2596 | * if so, enable it. | ||
2597 | */ | ||
2598 | for (i = 0, j = 0; i < 8; i++) { | ||
2599 | mdef = er32(MDEF(i)); | ||
2600 | |||
2601 | /* Ignore filters with anything other than IPMI ports */ | ||
2602 | if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) | ||
2603 | continue; | ||
2604 | |||
2605 | /* Enable this decision filter in MANC2H */ | ||
2606 | if (mdef) | ||
2607 | manc2h |= (1 << i); | ||
2608 | |||
2609 | j |= mdef; | ||
2610 | } | ||
2611 | |||
2612 | if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) | ||
2613 | break; | ||
2614 | |||
2615 | /* Create new decision filter in an empty filter */ | ||
2616 | for (i = 0, j = 0; i < 8; i++) | ||
2617 | if (er32(MDEF(i)) == 0) { | ||
2618 | ew32(MDEF(i), (E1000_MDEF_PORT_623 | | ||
2619 | E1000_MDEF_PORT_664)); | ||
2620 | manc2h |= (1 << i); ||
2621 | j++; | ||
2622 | break; | ||
2623 | } | ||
2624 | |||
2625 | if (!j) | ||
2626 | e_warn("Unable to create IPMI pass-through filter\n"); | ||
2627 | break; | ||
2628 | } | ||
2629 | |||
2630 | ew32(MANC2H, manc2h); | ||
2631 | ew32(MANC, manc); | ||
2632 | } | ||
2633 | |||
2634 | /** | ||
2635 | * e1000_configure_tx - Configure Transmit Unit after Reset | ||
2636 | * @adapter: board private structure | ||
2637 | * | ||
2638 | * Configure the Tx unit of the MAC after a reset. | ||
2639 | **/ | ||
2640 | static void e1000_configure_tx(struct e1000_adapter *adapter) | ||
2641 | { | ||
2642 | struct e1000_hw *hw = &adapter->hw; | ||
2643 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
2644 | u64 tdba; | ||
2645 | u32 tdlen, tctl, tipg, tarc; | ||
2646 | u32 ipgr1, ipgr2; | ||
2647 | |||
2648 | /* Setup the HW Tx Head and Tail descriptor pointers */ | ||
2649 | tdba = tx_ring->dma; | ||
2650 | tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); | ||
2651 | ew32(TDBAL, (tdba & DMA_BIT_MASK(32))); | ||
2652 | ew32(TDBAH, (tdba >> 32)); | ||
2653 | ew32(TDLEN, tdlen); | ||
2654 | ew32(TDH, 0); | ||
2655 | ew32(TDT, 0); | ||
2656 | tx_ring->head = E1000_TDH; | ||
2657 | tx_ring->tail = E1000_TDT; | ||
2658 | |||
2659 | /* Set the default values for the Tx Inter Packet Gap timer */ | ||
2660 | tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */ | ||
2661 | ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */ | ||
2662 | ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */ | ||
2663 | |||
2664 | if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN) | ||
2665 | ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */ | ||
2666 | |||
2667 | tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; | ||
2668 | tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; | ||
2669 | ew32(TIPG, tipg); | ||
2670 | |||
2671 | /* Set the Tx Interrupt Delay register */ | ||
2672 | ew32(TIDV, adapter->tx_int_delay); | ||
2673 | /* Tx irq moderation */ | ||
2674 | ew32(TADV, adapter->tx_abs_int_delay); | ||
2675 | |||
2676 | if (adapter->flags2 & FLAG2_DMA_BURST) { | ||
2677 | u32 txdctl = er32(TXDCTL(0)); | ||
2678 | txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | | ||
2679 | E1000_TXDCTL_WTHRESH); | ||
2680 | /* | ||
2681 | * set up some performance related parameters to encourage the | ||
2682 | * hardware to use the bus more efficiently in bursts, depends | ||
2683 | * on the tx_int_delay to be enabled, | ||
2684 | * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time | ||
2685 | * hthresh = 1 ==> prefetch when one or more available | ||
2686 | * pthresh = 0x1f ==> prefetch if internal cache 31 or less | ||
2687 | * BEWARE: this seems to work but should be considered first if | ||
2688 | * there are Tx hangs or other Tx related bugs | ||
2689 | */ | ||
2690 | txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; | ||
2691 | ew32(TXDCTL(0), txdctl); | ||
2692 | /* erratum work around: set txdctl the same for both queues */ | ||
2693 | ew32(TXDCTL(1), txdctl); | ||
2694 | } | ||
2695 | |||
2696 | /* Program the Transmit Control Register */ | ||
2697 | tctl = er32(TCTL); | ||
2698 | tctl &= ~E1000_TCTL_CT; | ||
2699 | tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | | ||
2700 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); | ||
2701 | |||
2702 | if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { | ||
2703 | tarc = er32(TARC(0)); | ||
2704 | /* | ||
2705 | * set the speed mode bit, we'll clear it if we're not at | ||
2706 | * gigabit link later | ||
2707 | */ | ||
2708 | #define SPEED_MODE_BIT (1 << 21) | ||
2709 | tarc |= SPEED_MODE_BIT; | ||
2710 | ew32(TARC(0), tarc); | ||
2711 | } | ||
2712 | |||
2713 | /* errata: program both queues to unweighted RR */ | ||
2714 | if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { | ||
2715 | tarc = er32(TARC(0)); | ||
2716 | tarc |= 1; | ||
2717 | ew32(TARC(0), tarc); | ||
2718 | tarc = er32(TARC(1)); | ||
2719 | tarc |= 1; | ||
2720 | ew32(TARC(1), tarc); | ||
2721 | } | ||
2722 | |||
2723 | /* Setup Transmit Descriptor Settings for eop descriptor */ | ||
2724 | adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; | ||
2725 | |||
2726 | /* only set IDE if we are delaying interrupts using the timers */ | ||
2727 | if (adapter->tx_int_delay) | ||
2728 | adapter->txd_cmd |= E1000_TXD_CMD_IDE; | ||
2729 | |||
2730 | /* enable Report Status bit */ | ||
2731 | adapter->txd_cmd |= E1000_TXD_CMD_RS; | ||
2732 | |||
2733 | ew32(TCTL, tctl); | ||
2734 | |||
2735 | e1000e_config_collision_dist(hw); | ||
2736 | } | ||
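/*
 * A minimal sketch (not from the driver source) of the 64-bit descriptor
 * base split used above: TDBAL takes the low 32 bits of the ring's DMA
 * address and TDBAH the high 32 bits, e.g. 0x0000001fc0a00000 ->
 * TDBAL = 0xc0a00000, TDBAH = 0x0000001f.
 */
static inline void tdba_split_example(u64 dma, u32 *low, u32 *high)
{
	*low = (u32)(dma & 0xffffffffULL);
	*high = (u32)(dma >> 32);
}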
2737 | |||
2738 | /** | ||
2739 | * e1000_setup_rctl - configure the receive control registers | ||
2740 | * @adapter: Board private structure | ||
2741 | **/ | ||
2742 | #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ | ||
2743 | (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) | ||
2744 | static void e1000_setup_rctl(struct e1000_adapter *adapter) | ||
2745 | { | ||
2746 | struct e1000_hw *hw = &adapter->hw; | ||
2747 | u32 rctl, rfctl; | ||
2748 | u32 pages = 0; | ||
2749 | |||
2750 | /* Workaround Si errata on 82579 - configure jumbo frame flow */ | ||
2751 | if (hw->mac.type == e1000_pch2lan) { | ||
2752 | s32 ret_val; | ||
2753 | |||
2754 | if (adapter->netdev->mtu > ETH_DATA_LEN) | ||
2755 | ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); | ||
2756 | else | ||
2757 | ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); | ||
2758 | |||
2759 | if (ret_val) | ||
2760 | e_dbg("failed to enable jumbo frame workaround mode\n"); | ||
2761 | } | ||
2762 | |||
2763 | /* Program MC offset vector base */ | ||
2764 | rctl = er32(RCTL); | ||
2765 | rctl &= ~(3 << E1000_RCTL_MO_SHIFT); | ||
2766 | rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | | ||
2767 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | ||
2768 | (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); | ||
2769 | |||
2770 | /* Do not Store bad packets */ | ||
2771 | rctl &= ~E1000_RCTL_SBP; | ||
2772 | |||
2773 | /* Enable Long Packet receive */ | ||
2774 | if (adapter->netdev->mtu <= ETH_DATA_LEN) | ||
2775 | rctl &= ~E1000_RCTL_LPE; | ||
2776 | else | ||
2777 | rctl |= E1000_RCTL_LPE; | ||
2778 | |||
2779 | /* Some systems expect that the CRC is included in SMBUS traffic. The | ||
2780 | * hardware strips the CRC before sending to both SMBUS (BMC) and to | ||
2781 | * host memory when this is enabled | ||
2782 | */ | ||
2783 | if (adapter->flags2 & FLAG2_CRC_STRIPPING) | ||
2784 | rctl |= E1000_RCTL_SECRC; | ||
2785 | |||
2786 | /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ | ||
2787 | if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { | ||
2788 | u16 phy_data; | ||
2789 | |||
2790 | e1e_rphy(hw, PHY_REG(770, 26), &phy_data); | ||
2791 | phy_data &= 0xfff8; | ||
2792 | phy_data |= (1 << 2); | ||
2793 | e1e_wphy(hw, PHY_REG(770, 26), phy_data); | ||
2794 | |||
2795 | e1e_rphy(hw, 22, &phy_data); | ||
2796 | phy_data &= 0x0fff; | ||
2797 | phy_data |= (1 << 14); | ||
2798 | e1e_wphy(hw, 0x10, 0x2823); | ||
2799 | e1e_wphy(hw, 0x11, 0x0003); | ||
2800 | e1e_wphy(hw, 22, phy_data); | ||
2801 | } | ||
2802 | |||
2803 | /* Setup buffer sizes */ | ||
2804 | rctl &= ~E1000_RCTL_SZ_4096; | ||
2805 | rctl |= E1000_RCTL_BSEX; | ||
2806 | switch (adapter->rx_buffer_len) { | ||
2807 | case 2048: | ||
2808 | default: | ||
2809 | rctl |= E1000_RCTL_SZ_2048; | ||
2810 | rctl &= ~E1000_RCTL_BSEX; | ||
2811 | break; | ||
2812 | case 4096: | ||
2813 | rctl |= E1000_RCTL_SZ_4096; | ||
2814 | break; | ||
2815 | case 8192: | ||
2816 | rctl |= E1000_RCTL_SZ_8192; | ||
2817 | break; | ||
2818 | case 16384: | ||
2819 | rctl |= E1000_RCTL_SZ_16384; | ||
2820 | break; | ||
2821 | } | ||
2822 | |||
2823 | /* | ||
2824 | * 82571 and greater support packet-split where the protocol | ||
2825 | * header is placed in skb->data and the packet data is | ||
2826 | * placed in pages hanging off of skb_shinfo(skb)->nr_frags. | ||
2827 | * In the case of a non-split, skb->data is linearly filled, | ||
2828 | * followed by the page buffers. Therefore, skb->data is | ||
2829 | * sized to hold the largest protocol header. | ||
2830 | * | ||
2831 | * allocations using alloc_page take too long for regular MTU | ||
2832 | * so only enable packet split for jumbo frames | ||
2833 | * | ||
2834 | * Using pages when the page size is greater than 16k wastes | ||
2835 | * a lot of memory, since we allocate 3 pages at all times | ||
2836 | * per packet. | ||
2837 | */ | ||
2838 | pages = PAGE_USE_COUNT(adapter->netdev->mtu); | ||
2839 | if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) && | ||
2840 | (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) | ||
2841 | adapter->rx_ps_pages = pages; | ||
2842 | else | ||
2843 | adapter->rx_ps_pages = 0; | ||
2844 | |||
2845 | if (adapter->rx_ps_pages) { | ||
2846 | u32 psrctl = 0; | ||
2847 | |||
2848 | /* Configure extra packet-split registers */ | ||
2849 | rfctl = er32(RFCTL); | ||
2850 | rfctl |= E1000_RFCTL_EXTEN; | ||
2851 | /* | ||
2852 | * disable packet split support for IPv6 extension headers, | ||
2853 | * because some malformed IPv6 headers can hang the Rx | ||
2854 | */ | ||
2855 | rfctl |= (E1000_RFCTL_IPV6_EX_DIS | | ||
2856 | E1000_RFCTL_NEW_IPV6_EXT_DIS); | ||
2857 | |||
2858 | ew32(RFCTL, rfctl); | ||
2859 | |||
2860 | /* Enable Packet split descriptors */ | ||
2861 | rctl |= E1000_RCTL_DTYP_PS; | ||
2862 | |||
2863 | psrctl |= adapter->rx_ps_bsize0 >> | ||
2864 | E1000_PSRCTL_BSIZE0_SHIFT; | ||
2865 | |||
2866 | switch (adapter->rx_ps_pages) { | ||
2867 | case 3: | ||
2868 | psrctl |= PAGE_SIZE << | ||
2869 | E1000_PSRCTL_BSIZE3_SHIFT; | ||
2870 | case 2: | ||
2871 | psrctl |= PAGE_SIZE << | ||
2872 | E1000_PSRCTL_BSIZE2_SHIFT; | ||
2873 | case 1: | ||
2874 | psrctl |= PAGE_SIZE >> | ||
2875 | E1000_PSRCTL_BSIZE1_SHIFT; | ||
2876 | break; | ||
2877 | } | ||
2878 | |||
2879 | ew32(PSRCTL, psrctl); | ||
2880 | } | ||
2881 | |||
2882 | ew32(RCTL, rctl); | ||
2883 | /* just started the receive unit, no need to restart */ | ||
2884 | adapter->flags &= ~FLAG_RX_RESTART_NOW; | ||
2885 | } | ||
2886 | |||
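The PAGE_USE_COUNT() macro used by e1000_setup_rctl() above is a round-up division of the MTU by the page size, and its result decides whether packet split is worthwhile. A small hedged check of that arithmetic, assuming 4 KiB pages: a 9000-byte jumbo MTU needs 3 pages (within the 3-page limit), while a standard 1500-byte MTU needs only 1 and packet split is skipped anyway because LPE is not set for standard MTUs.

    #include <stdio.h>

    /* Assumed 4 KiB pages for the example; PAGE_SHIFT/PAGE_SIZE come from
     * the kernel headers in the real driver. */
    #define EX_PAGE_SHIFT 12
    #define EX_PAGE_SIZE  (1u << EX_PAGE_SHIFT)

    /* same round-up as the driver's PAGE_USE_COUNT() macro */
    #define PAGE_USE_COUNT(S) (((S) >> EX_PAGE_SHIFT) + \
                               (((S) & (EX_PAGE_SIZE - 1)) ? 1 : 0))

    int main(void)
    {
            printf("pages for MTU 1500: %u\n", PAGE_USE_COUNT(1500u)); /* 1 */
            printf("pages for MTU 9000: %u\n", PAGE_USE_COUNT(9000u)); /* 3 */
            return 0;
    }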
2887 | /** | ||
2888 | * e1000_configure_rx - Configure Receive Unit after Reset | ||
2889 | * @adapter: board private structure | ||
2890 | * | ||
2891 | * Configure the Rx unit of the MAC after a reset. | ||
2892 | **/ | ||
2893 | static void e1000_configure_rx(struct e1000_adapter *adapter) | ||
2894 | { | ||
2895 | struct e1000_hw *hw = &adapter->hw; | ||
2896 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
2897 | u64 rdba; | ||
2898 | u32 rdlen, rctl, rxcsum, ctrl_ext; | ||
2899 | |||
2900 | if (adapter->rx_ps_pages) { | ||
2901 | /* this is a 32 byte descriptor */ | ||
2902 | rdlen = rx_ring->count * | ||
2903 | sizeof(union e1000_rx_desc_packet_split); | ||
2904 | adapter->clean_rx = e1000_clean_rx_irq_ps; | ||
2905 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; | ||
2906 | } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { | ||
2907 | rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); | ||
2908 | adapter->clean_rx = e1000_clean_jumbo_rx_irq; | ||
2909 | adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; | ||
2910 | } else { | ||
2911 | rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); | ||
2912 | adapter->clean_rx = e1000_clean_rx_irq; | ||
2913 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers; | ||
2914 | } | ||
2915 | |||
2916 | /* disable receives while setting up the descriptors */ | ||
2917 | rctl = er32(RCTL); | ||
2918 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | ||
2919 | e1e_flush(); | ||
2920 | usleep_range(10000, 20000); | ||
2921 | |||
2922 | if (adapter->flags2 & FLAG2_DMA_BURST) { | ||
2923 | /* | ||
2924 | * set the writeback threshold (only takes effect if the RDTR | ||
2925 | * is set); set GRAN=1, write back up to 0x4 descriptors' worth, and | ||
2926 | * enable prefetching of 0x20 Rx descriptors | ||
2927 | * granularity = 01 | ||
2928 | * wthresh = 04, | ||
2929 | * hthresh = 04, | ||
2930 | * pthresh = 0x20 | ||
2931 | */ | ||
2932 | ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); | ||
2933 | ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); | ||
2934 | |||
2935 | /* | ||
2936 | * override the delay timers to enable bursting, but only if | ||
2937 | * the values were not set by the user via module options | ||
2938 | */ | ||
2939 | if (adapter->rx_int_delay == DEFAULT_RDTR) | ||
2940 | adapter->rx_int_delay = BURST_RDTR; | ||
2941 | if (adapter->rx_abs_int_delay == DEFAULT_RADV) | ||
2942 | adapter->rx_abs_int_delay = BURST_RADV; | ||
2943 | } | ||
2944 | |||
2945 | /* set the Receive Delay Timer Register */ | ||
2946 | ew32(RDTR, adapter->rx_int_delay); | ||
2947 | |||
2948 | /* irq moderation */ | ||
2949 | ew32(RADV, adapter->rx_abs_int_delay); | ||
2950 | if ((adapter->itr_setting != 0) && (adapter->itr != 0)) | ||
2951 | ew32(ITR, 1000000000 / (adapter->itr * 256)); | ||
2952 | |||
2953 | ctrl_ext = er32(CTRL_EXT); | ||
2954 | /* Auto-Mask interrupts upon ICR access */ | ||
2955 | ctrl_ext |= E1000_CTRL_EXT_IAME; | ||
2956 | ew32(IAM, 0xffffffff); | ||
2957 | ew32(CTRL_EXT, ctrl_ext); | ||
2958 | e1e_flush(); | ||
2959 | |||
2960 | /* | ||
2961 | * Setup the HW Rx Head and Tail Descriptor Pointers and | ||
2962 | * the Base and Length of the Rx Descriptor Ring | ||
2963 | */ | ||
2964 | rdba = rx_ring->dma; | ||
2965 | ew32(RDBAL, (rdba & DMA_BIT_MASK(32))); | ||
2966 | ew32(RDBAH, (rdba >> 32)); | ||
2967 | ew32(RDLEN, rdlen); | ||
2968 | ew32(RDH, 0); | ||
2969 | ew32(RDT, 0); | ||
2970 | rx_ring->head = E1000_RDH; | ||
2971 | rx_ring->tail = E1000_RDT; | ||
2972 | |||
2973 | /* Enable Receive Checksum Offload for TCP and UDP */ | ||
2974 | rxcsum = er32(RXCSUM); | ||
2975 | if (adapter->flags & FLAG_RX_CSUM_ENABLED) { | ||
2976 | rxcsum |= E1000_RXCSUM_TUOFL; | ||
2977 | |||
2978 | /* | ||
2979 | * IPv4 payload checksum for UDP fragments must be | ||
2980 | * used in conjunction with packet-split. | ||
2981 | */ | ||
2982 | if (adapter->rx_ps_pages) | ||
2983 | rxcsum |= E1000_RXCSUM_IPPCSE; | ||
2984 | } else { | ||
2985 | rxcsum &= ~E1000_RXCSUM_TUOFL; | ||
2986 | /* no need to clear IPPCSE as it defaults to 0 */ | ||
2987 | } | ||
2988 | ew32(RXCSUM, rxcsum); | ||
2989 | |||
2990 | /* | ||
2991 | * Enable early receives on supported devices; this only takes effect | ||
2992 | * when the packet size is equal to or larger than the specified value | ||
2993 | * (in 8 byte units), e.g. with jumbo frames when set to E1000_ERT_2048 | ||
2994 | */ | ||
2995 | if ((adapter->flags & FLAG_HAS_ERT) || | ||
2996 | (adapter->hw.mac.type == e1000_pch2lan)) { | ||
2997 | if (adapter->netdev->mtu > ETH_DATA_LEN) { | ||
2998 | u32 rxdctl = er32(RXDCTL(0)); | ||
2999 | ew32(RXDCTL(0), rxdctl | 0x3); | ||
3000 | if (adapter->flags & FLAG_HAS_ERT) | ||
3001 | ew32(ERT, E1000_ERT_2048 | (1 << 13)); | ||
3002 | /* | ||
3003 | * With jumbo frames and early-receive enabled, | ||
3004 | * excessive C-state transition latencies result in | ||
3005 | * dropped transactions. | ||
3006 | */ | ||
3007 | pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); | ||
3008 | } else { | ||
3009 | pm_qos_update_request(&adapter->netdev->pm_qos_req, | ||
3010 | PM_QOS_DEFAULT_VALUE); | ||
3011 | } | ||
3012 | } | ||
3013 | |||
3014 | /* Enable Receives */ | ||
3015 | ew32(RCTL, rctl); | ||
3016 | } | ||
3017 | |||
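The ITR write in e1000_configure_rx() above converts an interrupt rate in interrupts/second into the register's native unit of 256-nanosecond intervals, i.e. ITR = 10^9 / (rate * 256). A quick hedged sanity check of that formula outside the driver:

    #include <stdint.h>
    #include <stdio.h>

    /* interval between interrupts expressed in the ITR register's 256 ns units */
    static uint32_t itr_reg_value(uint32_t ints_per_sec)
    {
            return 1000000000u / (ints_per_sec * 256u);
    }

    int main(void)
    {
            printf("ITR for 20000 ints/s: %u\n", (unsigned)itr_reg_value(20000)); /* 195 */
            printf("ITR for  4000 ints/s: %u\n", (unsigned)itr_reg_value(4000));  /* 976 */
            return 0;
    }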
3018 | /** | ||
3019 | * e1000_update_mc_addr_list - Update Multicast addresses | ||
3020 | * @hw: pointer to the HW structure | ||
3021 | * @mc_addr_list: array of multicast addresses to program | ||
3022 | * @mc_addr_count: number of multicast addresses to program | ||
3023 | * | ||
3024 | * Updates the Multicast Table Array. | ||
3025 | * The caller must have a packed mc_addr_list of multicast addresses. | ||
3026 | **/ | ||
3027 | static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, | ||
3028 | u32 mc_addr_count) | ||
3029 | { | ||
3030 | hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count); | ||
3031 | } | ||
3032 | |||
3033 | /** | ||
3034 | * e1000_set_multi - Multicast and Promiscuous mode set | ||
3035 | * @netdev: network interface device structure | ||
3036 | * | ||
3037 | * The set_multi entry point is called whenever the multicast address | ||
3038 | * list or the network interface flags are updated. This routine is | ||
3039 | * responsible for configuring the hardware for proper multicast, | ||
3040 | * promiscuous mode, and all-multi behavior. | ||
3041 | **/ | ||
3042 | static void e1000_set_multi(struct net_device *netdev) | ||
3043 | { | ||
3044 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
3045 | struct e1000_hw *hw = &adapter->hw; | ||
3046 | struct netdev_hw_addr *ha; | ||
3047 | u8 *mta_list; | ||
3048 | u32 rctl; | ||
3049 | |||
3050 | /* Check for Promiscuous and All Multicast modes */ | ||
3051 | |||
3052 | rctl = er32(RCTL); | ||
3053 | |||
3054 | if (netdev->flags & IFF_PROMISC) { | ||
3055 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); | ||
3056 | rctl &= ~E1000_RCTL_VFE; | ||
3057 | /* Do not hardware filter VLANs in promisc mode */ | ||
3058 | e1000e_vlan_filter_disable(adapter); | ||
3059 | } else { | ||
3060 | if (netdev->flags & IFF_ALLMULTI) { | ||
3061 | rctl |= E1000_RCTL_MPE; | ||
3062 | rctl &= ~E1000_RCTL_UPE; | ||
3063 | } else { | ||
3064 | rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); | ||
3065 | } | ||
3066 | e1000e_vlan_filter_enable(adapter); | ||
3067 | } | ||
3068 | |||
3069 | ew32(RCTL, rctl); | ||
3070 | |||
3071 | if (!netdev_mc_empty(netdev)) { | ||
3072 | int i = 0; | ||
3073 | |||
3074 | mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); | ||
3075 | if (!mta_list) | ||
3076 | return; | ||
3077 | |||
3078 | /* prepare a packed array of only addresses. */ | ||
3079 | netdev_for_each_mc_addr(ha, netdev) | ||
3080 | memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); | ||
3081 | |||
3082 | e1000_update_mc_addr_list(hw, mta_list, i); | ||
3083 | kfree(mta_list); | ||
3084 | } else { | ||
3085 | /* | ||
3086 | * if we're called from probe, we might not have | ||
3087 | * anything to do here, so clear out the list | ||
3088 | */ | ||
3089 | e1000_update_mc_addr_list(hw, NULL, 0); | ||
3090 | } | ||
3091 | |||
3092 | if (netdev->features & NETIF_F_HW_VLAN_RX) | ||
3093 | e1000e_vlan_strip_enable(adapter); | ||
3094 | else | ||
3095 | e1000e_vlan_strip_disable(adapter); | ||
3096 | } | ||
3097 | |||
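e1000_update_mc_addr_list() above expects its addresses as one flat ("packed") byte array, which is why the loop in e1000_set_multi() copies each address to offset i * ETH_ALEN. A minimal hedged sketch of that packing, written as a standalone program rather than driver code:

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* pack `count` MAC addresses into one contiguous buffer, back to back */
    static void pack_mc_list(unsigned char *dst,
                             const unsigned char src[][ETH_ALEN], int count)
    {
            int i;

            for (i = 0; i < count; i++)
                    memcpy(dst + i * ETH_ALEN, src[i], ETH_ALEN);
    }

    int main(void)
    {
            const unsigned char addrs[2][ETH_ALEN] = {
                    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
                    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
            };
            unsigned char packed[2 * ETH_ALEN];

            pack_mc_list(packed, addrs, 2);
            printf("second address starts at offset %d\n", 1 * ETH_ALEN);
            return 0;
    }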
3098 | /** | ||
3099 | * e1000_configure - configure the hardware for Rx and Tx | ||
3100 | * @adapter: private board structure | ||
3101 | **/ | ||
3102 | static void e1000_configure(struct e1000_adapter *adapter) | ||
3103 | { | ||
3104 | e1000_set_multi(adapter->netdev); | ||
3105 | |||
3106 | e1000_restore_vlan(adapter); | ||
3107 | e1000_init_manageability_pt(adapter); | ||
3108 | |||
3109 | e1000_configure_tx(adapter); | ||
3110 | e1000_setup_rctl(adapter); | ||
3111 | e1000_configure_rx(adapter); | ||
3112 | adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring), | ||
3113 | GFP_KERNEL); | ||
3114 | } | ||
3115 | |||
3116 | /** | ||
3117 | * e1000e_power_up_phy - restore link in case the phy was powered down | ||
3118 | * @adapter: address of board private structure | ||
3119 | * | ||
3120 | * The phy may be powered down to save power and turn off link when the | ||
3121 | * driver is unloaded and wake-on-lan is not enabled (among other cases). | ||
3122 | * *** this routine MUST be followed by a call to e1000e_reset *** | ||
3123 | **/ | ||
3124 | void e1000e_power_up_phy(struct e1000_adapter *adapter) | ||
3125 | { | ||
3126 | if (adapter->hw.phy.ops.power_up) | ||
3127 | adapter->hw.phy.ops.power_up(&adapter->hw); | ||
3128 | |||
3129 | adapter->hw.mac.ops.setup_link(&adapter->hw); | ||
3130 | } | ||
3131 | |||
3132 | /** | ||
3133 | * e1000_power_down_phy - Power down the PHY | ||
3134 | * | ||
3135 | * Power down the PHY so no link is implied when interface is down. | ||
3136 | * The PHY cannot be powered down if management or WoL is active. | ||
3137 | */ | ||
3138 | static void e1000_power_down_phy(struct e1000_adapter *adapter) | ||
3139 | { | ||
3140 | /* WoL is enabled */ | ||
3141 | if (adapter->wol) | ||
3142 | return; | ||
3143 | |||
3144 | if (adapter->hw.phy.ops.power_down) | ||
3145 | adapter->hw.phy.ops.power_down(&adapter->hw); | ||
3146 | } | ||
3147 | |||
3148 | /** | ||
3149 | * e1000e_reset - bring the hardware into a known good state | ||
3150 | * | ||
3151 | * This function boots the hardware and enables some settings that | ||
3152 | * require a configuration cycle of the hardware - those cannot be | ||
3153 | * set/changed during runtime. After reset the device needs to be | ||
3154 | * properly configured for Rx, Tx etc. | ||
3155 | */ | ||
3156 | void e1000e_reset(struct e1000_adapter *adapter) | ||
3157 | { | ||
3158 | struct e1000_mac_info *mac = &adapter->hw.mac; | ||
3159 | struct e1000_fc_info *fc = &adapter->hw.fc; | ||
3160 | struct e1000_hw *hw = &adapter->hw; | ||
3161 | u32 tx_space, min_tx_space, min_rx_space; | ||
3162 | u32 pba = adapter->pba; | ||
3163 | u16 hwm; | ||
3164 | |||
3165 | /* reset Packet Buffer Allocation to default */ | ||
3166 | ew32(PBA, pba); | ||
3167 | |||
3168 | if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { | ||
3169 | /* | ||
3170 | * To maintain wire speed transmits, the Tx FIFO should be | ||
3171 | * large enough to accommodate two full transmit packets, | ||
3172 | * rounded up to the next 1KB and expressed in KB. Likewise, | ||
3173 | * the Rx FIFO should be large enough to accommodate at least | ||
3174 | * one full receive packet and is similarly rounded up and | ||
3175 | * expressed in KB. | ||
3176 | */ | ||
3177 | pba = er32(PBA); | ||
3178 | /* upper 16 bits hold the Tx packet buffer allocation size in KB */ | ||
3179 | tx_space = pba >> 16; | ||
3180 | /* lower 16 bits hold the Rx packet buffer allocation size in KB */ | ||
3181 | pba &= 0xffff; | ||
3182 | /* | ||
3183 | * the Tx FIFO also stores 16 bytes of information about the Tx packet, | ||
3184 | * but don't include the Ethernet FCS because hardware appends it | ||
3185 | */ | ||
3186 | min_tx_space = (adapter->max_frame_size + | ||
3187 | sizeof(struct e1000_tx_desc) - | ||
3188 | ETH_FCS_LEN) * 2; | ||
3189 | min_tx_space = ALIGN(min_tx_space, 1024); | ||
3190 | min_tx_space >>= 10; | ||
3191 | /* software strips receive CRC, so leave room for it */ | ||
3192 | min_rx_space = adapter->max_frame_size; | ||
3193 | min_rx_space = ALIGN(min_rx_space, 1024); | ||
3194 | min_rx_space >>= 10; | ||
3195 | |||
3196 | /* | ||
3197 | * If current Tx allocation is less than the min Tx FIFO size, | ||
3198 | * and the min Tx FIFO size is less than the current Rx FIFO | ||
3199 | * allocation, take space away from current Rx allocation | ||
3200 | */ | ||
3201 | if ((tx_space < min_tx_space) && | ||
3202 | ((min_tx_space - tx_space) < pba)) { | ||
3203 | pba -= min_tx_space - tx_space; | ||
3204 | |||
3205 | /* | ||
3206 | * if short on Rx space, Rx wins and must trump the Tx | ||
3207 | * adjustment, or use Early Receive if available | ||
3208 | */ | ||
3209 | if ((pba < min_rx_space) && | ||
3210 | (!(adapter->flags & FLAG_HAS_ERT))) | ||
3211 | /* ERT enabled in e1000_configure_rx */ | ||
3212 | pba = min_rx_space; | ||
3213 | } | ||
3214 | |||
3215 | ew32(PBA, pba); | ||
3216 | } | ||
3217 | |||
3218 | /* | ||
3219 | * flow control settings | ||
3220 | * | ||
3221 | * The high water mark must be low enough to fit one full frame | ||
3222 | * (or the size used for early receive) above it in the Rx FIFO. | ||
3223 | * Set it to the lower of: | ||
3224 | * - 90% of the Rx FIFO size, and | ||
3225 | * - the full Rx FIFO size minus the early receive size (for parts | ||
3226 | * with ERT support, assuming ERT is set to E1000_ERT_2048), or | ||
3227 | * - the full Rx FIFO size minus one full frame | ||
3228 | */ | ||
3229 | if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) | ||
3230 | fc->pause_time = 0xFFFF; | ||
3231 | else | ||
3232 | fc->pause_time = E1000_FC_PAUSE_TIME; | ||
3233 | fc->send_xon = 1; | ||
3234 | fc->current_mode = fc->requested_mode; | ||
3235 | |||
3236 | switch (hw->mac.type) { | ||
3237 | default: | ||
3238 | if ((adapter->flags & FLAG_HAS_ERT) && | ||
3239 | (adapter->netdev->mtu > ETH_DATA_LEN)) | ||
3240 | hwm = min(((pba << 10) * 9 / 10), | ||
3241 | ((pba << 10) - (E1000_ERT_2048 << 3))); | ||
3242 | else | ||
3243 | hwm = min(((pba << 10) * 9 / 10), | ||
3244 | ((pba << 10) - adapter->max_frame_size)); | ||
3245 | |||
3246 | fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ | ||
3247 | fc->low_water = fc->high_water - 8; | ||
3248 | break; | ||
3249 | case e1000_pchlan: | ||
3250 | /* | ||
3251 | * Workaround PCH LOM adapter hangs with certain network | ||
3252 | * loads. If hangs persist, try disabling Tx flow control. | ||
3253 | */ | ||
3254 | if (adapter->netdev->mtu > ETH_DATA_LEN) { | ||
3255 | fc->high_water = 0x3500; | ||
3256 | fc->low_water = 0x1500; | ||
3257 | } else { | ||
3258 | fc->high_water = 0x5000; | ||
3259 | fc->low_water = 0x3000; | ||
3260 | } | ||
3261 | fc->refresh_time = 0x1000; | ||
3262 | break; | ||
3263 | case e1000_pch2lan: | ||
3264 | fc->high_water = 0x05C20; | ||
3265 | fc->low_water = 0x05048; | ||
3266 | fc->pause_time = 0x0650; | ||
3267 | fc->refresh_time = 0x0400; | ||
3268 | if (adapter->netdev->mtu > ETH_DATA_LEN) { | ||
3269 | pba = 14; | ||
3270 | ew32(PBA, pba); | ||
3271 | } | ||
3272 | break; | ||
3273 | } | ||
3274 | |||
3275 | /* | ||
3276 | * Disable Adaptive Interrupt Moderation if 2 full packets cannot | ||
3277 | * fit in the receive buffer and early-receive is not supported. | ||
3278 | */ | ||
3279 | if (adapter->itr_setting & 0x3) { | ||
3280 | if (((adapter->max_frame_size * 2) > (pba << 10)) && | ||
3281 | !(adapter->flags & FLAG_HAS_ERT)) { | ||
3282 | if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { | ||
3283 | dev_info(&adapter->pdev->dev, | ||
3284 | "Interrupt Throttle Rate turned off\n"); | ||
3285 | adapter->flags2 |= FLAG2_DISABLE_AIM; | ||
3286 | ew32(ITR, 0); | ||
3287 | } | ||
3288 | } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { | ||
3289 | dev_info(&adapter->pdev->dev, | ||
3290 | "Interrupt Throttle Rate turned on\n"); | ||
3291 | adapter->flags2 &= ~FLAG2_DISABLE_AIM; | ||
3292 | adapter->itr = 20000; | ||
3293 | ew32(ITR, 1000000000 / (adapter->itr * 256)); | ||
3294 | } | ||
3295 | } | ||
3296 | |||
3297 | /* Allow time for pending master requests to run */ | ||
3298 | mac->ops.reset_hw(hw); | ||
3299 | |||
3300 | /* | ||
3301 | * For parts with AMT enabled, let the firmware know | ||
3302 | * that the network interface is in control | ||
3303 | */ | ||
3304 | if (adapter->flags & FLAG_HAS_AMT) | ||
3305 | e1000e_get_hw_control(adapter); | ||
3306 | |||
3307 | ew32(WUC, 0); | ||
3308 | |||
3309 | if (mac->ops.init_hw(hw)) | ||
3310 | e_err("Hardware Error\n"); | ||
3311 | |||
3312 | e1000_update_mng_vlan(adapter); | ||
3313 | |||
3314 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ | ||
3315 | ew32(VET, ETH_P_8021Q); | ||
3316 | |||
3317 | e1000e_reset_adaptive(hw); | ||
3318 | |||
3319 | if (!netif_running(adapter->netdev) && | ||
3320 | !test_bit(__E1000_TESTING, &adapter->state)) { | ||
3321 | e1000_power_down_phy(adapter); | ||
3322 | return; | ||
3323 | } | ||
3324 | |||
3325 | e1000_get_phy_info(hw); | ||
3326 | |||
3327 | if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && | ||
3328 | !(adapter->flags & FLAG_SMART_POWER_DOWN)) { | ||
3329 | u16 phy_data = 0; | ||
3330 | /* | ||
3331 | * speed up time to link by disabling smart power down; ignore | ||
3332 | * the return value of this function because there is nothing | ||
3333 | * different we would do if it failed | ||
3334 | */ | ||
3335 | e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); | ||
3336 | phy_data &= ~IGP02E1000_PM_SPD; | ||
3337 | e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); | ||
3338 | } | ||
3339 | } | ||
3340 | |||
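In the default branch of the switch in e1000e_reset() above, the high water mark is the lower of 90% of the Rx packet buffer and the buffer size minus one full frame (or minus the early-receive threshold on ERT parts), masked down to the register's granularity. A hedged, standalone rework of that arithmetic with example numbers (a 20 KB Rx buffer and a 1522-byte frame); the 0xfff8 mask here is an assumption standing in for E1000_FCRTH_RTH:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* assumed 8-byte granularity mask for the high water mark register */
    #define FC_RTH_MASK 0xfff8u

    static uint32_t fc_high_water(uint32_t rx_pba_kb, uint32_t max_frame)
    {
            uint32_t pba_bytes = rx_pba_kb << 10;
            uint32_t hwm = MIN(pba_bytes * 9 / 10, pba_bytes - max_frame);

            return hwm & FC_RTH_MASK;
    }

    int main(void)
    {
            /* 20 KB Rx buffer, 1522-byte max frame:
             * min(18432, 18958) = 18432, and 18432 & 0xfff8 = 18432 */
            printf("high water mark: %u bytes\n",
                   (unsigned)fc_high_water(20, 1522));
            return 0;
    }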
3341 | int e1000e_up(struct e1000_adapter *adapter) | ||
3342 | { | ||
3343 | struct e1000_hw *hw = &adapter->hw; | ||
3344 | |||
3345 | /* hardware has been reset, we need to reload some things */ | ||
3346 | e1000_configure(adapter); | ||
3347 | |||
3348 | clear_bit(__E1000_DOWN, &adapter->state); | ||
3349 | |||
3350 | napi_enable(&adapter->napi); | ||
3351 | if (adapter->msix_entries) | ||
3352 | e1000_configure_msix(adapter); | ||
3353 | e1000_irq_enable(adapter); | ||
3354 | |||
3355 | netif_start_queue(adapter->netdev); | ||
3356 | |||
3357 | /* fire a link change interrupt to start the watchdog */ | ||
3358 | if (adapter->msix_entries) | ||
3359 | ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); | ||
3360 | else | ||
3361 | ew32(ICS, E1000_ICS_LSC); | ||
3362 | |||
3363 | return 0; | ||
3364 | } | ||
3365 | |||
3366 | static void e1000e_flush_descriptors(struct e1000_adapter *adapter) | ||
3367 | { | ||
3368 | struct e1000_hw *hw = &adapter->hw; | ||
3369 | |||
3370 | if (!(adapter->flags2 & FLAG2_DMA_BURST)) | ||
3371 | return; | ||
3372 | |||
3373 | /* flush pending descriptor writebacks to memory */ | ||
3374 | ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); | ||
3375 | ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); | ||
3376 | |||
3377 | /* execute the writes immediately */ | ||
3378 | e1e_flush(); | ||
3379 | } | ||
3380 | |||
3381 | static void e1000e_update_stats(struct e1000_adapter *adapter); | ||
3382 | |||
3383 | void e1000e_down(struct e1000_adapter *adapter) | ||
3384 | { | ||
3385 | struct net_device *netdev = adapter->netdev; | ||
3386 | struct e1000_hw *hw = &adapter->hw; | ||
3387 | u32 tctl, rctl; | ||
3388 | |||
3389 | /* | ||
3390 | * signal that we're down so the interrupt handler does not | ||
3391 | * reschedule our watchdog timer | ||
3392 | */ | ||
3393 | set_bit(__E1000_DOWN, &adapter->state); | ||
3394 | |||
3395 | /* disable receives in the hardware */ | ||
3396 | rctl = er32(RCTL); | ||
3397 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | ||
3398 | /* flush and sleep below */ | ||
3399 | |||
3400 | netif_stop_queue(netdev); | ||
3401 | |||
3402 | /* disable transmits in the hardware */ | ||
3403 | tctl = er32(TCTL); | ||
3404 | tctl &= ~E1000_TCTL_EN; | ||
3405 | ew32(TCTL, tctl); | ||
3406 | /* flush both disables and wait for them to finish */ | ||
3407 | e1e_flush(); | ||
3408 | usleep_range(10000, 20000); | ||
3409 | |||
3410 | napi_disable(&adapter->napi); | ||
3411 | e1000_irq_disable(adapter); | ||
3412 | |||
3413 | del_timer_sync(&adapter->watchdog_timer); | ||
3414 | del_timer_sync(&adapter->phy_info_timer); | ||
3415 | |||
3416 | netif_carrier_off(netdev); | ||
3417 | |||
3418 | spin_lock(&adapter->stats64_lock); | ||
3419 | e1000e_update_stats(adapter); | ||
3420 | spin_unlock(&adapter->stats64_lock); | ||
3421 | |||
3422 | e1000e_flush_descriptors(adapter); | ||
3423 | e1000_clean_tx_ring(adapter); | ||
3424 | e1000_clean_rx_ring(adapter); | ||
3425 | |||
3426 | adapter->link_speed = 0; | ||
3427 | adapter->link_duplex = 0; | ||
3428 | |||
3429 | if (!pci_channel_offline(adapter->pdev)) | ||
3430 | e1000e_reset(adapter); | ||
3431 | |||
3432 | /* | ||
3433 | * TODO: for power management, we could drop the link and | ||
3434 | * pci_disable_device here. | ||
3435 | */ | ||
3436 | } | ||
3437 | |||
3438 | void e1000e_reinit_locked(struct e1000_adapter *adapter) | ||
3439 | { | ||
3440 | might_sleep(); | ||
3441 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | ||
3442 | usleep_range(1000, 2000); | ||
3443 | e1000e_down(adapter); | ||
3444 | e1000e_up(adapter); | ||
3445 | clear_bit(__E1000_RESETTING, &adapter->state); | ||
3446 | } | ||
3447 | |||
3448 | /** | ||
3449 | * e1000_sw_init - Initialize general software structures (struct e1000_adapter) | ||
3450 | * @adapter: board private structure to initialize | ||
3451 | * | ||
3452 | * e1000_sw_init initializes the Adapter private data structure. | ||
3453 | * Fields are initialized based on PCI device information and | ||
3454 | * OS network device settings (MTU size). | ||
3455 | **/ | ||
3456 | static int __devinit e1000_sw_init(struct e1000_adapter *adapter) | ||
3457 | { | ||
3458 | struct net_device *netdev = adapter->netdev; | ||
3459 | |||
3460 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; | ||
3461 | adapter->rx_ps_bsize0 = 128; | ||
3462 | adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | ||
3463 | adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; | ||
3464 | |||
3465 | spin_lock_init(&adapter->stats64_lock); | ||
3466 | |||
3467 | e1000e_set_interrupt_capability(adapter); | ||
3468 | |||
3469 | if (e1000_alloc_queues(adapter)) | ||
3470 | return -ENOMEM; | ||
3471 | |||
3472 | /* Explicitly disable IRQ since the NIC can be in any state. */ | ||
3473 | e1000_irq_disable(adapter); | ||
3474 | |||
3475 | set_bit(__E1000_DOWN, &adapter->state); | ||
3476 | return 0; | ||
3477 | } | ||
3478 | |||
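For reference, the frame-size fields that e1000_sw_init() above fills in follow directly from the standard Ethernet framing constants: max_frame_size is the MTU plus the 14-byte header and 4-byte FCS, and rx_buffer_len additionally leaves room for a VLAN tag. A quick hedged arithmetic check as a standalone program:

    #include <stdio.h>

    /* Ethernet framing constants (values as defined in the kernel headers) */
    #define ETH_HLEN     14   /* destination + source MAC + ethertype */
    #define ETH_FCS_LEN   4   /* frame check sequence */
    #define VLAN_HLEN     4   /* 802.1Q tag */
    #define ETH_FRAME_LEN 1514
    #define ETH_ZLEN      60

    int main(void)
    {
            int mtu = 1500;

            printf("max_frame_size: %d\n", mtu + ETH_HLEN + ETH_FCS_LEN);            /* 1518 */
            printf("rx_buffer_len:  %d\n", ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN); /* 1522 */
            printf("min_frame_size: %d\n", ETH_ZLEN + ETH_FCS_LEN);                  /* 64 */
            return 0;
    }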
3479 | /** | ||
3480 | * e1000_intr_msi_test - Interrupt Handler | ||
3481 | * @irq: interrupt number | ||
3482 | * @data: pointer to a network interface device structure | ||
3483 | **/ | ||
3484 | static irqreturn_t e1000_intr_msi_test(int irq, void *data) | ||
3485 | { | ||
3486 | struct net_device *netdev = data; | ||
3487 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
3488 | struct e1000_hw *hw = &adapter->hw; | ||
3489 | u32 icr = er32(ICR); | ||
3490 | |||
3491 | e_dbg("icr is %08X\n", icr); | ||
3492 | if (icr & E1000_ICR_RXSEQ) { | ||
3493 | adapter->flags &= ~FLAG_MSI_TEST_FAILED; | ||
3494 | wmb(); | ||
3495 | } | ||
3496 | |||
3497 | return IRQ_HANDLED; | ||
3498 | } | ||
3499 | |||
3500 | /** | ||
3501 | * e1000_test_msi_interrupt - Returns 0 for successful test | ||
3502 | * @adapter: board private struct | ||
3503 | * | ||
3504 | * code flow taken from tg3.c | ||
3505 | **/ | ||
3506 | static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) | ||
3507 | { | ||
3508 | struct net_device *netdev = adapter->netdev; | ||
3509 | struct e1000_hw *hw = &adapter->hw; | ||
3510 | int err; | ||
3511 | |||
3512 | /* poll_enable hasn't been called yet, so we don't need to disable */ | ||
3513 | /* clear any pending events */ | ||
3514 | er32(ICR); | ||
3515 | |||
3516 | /* free the real vector and request a test handler */ | ||
3517 | e1000_free_irq(adapter); | ||
3518 | e1000e_reset_interrupt_capability(adapter); | ||
3519 | |||
3520 | /* Assume the test fails; if it succeeds, the test | ||
3521 | * MSI irq handler will clear this flag */ | ||
3522 | adapter->flags |= FLAG_MSI_TEST_FAILED; | ||
3523 | |||
3524 | err = pci_enable_msi(adapter->pdev); | ||
3525 | if (err) | ||
3526 | goto msi_test_failed; | ||
3527 | |||
3528 | err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, | ||
3529 | netdev->name, netdev); | ||
3530 | if (err) { | ||
3531 | pci_disable_msi(adapter->pdev); | ||
3532 | goto msi_test_failed; | ||
3533 | } | ||
3534 | |||
3535 | wmb(); | ||
3536 | |||
3537 | e1000_irq_enable(adapter); | ||
3538 | |||
3539 | /* fire an unusual interrupt on the test handler */ | ||
3540 | ew32(ICS, E1000_ICS_RXSEQ); | ||
3541 | e1e_flush(); | ||
3542 | msleep(50); | ||
3543 | |||
3544 | e1000_irq_disable(adapter); | ||
3545 | |||
3546 | rmb(); | ||
3547 | |||
3548 | if (adapter->flags & FLAG_MSI_TEST_FAILED) { | ||
3549 | adapter->int_mode = E1000E_INT_MODE_LEGACY; | ||
3550 | e_info("MSI interrupt test failed, using legacy interrupt.\n"); | ||
3551 | } else | ||
3552 | e_dbg("MSI interrupt test succeeded!\n"); | ||
3553 | |||
3554 | free_irq(adapter->pdev->irq, netdev); | ||
3555 | pci_disable_msi(adapter->pdev); | ||
3556 | |||
3557 | msi_test_failed: | ||
3558 | e1000e_set_interrupt_capability(adapter); | ||
3559 | return e1000_request_irq(adapter); | ||
3560 | } | ||
3561 | |||
3562 | /** | ||
3563 | * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored | ||
3564 | * @adapter: board private struct | ||
3565 | * | ||
3566 | * code flow taken from tg3.c, called with e1000 interrupts disabled. | ||
3567 | **/ | ||
3568 | static int e1000_test_msi(struct e1000_adapter *adapter) | ||
3569 | { | ||
3570 | int err; | ||
3571 | u16 pci_cmd; | ||
3572 | |||
3573 | if (!(adapter->flags & FLAG_MSI_ENABLED)) | ||
3574 | return 0; | ||
3575 | |||
3576 | /* disable SERR in case the MSI write causes a master abort */ | ||
3577 | pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); | ||
3578 | if (pci_cmd & PCI_COMMAND_SERR) | ||
3579 | pci_write_config_word(adapter->pdev, PCI_COMMAND, | ||
3580 | pci_cmd & ~PCI_COMMAND_SERR); | ||
3581 | |||
3582 | err = e1000_test_msi_interrupt(adapter); | ||
3583 | |||
3584 | /* re-enable SERR */ | ||
3585 | if (pci_cmd & PCI_COMMAND_SERR) { | ||
3586 | pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); | ||
3587 | pci_cmd |= PCI_COMMAND_SERR; | ||
3588 | pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); | ||
3589 | } | ||
3590 | |||
3591 | return err; | ||
3592 | } | ||
3593 | |||
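The MSI self-test above boils down to a simple pattern: pessimistically set a "failed" flag, install a throw-away handler, fire a self-induced interrupt, and check whether the handler ran and cleared the flag. A hedged, userspace-flavoured sketch of just that control flow (all names below are illustrative, not driver API):

    #include <stdbool.h>
    #include <stdio.h>

    static volatile bool msi_test_failed;

    /* stand-in for the test interrupt handler: clears the failure flag */
    static void test_irq_handler(void)
    {
            msi_test_failed = false;
    }

    /* stand-in for firing the self-test interrupt; in the driver this is the
     * ICS register write, and the handler runs asynchronously */
    static void fire_test_interrupt(void)
    {
            test_irq_handler();
    }

    int main(void)
    {
            msi_test_failed = true;   /* assume failure up front */
            fire_test_interrupt();    /* handler should clear the flag */

            if (msi_test_failed)
                    printf("MSI test failed, fall back to legacy interrupts\n");
            else
                    printf("MSI test succeeded\n");
            return 0;
    }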
3594 | /** | ||
3595 | * e1000_open - Called when a network interface is made active | ||
3596 | * @netdev: network interface device structure | ||
3597 | * | ||
3598 | * Returns 0 on success, negative value on failure | ||
3599 | * | ||
3600 | * The open entry point is called when a network interface is made | ||
3601 | * active by the system (IFF_UP). At this point all resources needed | ||
3602 | * for transmit and receive operations are allocated, the interrupt | ||
3603 | * handler is registered with the OS, the watchdog timer is started, | ||
3604 | * and the stack is notified that the interface is ready. | ||
3605 | **/ | ||
3606 | static int e1000_open(struct net_device *netdev) | ||
3607 | { | ||
3608 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
3609 | struct e1000_hw *hw = &adapter->hw; | ||
3610 | struct pci_dev *pdev = adapter->pdev; | ||
3611 | int err; | ||
3612 | |||
3613 | /* disallow open during test */ | ||
3614 | if (test_bit(__E1000_TESTING, &adapter->state)) | ||
3615 | return -EBUSY; | ||
3616 | |||
3617 | pm_runtime_get_sync(&pdev->dev); | ||
3618 | |||
3619 | netif_carrier_off(netdev); | ||
3620 | |||
3621 | /* allocate transmit descriptors */ | ||
3622 | err = e1000e_setup_tx_resources(adapter); | ||
3623 | if (err) | ||
3624 | goto err_setup_tx; | ||
3625 | |||
3626 | /* allocate receive descriptors */ | ||
3627 | err = e1000e_setup_rx_resources(adapter); | ||
3628 | if (err) | ||
3629 | goto err_setup_rx; | ||
3630 | |||
3631 | /* | ||
3632 | * If AMT is enabled, let the firmware know that the network | ||
3633 | * interface is now open and reset the part to a known state. | ||
3634 | */ | ||
3635 | if (adapter->flags & FLAG_HAS_AMT) { | ||
3636 | e1000e_get_hw_control(adapter); | ||
3637 | e1000e_reset(adapter); | ||
3638 | } | ||
3639 | |||
3640 | e1000e_power_up_phy(adapter); | ||
3641 | |||
3642 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | ||
3643 | if ((adapter->hw.mng_cookie.status & | ||
3644 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) | ||
3645 | e1000_update_mng_vlan(adapter); | ||
3646 | |||
3647 | /* DMA latency requirement to work around the early-receive/jumbo issue */ | ||
3648 | if ((adapter->flags & FLAG_HAS_ERT) || | ||
3649 | (adapter->hw.mac.type == e1000_pch2lan)) | ||
3650 | pm_qos_add_request(&adapter->netdev->pm_qos_req, | ||
3651 | PM_QOS_CPU_DMA_LATENCY, | ||
3652 | PM_QOS_DEFAULT_VALUE); | ||
3653 | |||
3654 | /* | ||
3655 | * before we allocate an interrupt, we must be ready to handle it. | ||
3656 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt | ||
3657 | * as soon as we call pci_request_irq, so we have to set up our | ||
3658 | * clean_rx handler before we do so. | ||
3659 | */ | ||
3660 | e1000_configure(adapter); | ||
3661 | |||
3662 | err = e1000_request_irq(adapter); | ||
3663 | if (err) | ||
3664 | goto err_req_irq; | ||
3665 | |||
3666 | /* | ||
3667 | * Work around PCIe errata with MSI interrupts causing some chipsets to | ||
3668 | * ignore e1000e MSI messages, which means we need to test our MSI | ||
3669 | * interrupt now | ||
3670 | */ | ||
3671 | if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { | ||
3672 | err = e1000_test_msi(adapter); | ||
3673 | if (err) { | ||
3674 | e_err("Interrupt allocation failed\n"); | ||
3675 | goto err_req_irq; | ||
3676 | } | ||
3677 | } | ||
3678 | |||
3679 | /* From here on the code is the same as e1000e_up() */ | ||
3680 | clear_bit(__E1000_DOWN, &adapter->state); | ||
3681 | |||
3682 | napi_enable(&adapter->napi); | ||
3683 | |||
3684 | e1000_irq_enable(adapter); | ||
3685 | |||
3686 | netif_start_queue(netdev); | ||
3687 | |||
3688 | adapter->idle_check = true; | ||
3689 | pm_runtime_put(&pdev->dev); | ||
3690 | |||
3691 | /* fire a link status change interrupt to start the watchdog */ | ||
3692 | if (adapter->msix_entries) | ||
3693 | ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); | ||
3694 | else | ||
3695 | ew32(ICS, E1000_ICS_LSC); | ||
3696 | |||
3697 | return 0; | ||
3698 | |||
3699 | err_req_irq: | ||
3700 | e1000e_release_hw_control(adapter); | ||
3701 | e1000_power_down_phy(adapter); | ||
3702 | e1000e_free_rx_resources(adapter); | ||
3703 | err_setup_rx: | ||
3704 | e1000e_free_tx_resources(adapter); | ||
3705 | err_setup_tx: | ||
3706 | e1000e_reset(adapter); | ||
3707 | pm_runtime_put_sync(&pdev->dev); | ||
3708 | |||
3709 | return err; | ||
3710 | } | ||
3711 | |||
3712 | /** | ||
3713 | * e1000_close - Disables a network interface | ||
3714 | * @netdev: network interface device structure | ||
3715 | * | ||
3716 | * Returns 0; this is not allowed to fail | ||
3717 | * | ||
3718 | * The close entry point is called when an interface is de-activated | ||
3719 | * by the OS. The hardware is still under the driver's control, but | ||
3720 | * needs to be disabled. A global MAC reset is issued to stop the | ||
3721 | * hardware, and all transmit and receive resources are freed. | ||
3722 | **/ | ||
3723 | static int e1000_close(struct net_device *netdev) | ||
3724 | { | ||
3725 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
3726 | struct pci_dev *pdev = adapter->pdev; | ||
3727 | |||
3728 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); | ||
3729 | |||
3730 | pm_runtime_get_sync(&pdev->dev); | ||
3731 | |||
3732 | if (!test_bit(__E1000_DOWN, &adapter->state)) { | ||
3733 | e1000e_down(adapter); | ||
3734 | e1000_free_irq(adapter); | ||
3735 | } | ||
3736 | e1000_power_down_phy(adapter); | ||
3737 | |||
3738 | e1000e_free_tx_resources(adapter); | ||
3739 | e1000e_free_rx_resources(adapter); | ||
3740 | |||
3741 | /* | ||
3742 | * kill manageability vlan ID if supported, but not if a vlan with | ||
3743 | * the same ID is registered on the host OS (let 8021q kill it) | ||
3744 | */ | ||
3745 | if (adapter->hw.mng_cookie.status & | ||
3746 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) | ||
3747 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | ||
3748 | |||
3749 | /* | ||
3750 | * If AMT is enabled, let the firmware know that the network | ||
3751 | * interface is now closed | ||
3752 | */ | ||
3753 | if ((adapter->flags & FLAG_HAS_AMT) && | ||
3754 | !test_bit(__E1000_TESTING, &adapter->state)) | ||
3755 | e1000e_release_hw_control(adapter); | ||
3756 | |||
3757 | if ((adapter->flags & FLAG_HAS_ERT) || | ||
3758 | (adapter->hw.mac.type == e1000_pch2lan)) | ||
3759 | pm_qos_remove_request(&adapter->netdev->pm_qos_req); | ||
3760 | |||
3761 | pm_runtime_put_sync(&pdev->dev); | ||
3762 | |||
3763 | return 0; | ||
3764 | } | ||
3765 | /** | ||
3766 | * e1000_set_mac - Change the Ethernet Address of the NIC | ||
3767 | * @netdev: network interface device structure | ||
3768 | * @p: pointer to an address structure | ||
3769 | * | ||
3770 | * Returns 0 on success, negative on failure | ||
3771 | **/ | ||
3772 | static int e1000_set_mac(struct net_device *netdev, void *p) | ||
3773 | { | ||
3774 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
3775 | struct sockaddr *addr = p; | ||
3776 | |||
3777 | if (!is_valid_ether_addr(addr->sa_data)) | ||
3778 | return -EADDRNOTAVAIL; | ||
3779 | |||
3780 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
3781 | memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); | ||
3782 | |||
3783 | e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); | ||
3784 | |||
3785 | if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { | ||
3786 | /* activate the workaround */ | ||
3787 | e1000e_set_laa_state_82571(&adapter->hw, 1); | ||
3788 | |||
3789 | /* | ||
3790 | * Hold a copy of the LAA in RAR[14]. This is done so that | ||
3791 | * between the time RAR[0] gets clobbered and the time it | ||
3792 | * gets fixed (in e1000_watchdog), the actual LAA is in one | ||
3793 | * of the RARs and no incoming packets directed to this port | ||
3794 | * are dropped. Eventually the LAA will be in RAR[0] and | ||
3795 | * RAR[14] | ||
3796 | */ | ||
3797 | e1000e_rar_set(&adapter->hw, | ||
3798 | adapter->hw.mac.addr, | ||
3799 | adapter->hw.mac.rar_entry_count - 1); | ||
3800 | } | ||
3801 | |||
3802 | return 0; | ||
3803 | } | ||
3804 | |||
3805 | /** | ||
3806 | * e1000e_update_phy_task - work thread to update phy | ||
3807 | * @work: pointer to our work struct | ||
3808 | * | ||
3809 | * this worker thread exists because we must acquire a | ||
3810 | * semaphore to read the phy, which may cause us to msleep | ||
3811 | * while waiting for it, and we can't msleep in a timer. | ||
3812 | **/ | ||
3813 | static void e1000e_update_phy_task(struct work_struct *work) | ||
3814 | { | ||
3815 | struct e1000_adapter *adapter = container_of(work, | ||
3816 | struct e1000_adapter, update_phy_task); | ||
3817 | |||
3818 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
3819 | return; | ||
3820 | |||
3821 | e1000_get_phy_info(&adapter->hw); | ||
3822 | } | ||
3823 | |||
3824 | /* | ||
3825 | * Need to wait a few seconds after link up to get diagnostic information from | ||
3826 | * the phy | ||
3827 | */ | ||
3828 | static void e1000_update_phy_info(unsigned long data) | ||
3829 | { | ||
3830 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | ||
3831 | |||
3832 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
3833 | return; | ||
3834 | |||
3835 | schedule_work(&adapter->update_phy_task); | ||
3836 | } | ||
3837 | |||
3838 | /** | ||
3839 | * e1000e_update_phy_stats - Update the PHY statistics counters | ||
3840 | * @adapter: board private structure | ||
3841 | * | ||
3842 | * Read/clear the upper 16-bit PHY registers and read/accumulate the lower ones | ||
3843 | **/ | ||
3844 | static void e1000e_update_phy_stats(struct e1000_adapter *adapter) | ||
3845 | { | ||
3846 | struct e1000_hw *hw = &adapter->hw; | ||
3847 | s32 ret_val; | ||
3848 | u16 phy_data; | ||
3849 | |||
3850 | ret_val = hw->phy.ops.acquire(hw); | ||
3851 | if (ret_val) | ||
3852 | return; | ||
3853 | |||
3854 | /* | ||
3855 | * A page set is expensive so check if already on desired page. | ||
3856 | * If not, set to the page with the PHY status registers. | ||
3857 | */ | ||
3858 | hw->phy.addr = 1; | ||
3859 | ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, | ||
3860 | &phy_data); | ||
3861 | if (ret_val) | ||
3862 | goto release; | ||
3863 | if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) { | ||
3864 | ret_val = hw->phy.ops.set_page(hw, | ||
3865 | HV_STATS_PAGE << IGP_PAGE_SHIFT); | ||
3866 | if (ret_val) | ||
3867 | goto release; | ||
3868 | } | ||
3869 | |||
3870 | /* Single Collision Count */ | ||
3871 | hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); | ||
3872 | ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); | ||
3873 | if (!ret_val) | ||
3874 | adapter->stats.scc += phy_data; | ||
3875 | |||
3876 | /* Excessive Collision Count */ | ||
3877 | hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); | ||
3878 | ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); | ||
3879 | if (!ret_val) | ||
3880 | adapter->stats.ecol += phy_data; | ||
3881 | |||
3882 | /* Multiple Collision Count */ | ||
3883 | hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); | ||
3884 | ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); | ||
3885 | if (!ret_val) | ||
3886 | adapter->stats.mcc += phy_data; | ||
3887 | |||
3888 | /* Late Collision Count */ | ||
3889 | hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); | ||
3890 | ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); | ||
3891 | if (!ret_val) | ||
3892 | adapter->stats.latecol += phy_data; | ||
3893 | |||
3894 | /* Collision Count - also used for adaptive IFS */ | ||
3895 | hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); | ||
3896 | ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); | ||
3897 | if (!ret_val) | ||
3898 | hw->mac.collision_delta = phy_data; | ||
3899 | |||
3900 | /* Defer Count */ | ||
3901 | hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); | ||
3902 | ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); | ||
3903 | if (!ret_val) | ||
3904 | adapter->stats.dc += phy_data; | ||
3905 | |||
3906 | /* Transmit with no CRS */ | ||
3907 | hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); | ||
3908 | ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); | ||
3909 | if (!ret_val) | ||
3910 | adapter->stats.tncrs += phy_data; | ||
3911 | |||
3912 | release: | ||
3913 | hw->phy.ops.release(hw); | ||
3914 | } | ||
3915 | |||
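Each PHY statistic gathered above is split across an upper and a lower register: the upper half is read first (and its value discarded), and the lower 16 bits are accumulated into the software counter only when the read succeeds. A hedged sketch of that accumulate-on-success pattern, using a made-up phy_read() helper rather than the driver's register ops:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical PHY register read: returns 0 on success, fills *val */
    static int phy_read(uint16_t reg, uint16_t *val)
    {
            (void)reg;
            *val = 7;       /* pretend the counter advanced by 7 */
            return 0;
    }

    /* read the upper half first (value discarded), then accumulate the lower
     * half into the running software counter only if that read succeeded */
    static void accumulate_counter(uint16_t upper_reg, uint16_t lower_reg,
                                   uint64_t *sw_counter)
    {
            uint16_t val;

            phy_read(upper_reg, &val);
            if (!phy_read(lower_reg, &val))
                    *sw_counter += val;
    }

    int main(void)
    {
            uint64_t scc = 0;

            accumulate_counter(/* upper reg */ 0x12, /* lower reg */ 0x13, &scc);
            printf("single collision count: %llu\n", (unsigned long long)scc);
            return 0;
    }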
3916 | /** | ||
3917 | * e1000e_update_stats - Update the board statistics counters | ||
3918 | * @adapter: board private structure | ||
3919 | **/ | ||
3920 | static void e1000e_update_stats(struct e1000_adapter *adapter) | ||
3921 | { | ||
3922 | struct net_device *netdev = adapter->netdev; | ||
3923 | struct e1000_hw *hw = &adapter->hw; | ||
3924 | struct pci_dev *pdev = adapter->pdev; | ||
3925 | |||
3926 | /* | ||
3927 | * Prevent stats update while adapter is being reset, or if the pci | ||
3928 | * connection is down. | ||
3929 | */ | ||
3930 | if (adapter->link_speed == 0) | ||
3931 | return; | ||
3932 | if (pci_channel_offline(pdev)) | ||
3933 | return; | ||
3934 | |||
3935 | adapter->stats.crcerrs += er32(CRCERRS); | ||
3936 | adapter->stats.gprc += er32(GPRC); | ||
3937 | adapter->stats.gorc += er32(GORCL); | ||
3938 | er32(GORCH); /* Clear gorc */ | ||
3939 | adapter->stats.bprc += er32(BPRC); | ||
3940 | adapter->stats.mprc += er32(MPRC); | ||
3941 | adapter->stats.roc += er32(ROC); | ||
3942 | |||
3943 | adapter->stats.mpc += er32(MPC); | ||
3944 | |||
3945 | /* Half-duplex statistics */ | ||
3946 | if (adapter->link_duplex == HALF_DUPLEX) { | ||
3947 | if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { | ||
3948 | e1000e_update_phy_stats(adapter); | ||
3949 | } else { | ||
3950 | adapter->stats.scc += er32(SCC); | ||
3951 | adapter->stats.ecol += er32(ECOL); | ||
3952 | adapter->stats.mcc += er32(MCC); | ||
3953 | adapter->stats.latecol += er32(LATECOL); | ||
3954 | adapter->stats.dc += er32(DC); | ||
3955 | |||
3956 | hw->mac.collision_delta = er32(COLC); | ||
3957 | |||
3958 | if ((hw->mac.type != e1000_82574) && | ||
3959 | (hw->mac.type != e1000_82583)) | ||
3960 | adapter->stats.tncrs += er32(TNCRS); | ||
3961 | } | ||
3962 | adapter->stats.colc += hw->mac.collision_delta; | ||
3963 | } | ||
3964 | |||
3965 | adapter->stats.xonrxc += er32(XONRXC); | ||
3966 | adapter->stats.xontxc += er32(XONTXC); | ||
3967 | adapter->stats.xoffrxc += er32(XOFFRXC); | ||
3968 | adapter->stats.xofftxc += er32(XOFFTXC); | ||
3969 | adapter->stats.gptc += er32(GPTC); | ||
3970 | adapter->stats.gotc += er32(GOTCL); | ||
3971 | er32(GOTCH); /* Clear gotc */ | ||
3972 | adapter->stats.rnbc += er32(RNBC); | ||
3973 | adapter->stats.ruc += er32(RUC); | ||
3974 | |||
3975 | adapter->stats.mptc += er32(MPTC); | ||
3976 | adapter->stats.bptc += er32(BPTC); | ||
3977 | |||
3978 | /* used for adaptive IFS */ | ||
3979 | |||
3980 | hw->mac.tx_packet_delta = er32(TPT); | ||
3981 | adapter->stats.tpt += hw->mac.tx_packet_delta; | ||
3982 | |||
3983 | adapter->stats.algnerrc += er32(ALGNERRC); | ||
3984 | adapter->stats.rxerrc += er32(RXERRC); | ||
3985 | adapter->stats.cexterr += er32(CEXTERR); | ||
3986 | adapter->stats.tsctc += er32(TSCTC); | ||
3987 | adapter->stats.tsctfc += er32(TSCTFC); | ||
3988 | |||
3989 | /* Fill out the OS statistics structure */ | ||
3990 | netdev->stats.multicast = adapter->stats.mprc; | ||
3991 | netdev->stats.collisions = adapter->stats.colc; | ||
3992 | |||
3993 | /* Rx Errors */ | ||
3994 | |||
3995 | /* | ||
3996 | * RLEC on some newer hardware can be incorrect so build | ||
3997 | * our own version based on RUC and ROC | ||
3998 | */ | ||
3999 | netdev->stats.rx_errors = adapter->stats.rxerrc + | ||
4000 | adapter->stats.crcerrs + adapter->stats.algnerrc + | ||
4001 | adapter->stats.ruc + adapter->stats.roc + | ||
4002 | adapter->stats.cexterr; | ||
4003 | netdev->stats.rx_length_errors = adapter->stats.ruc + | ||
4004 | adapter->stats.roc; | ||
4005 | netdev->stats.rx_crc_errors = adapter->stats.crcerrs; | ||
4006 | netdev->stats.rx_frame_errors = adapter->stats.algnerrc; | ||
4007 | netdev->stats.rx_missed_errors = adapter->stats.mpc; | ||
4008 | |||
4009 | /* Tx Errors */ | ||
4010 | netdev->stats.tx_errors = adapter->stats.ecol + | ||
4011 | adapter->stats.latecol; | ||
4012 | netdev->stats.tx_aborted_errors = adapter->stats.ecol; | ||
4013 | netdev->stats.tx_window_errors = adapter->stats.latecol; | ||
4014 | netdev->stats.tx_carrier_errors = adapter->stats.tncrs; | ||
4015 | |||
4016 | /* Tx Dropped needs to be maintained elsewhere */ | ||
4017 | |||
4018 | /* Management Stats */ | ||
4019 | adapter->stats.mgptc += er32(MGTPTC); | ||
4020 | adapter->stats.mgprc += er32(MGTPRC); | ||
4021 | adapter->stats.mgpdc += er32(MGTPDC); | ||
4022 | } | ||
4023 | |||
4024 | /** | ||
4025 | * e1000_phy_read_status - Update the PHY register status snapshot | ||
4026 | * @adapter: board private structure | ||
4027 | **/ | ||
4028 | static void e1000_phy_read_status(struct e1000_adapter *adapter) | ||
4029 | { | ||
4030 | struct e1000_hw *hw = &adapter->hw; | ||
4031 | struct e1000_phy_regs *phy = &adapter->phy_regs; | ||
4032 | |||
4033 | if ((er32(STATUS) & E1000_STATUS_LU) && | ||
4034 | (adapter->hw.phy.media_type == e1000_media_type_copper)) { | ||
4035 | int ret_val; | ||
4036 | |||
4037 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); | ||
4038 | ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); | ||
4039 | ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); | ||
4040 | ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa); | ||
4041 | ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion); | ||
4042 | ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000); | ||
4043 | ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); | ||
4044 | ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); | ||
4045 | if (ret_val) | ||
4046 | e_warn("Error reading PHY register\n"); | ||
4047 | } else { | ||
4048 | /* | ||
4049 | * Do not read PHY registers if link is not up. | ||
4050 | * Set values to typical power-on defaults instead. | ||
4051 | */ | ||
4052 | phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); | ||
4053 | phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | | ||
4054 | BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | | ||
4055 | BMSR_ERCAP); | ||
4056 | phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | | ||
4057 | ADVERTISE_ALL | ADVERTISE_CSMA); | ||
4058 | phy->lpa = 0; | ||
4059 | phy->expansion = EXPANSION_ENABLENPAGE; | ||
4060 | phy->ctrl1000 = ADVERTISE_1000FULL; | ||
4061 | phy->stat1000 = 0; | ||
4062 | phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); | ||
4063 | } | ||
4064 | } | ||
4065 | |||
4066 | static void e1000_print_link_info(struct e1000_adapter *adapter) | ||
4067 | { | ||
4068 | struct e1000_hw *hw = &adapter->hw; | ||
4069 | u32 ctrl = er32(CTRL); | ||
4070 | |||
4071 | /* Link status message must follow this format for user tools */ | ||
4072 | printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, " | ||
4073 | "Flow Control: %s\n", | ||
4074 | adapter->netdev->name, | ||
4075 | adapter->link_speed, | ||
4076 | (adapter->link_duplex == FULL_DUPLEX) ? | ||
4077 | "Full Duplex" : "Half Duplex", | ||
4078 | ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? | ||
4079 | "Rx/Tx" : | ||
4080 | ((ctrl & E1000_CTRL_RFCE) ? "Rx" : | ||
4081 | ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"))); | ||
4082 | } | ||
4083 | |||
4084 | static bool e1000e_has_link(struct e1000_adapter *adapter) | ||
4085 | { | ||
4086 | struct e1000_hw *hw = &adapter->hw; | ||
4087 | bool link_active = 0; | ||
4088 | s32 ret_val = 0; | ||
4089 | |||
4090 | /* | ||
4091 | * get_link_status is set on LSC (link status) interrupt or | ||
4092 | * Rx sequence error interrupt. get_link_status will stay | ||
4093 | * true until check_for_link establishes link, | ||
4094 | * for copper adapters ONLY | ||
4095 | */ | ||
4096 | switch (hw->phy.media_type) { | ||
4097 | case e1000_media_type_copper: | ||
4098 | if (hw->mac.get_link_status) { | ||
4099 | ret_val = hw->mac.ops.check_for_link(hw); | ||
4100 | link_active = !hw->mac.get_link_status; | ||
4101 | } else { | ||
4102 | link_active = 1; | ||
4103 | } | ||
4104 | break; | ||
4105 | case e1000_media_type_fiber: | ||
4106 | ret_val = hw->mac.ops.check_for_link(hw); | ||
4107 | link_active = !!(er32(STATUS) & E1000_STATUS_LU); | ||
4108 | break; | ||
4109 | case e1000_media_type_internal_serdes: | ||
4110 | ret_val = hw->mac.ops.check_for_link(hw); | ||
4111 | link_active = adapter->hw.mac.serdes_has_link; | ||
4112 | break; | ||
4113 | default: | ||
4114 | case e1000_media_type_unknown: | ||
4115 | break; | ||
4116 | } | ||
4117 | |||
4118 | if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && | ||
4119 | (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { | ||
4120 | /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ | ||
4121 | e_info("Gigabit has been disabled, downgrading speed\n"); | ||
4122 | } | ||
4123 | |||
4124 | return link_active; | ||
4125 | } | ||
4126 | |||
4127 | static void e1000e_enable_receives(struct e1000_adapter *adapter) | ||
4128 | { | ||
4129 | /* make sure the receive unit is started */ | ||
4130 | if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && | ||
4131 | (adapter->flags & FLAG_RX_RESTART_NOW)) { | ||
4132 | struct e1000_hw *hw = &adapter->hw; | ||
4133 | u32 rctl = er32(RCTL); | ||
4134 | ew32(RCTL, rctl | E1000_RCTL_EN); | ||
4135 | adapter->flags &= ~FLAG_RX_RESTART_NOW; | ||
4136 | } | ||
4137 | } | ||
4138 | |||
4139 | static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) | ||
4140 | { | ||
4141 | struct e1000_hw *hw = &adapter->hw; | ||
4142 | |||
4143 | /* | ||
4144 | * With 82574 controllers, the PHY needs to be checked periodically | ||
4145 | * for a hung state and reset if two successive calls return true | ||
4146 | */ | ||
4147 | if (e1000_check_phy_82574(hw)) | ||
4148 | adapter->phy_hang_count++; | ||
4149 | else | ||
4150 | adapter->phy_hang_count = 0; | ||
4151 | |||
4152 | if (adapter->phy_hang_count > 1) { | ||
4153 | adapter->phy_hang_count = 0; | ||
4154 | schedule_work(&adapter->reset_task); | ||
4155 | } | ||
4156 | } | ||
4157 | |||
4158 | /** | ||
4159 | * e1000_watchdog - Timer Call-back | ||
4160 | * @data: pointer to adapter cast into an unsigned long | ||
4161 | **/ | ||
4162 | static void e1000_watchdog(unsigned long data) | ||
4163 | { | ||
4164 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | ||
4165 | |||
4166 | /* Do the rest outside of interrupt context */ | ||
4167 | schedule_work(&adapter->watchdog_task); | ||
4168 | |||
4169 | /* TODO: make this use queue_delayed_work() */ | ||
4170 | } | ||
4171 | |||
4172 | static void e1000_watchdog_task(struct work_struct *work) | ||
4173 | { | ||
4174 | struct e1000_adapter *adapter = container_of(work, | ||
4175 | struct e1000_adapter, watchdog_task); | ||
4176 | struct net_device *netdev = adapter->netdev; | ||
4177 | struct e1000_mac_info *mac = &adapter->hw.mac; | ||
4178 | struct e1000_phy_info *phy = &adapter->hw.phy; | ||
4179 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
4180 | struct e1000_hw *hw = &adapter->hw; | ||
4181 | u32 link, tctl; | ||
4182 | |||
4183 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
4184 | return; | ||
4185 | |||
4186 | link = e1000e_has_link(adapter); | ||
4187 | if ((netif_carrier_ok(netdev)) && link) { | ||
4188 | /* Cancel scheduled suspend requests. */ | ||
4189 | pm_runtime_resume(netdev->dev.parent); | ||
4190 | |||
4191 | e1000e_enable_receives(adapter); | ||
4192 | goto link_up; | ||
4193 | } | ||
4194 | |||
4195 | if ((e1000e_enable_tx_pkt_filtering(hw)) && | ||
4196 | (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) | ||
4197 | e1000_update_mng_vlan(adapter); | ||
4198 | |||
4199 | if (link) { | ||
4200 | if (!netif_carrier_ok(netdev)) { | ||
4201 | bool txb2b = 1; | ||
4202 | |||
4203 | /* Cancel scheduled suspend requests. */ | ||
4204 | pm_runtime_resume(netdev->dev.parent); | ||
4205 | |||
4206 | /* update snapshot of PHY registers on LSC */ | ||
4207 | e1000_phy_read_status(adapter); | ||
4208 | mac->ops.get_link_up_info(&adapter->hw, | ||
4209 | &adapter->link_speed, | ||
4210 | &adapter->link_duplex); | ||
4211 | e1000_print_link_info(adapter); | ||
4212 | /* | ||
4213 | * On supported PHYs, check for duplex mismatch only | ||
4214 | * if link has autonegotiated at 10/100 half | ||
4215 | */ | ||
4216 | if ((hw->phy.type == e1000_phy_igp_3 || | ||
4217 | hw->phy.type == e1000_phy_bm) && | ||
4218 | (hw->mac.autoneg == true) && | ||
4219 | (adapter->link_speed == SPEED_10 || | ||
4220 | adapter->link_speed == SPEED_100) && | ||
4221 | (adapter->link_duplex == HALF_DUPLEX)) { | ||
4222 | u16 autoneg_exp; | ||
4223 | |||
4224 | e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); | ||
4225 | |||
4226 | if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) | ||
4227 | e_info("Autonegotiated half duplex but" | ||
4228 | " link partner cannot autoneg. " | ||
4229 | " Try forcing full duplex if " | ||
4230 | "link gets many collisions.\n"); | ||
4231 | } | ||
4232 | |||
4233 | /* adjust timeout factor according to speed/duplex */ | ||
4234 | adapter->tx_timeout_factor = 1; | ||
4235 | switch (adapter->link_speed) { | ||
4236 | case SPEED_10: | ||
4237 | txb2b = 0; | ||
4238 | adapter->tx_timeout_factor = 16; | ||
4239 | break; | ||
4240 | case SPEED_100: | ||
4241 | txb2b = 0; | ||
4242 | adapter->tx_timeout_factor = 10; | ||
4243 | break; | ||
4244 | } | ||
4245 | |||
4246 | /* | ||
4247 | * workaround: re-program speed mode bit after | ||
4248 | * link-up event | ||
4249 | */ | ||
4250 | if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && | ||
4251 | !txb2b) { | ||
4252 | u32 tarc0; | ||
4253 | tarc0 = er32(TARC(0)); | ||
4254 | tarc0 &= ~SPEED_MODE_BIT; | ||
4255 | ew32(TARC(0), tarc0); | ||
4256 | } | ||
4257 | |||
4258 | /* | ||
4259 | * disable TSO for pcie and 10/100 speeds, to avoid | ||
4260 | * some hardware issues | ||
4261 | */ | ||
4262 | if (!(adapter->flags & FLAG_TSO_FORCE)) { | ||
4263 | switch (adapter->link_speed) { | ||
4264 | case SPEED_10: | ||
4265 | case SPEED_100: | ||
4266 | e_info("10/100 speed: disabling TSO\n"); | ||
4267 | netdev->features &= ~NETIF_F_TSO; | ||
4268 | netdev->features &= ~NETIF_F_TSO6; | ||
4269 | break; | ||
4270 | case SPEED_1000: | ||
4271 | netdev->features |= NETIF_F_TSO; | ||
4272 | netdev->features |= NETIF_F_TSO6; | ||
4273 | break; | ||
4274 | default: | ||
4275 | /* oops */ | ||
4276 | break; | ||
4277 | } | ||
4278 | } | ||
4279 | |||
4280 | /* | ||
4281 | * enable transmits in the hardware, need to do this | ||
4282 | * after setting TARC(0) | ||
4283 | */ | ||
4284 | tctl = er32(TCTL); | ||
4285 | tctl |= E1000_TCTL_EN; | ||
4286 | ew32(TCTL, tctl); | ||
4287 | |||
4288 | /* | ||
4289 | * Perform any post-link-up configuration before | ||
4290 | * reporting link up. | ||
4291 | */ | ||
4292 | if (phy->ops.cfg_on_link_up) | ||
4293 | phy->ops.cfg_on_link_up(hw); | ||
4294 | |||
4295 | netif_carrier_on(netdev); | ||
4296 | |||
4297 | if (!test_bit(__E1000_DOWN, &adapter->state)) | ||
4298 | mod_timer(&adapter->phy_info_timer, | ||
4299 | round_jiffies(jiffies + 2 * HZ)); | ||
4300 | } | ||
4301 | } else { | ||
4302 | if (netif_carrier_ok(netdev)) { | ||
4303 | adapter->link_speed = 0; | ||
4304 | adapter->link_duplex = 0; | ||
4305 | /* Link status message must follow this format */ | ||
4306 | printk(KERN_INFO "e1000e: %s NIC Link is Down\n", | ||
4307 | adapter->netdev->name); | ||
4308 | netif_carrier_off(netdev); | ||
4309 | if (!test_bit(__E1000_DOWN, &adapter->state)) | ||
4310 | mod_timer(&adapter->phy_info_timer, | ||
4311 | round_jiffies(jiffies + 2 * HZ)); | ||
4312 | |||
4313 | if (adapter->flags & FLAG_RX_NEEDS_RESTART) | ||
4314 | schedule_work(&adapter->reset_task); | ||
4315 | else | ||
4316 | pm_schedule_suspend(netdev->dev.parent, | ||
4317 | LINK_TIMEOUT); | ||
4318 | } | ||
4319 | } | ||
4320 | |||
4321 | link_up: | ||
4322 | spin_lock(&adapter->stats64_lock); | ||
4323 | e1000e_update_stats(adapter); | ||
4324 | |||
4325 | mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; | ||
4326 | adapter->tpt_old = adapter->stats.tpt; | ||
4327 | mac->collision_delta = adapter->stats.colc - adapter->colc_old; | ||
4328 | adapter->colc_old = adapter->stats.colc; | ||
4329 | |||
4330 | adapter->gorc = adapter->stats.gorc - adapter->gorc_old; | ||
4331 | adapter->gorc_old = adapter->stats.gorc; | ||
4332 | adapter->gotc = adapter->stats.gotc - adapter->gotc_old; | ||
4333 | adapter->gotc_old = adapter->stats.gotc; | ||
4334 | spin_unlock(&adapter->stats64_lock); | ||
4335 | |||
4336 | e1000e_update_adaptive(&adapter->hw); | ||
4337 | |||
4338 | if (!netif_carrier_ok(netdev) && | ||
4339 | (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { | ||
4340 | /* | ||
4341 | * We've lost link, so the controller stops DMA, | ||
4342 | * but we've got queued Tx work that's never going | ||
4343 | * to get done, so reset controller to flush Tx. | ||
4344 | * (Do the reset outside of interrupt context). | ||
4345 | */ | ||
4346 | schedule_work(&adapter->reset_task); | ||
4347 | /* return immediately since reset is imminent */ | ||
4348 | return; | ||
4349 | } | ||
4350 | |||
4351 | /* Simple mode for Interrupt Throttle Rate (ITR) */ | ||
4352 | if (adapter->itr_setting == 4) { | ||
4353 | /* | ||
4354 | * Symmetric Tx/Rx gets a reduced ITR=2000; | ||
4355 | * Total asymmetrical Tx or Rx gets ITR=8000; | ||
4356 | * everyone else is between 2000-8000. | ||
4357 | */ | ||
4358 | u32 goc = (adapter->gotc + adapter->gorc) / 10000; | ||
4359 | u32 dif = (adapter->gotc > adapter->gorc ? | ||
4360 | adapter->gotc - adapter->gorc : | ||
4361 | adapter->gorc - adapter->gotc) / 10000; | ||
4362 | u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; | ||
4363 | |||
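		/*
		 * Worked example: with gotc = 30 MB and gorc = 10 MB over the
		 * ~2 second watchdog interval, goc = 4000 and dif = 2000, so
		 * itr = 2000 * 6000 / 4000 + 2000 = 5000 interrupts/sec, and
		 * the ITR register is written with 10^9 / (5000 * 256) = 781
		 * (the register counts 256 ns units between interrupts).
		 */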
4364 | ew32(ITR, 1000000000 / (itr * 256)); | ||
4365 | } | ||
4366 | |||
4367 | /* Cause software interrupt to ensure Rx ring is cleaned */ | ||
4368 | if (adapter->msix_entries) | ||
4369 | ew32(ICS, adapter->rx_ring->ims_val); | ||
4370 | else | ||
4371 | ew32(ICS, E1000_ICS_RXDMT0); | ||
4372 | |||
4373 | /* flush pending descriptors to memory before detecting Tx hang */ | ||
4374 | e1000e_flush_descriptors(adapter); | ||
4375 | |||
4376 | /* Force detection of hung controller every watchdog period */ | ||
4377 | adapter->detect_tx_hung = 1; | ||
4378 | |||
4379 | /* | ||
4380 | * With 82571 controllers, LAA may be overwritten due to controller | ||
4381 | * reset from the other port. Set the appropriate LAA in RAR[0] | ||
4382 | */ | ||
4383 | if (e1000e_get_laa_state_82571(hw)) | ||
4384 | e1000e_rar_set(hw, adapter->hw.mac.addr, 0); | ||
4385 | |||
4386 | if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) | ||
4387 | e1000e_check_82574_phy_workaround(adapter); | ||
4388 | |||
4389 | /* Reset the timer */ | ||
4390 | if (!test_bit(__E1000_DOWN, &adapter->state)) | ||
4391 | mod_timer(&adapter->watchdog_timer, | ||
4392 | round_jiffies(jiffies + 2 * HZ)); | ||
4393 | } | ||
4394 | |||
4395 | #define E1000_TX_FLAGS_CSUM 0x00000001 | ||
4396 | #define E1000_TX_FLAGS_VLAN 0x00000002 | ||
4397 | #define E1000_TX_FLAGS_TSO 0x00000004 | ||
4398 | #define E1000_TX_FLAGS_IPV4 0x00000008 | ||
4399 | #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 | ||
4400 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 | ||
4401 | |||
4402 | static int e1000_tso(struct e1000_adapter *adapter, | ||
4403 | struct sk_buff *skb) | ||
4404 | { | ||
4405 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
4406 | struct e1000_context_desc *context_desc; | ||
4407 | struct e1000_buffer *buffer_info; | ||
4408 | unsigned int i; | ||
4409 | u32 cmd_length = 0; | ||
4410 | u16 ipcse = 0, tucse, mss; | ||
4411 | u8 ipcss, ipcso, tucss, tucso, hdr_len; | ||
4412 | |||
4413 | if (!skb_is_gso(skb)) | ||
4414 | return 0; | ||
4415 | |||
4416 | if (skb_header_cloned(skb)) { | ||
4417 | int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | ||
4418 | |||
4419 | if (err) | ||
4420 | return err; | ||
4421 | } | ||
4422 | |||
4423 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | ||
4424 | mss = skb_shinfo(skb)->gso_size; | ||
4425 | if (skb->protocol == htons(ETH_P_IP)) { | ||
4426 | struct iphdr *iph = ip_hdr(skb); | ||
4427 | iph->tot_len = 0; | ||
4428 | iph->check = 0; | ||
4429 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, | ||
4430 | 0, IPPROTO_TCP, 0); | ||
4431 | cmd_length = E1000_TXD_CMD_IP; | ||
4432 | ipcse = skb_transport_offset(skb) - 1; | ||
4433 | } else if (skb_is_gso_v6(skb)) { | ||
4434 | ipv6_hdr(skb)->payload_len = 0; | ||
4435 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | ||
4436 | &ipv6_hdr(skb)->daddr, | ||
4437 | 0, IPPROTO_TCP, 0); | ||
4438 | ipcse = 0; | ||
4439 | } | ||
4440 | ipcss = skb_network_offset(skb); | ||
4441 | ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; | ||
4442 | tucss = skb_transport_offset(skb); | ||
4443 | tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; | ||
4444 | tucse = 0; | ||
4445 | |||
4446 | cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | | ||
4447 | E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); | ||
4448 | |||
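	/*
	 * The context descriptor written below tells the hardware how to
	 * segment: ipcss/ipcso/ipcse describe the IP header (start, checksum
	 * field and end offsets), tucss/tucso give the TCP header start and
	 * checksum field offsets (tucse == 0 means "through end of packet"),
	 * and mss/hdr_len let the hardware replicate the headers and fix up
	 * the IP/TCP checksums for every segment it generates.
	 */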
4449 | i = tx_ring->next_to_use; | ||
4450 | context_desc = E1000_CONTEXT_DESC(*tx_ring, i); | ||
4451 | buffer_info = &tx_ring->buffer_info[i]; | ||
4452 | |||
4453 | context_desc->lower_setup.ip_fields.ipcss = ipcss; | ||
4454 | context_desc->lower_setup.ip_fields.ipcso = ipcso; | ||
4455 | context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); | ||
4456 | context_desc->upper_setup.tcp_fields.tucss = tucss; | ||
4457 | context_desc->upper_setup.tcp_fields.tucso = tucso; | ||
4458 | context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); | ||
4459 | context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); | ||
4460 | context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; | ||
4461 | context_desc->cmd_and_length = cpu_to_le32(cmd_length); | ||
4462 | |||
4463 | buffer_info->time_stamp = jiffies; | ||
4464 | buffer_info->next_to_watch = i; | ||
4465 | |||
4466 | i++; | ||
4467 | if (i == tx_ring->count) | ||
4468 | i = 0; | ||
4469 | tx_ring->next_to_use = i; | ||
4470 | |||
4471 | return 1; | ||
4472 | } | ||
4473 | |||
4474 | static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) | ||
4475 | { | ||
4476 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
4477 | struct e1000_context_desc *context_desc; | ||
4478 | struct e1000_buffer *buffer_info; | ||
4479 | unsigned int i; | ||
4480 | u8 css; | ||
4481 | u32 cmd_len = E1000_TXD_CMD_DEXT; | ||
4482 | __be16 protocol; | ||
4483 | |||
4484 | if (skb->ip_summed != CHECKSUM_PARTIAL) | ||
4485 | return 0; | ||
4486 | |||
4487 | if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) | ||
4488 | protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; | ||
4489 | else | ||
4490 | protocol = skb->protocol; | ||
4491 | |||
4492 | switch (protocol) { | ||
4493 | case cpu_to_be16(ETH_P_IP): | ||
4494 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | ||
4495 | cmd_len |= E1000_TXD_CMD_TCP; | ||
4496 | break; | ||
4497 | case cpu_to_be16(ETH_P_IPV6): | ||
4498 | /* XXX not handling all IPV6 headers */ | ||
4499 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) | ||
4500 | cmd_len |= E1000_TXD_CMD_TCP; | ||
4501 | break; | ||
4502 | default: | ||
4503 | if (unlikely(net_ratelimit())) | ||
4504 | e_warn("checksum_partial proto=%x!\n", | ||
4505 | be16_to_cpu(protocol)); | ||
4506 | break; | ||
4507 | } | ||
4508 | |||
4509 | css = skb_checksum_start_offset(skb); | ||
4510 | |||
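	/*
	 * For checksum offload the context descriptor only needs the TCP/UDP
	 * fields: tucss (css) is where the hardware starts summing, tucso
	 * (css + skb->csum_offset) is where it stores the result, and
	 * tucse == 0 means it sums through the end of the packet.
	 */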
4511 | i = tx_ring->next_to_use; | ||
4512 | buffer_info = &tx_ring->buffer_info[i]; | ||
4513 | context_desc = E1000_CONTEXT_DESC(*tx_ring, i); | ||
4514 | |||
4515 | context_desc->lower_setup.ip_config = 0; | ||
4516 | context_desc->upper_setup.tcp_fields.tucss = css; | ||
4517 | context_desc->upper_setup.tcp_fields.tucso = | ||
4518 | css + skb->csum_offset; | ||
4519 | context_desc->upper_setup.tcp_fields.tucse = 0; | ||
4520 | context_desc->tcp_seg_setup.data = 0; | ||
4521 | context_desc->cmd_and_length = cpu_to_le32(cmd_len); | ||
4522 | |||
4523 | buffer_info->time_stamp = jiffies; | ||
4524 | buffer_info->next_to_watch = i; | ||
4525 | |||
4526 | i++; | ||
4527 | if (i == tx_ring->count) | ||
4528 | i = 0; | ||
4529 | tx_ring->next_to_use = i; | ||
4530 | |||
4531 | return 1; | ||
4532 | } | ||
4533 | |||
4534 | #define E1000_MAX_PER_TXD 8192 | ||
4535 | #define E1000_MAX_TXD_PWR 12 | ||
4536 | |||
4537 | static int e1000_tx_map(struct e1000_adapter *adapter, | ||
4538 | struct sk_buff *skb, unsigned int first, | ||
4539 | unsigned int max_per_txd, unsigned int nr_frags, | ||
4540 | unsigned int mss) | ||
4541 | { | ||
4542 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
4543 | struct pci_dev *pdev = adapter->pdev; | ||
4544 | struct e1000_buffer *buffer_info; | ||
4545 | unsigned int len = skb_headlen(skb); | ||
4546 | unsigned int offset = 0, size, count = 0, i; | ||
4547 | unsigned int f, bytecount, segs; | ||
4548 | |||
4549 | i = tx_ring->next_to_use; | ||
4550 | |||
4551 | while (len) { | ||
4552 | buffer_info = &tx_ring->buffer_info[i]; | ||
4553 | size = min(len, max_per_txd); | ||
4554 | |||
4555 | buffer_info->length = size; | ||
4556 | buffer_info->time_stamp = jiffies; | ||
4557 | buffer_info->next_to_watch = i; | ||
4558 | buffer_info->dma = dma_map_single(&pdev->dev, | ||
4559 | skb->data + offset, | ||
4560 | size, DMA_TO_DEVICE); | ||
4561 | buffer_info->mapped_as_page = false; | ||
4562 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) | ||
4563 | goto dma_error; | ||
4564 | |||
4565 | len -= size; | ||
4566 | offset += size; | ||
4567 | count++; | ||
4568 | |||
4569 | if (len) { | ||
4570 | i++; | ||
4571 | if (i == tx_ring->count) | ||
4572 | i = 0; | ||
4573 | } | ||
4574 | } | ||
4575 | |||
4576 | for (f = 0; f < nr_frags; f++) { | ||
4577 | struct skb_frag_struct *frag; | ||
4578 | |||
4579 | frag = &skb_shinfo(skb)->frags[f]; | ||
4580 | len = frag->size; | ||
4581 | offset = frag->page_offset; | ||
4582 | |||
4583 | while (len) { | ||
4584 | i++; | ||
4585 | if (i == tx_ring->count) | ||
4586 | i = 0; | ||
4587 | |||
4588 | buffer_info = &tx_ring->buffer_info[i]; | ||
4589 | size = min(len, max_per_txd); | ||
4590 | |||
4591 | buffer_info->length = size; | ||
4592 | buffer_info->time_stamp = jiffies; | ||
4593 | buffer_info->next_to_watch = i; | ||
4594 | buffer_info->dma = dma_map_page(&pdev->dev, frag->page, | ||
4595 | offset, size, | ||
4596 | DMA_TO_DEVICE); | ||
4597 | buffer_info->mapped_as_page = true; | ||
4598 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) | ||
4599 | goto dma_error; | ||
4600 | |||
4601 | len -= size; | ||
4602 | offset += size; | ||
4603 | count++; | ||
4604 | } | ||
4605 | } | ||
4606 | |||
4607 | segs = skb_shinfo(skb)->gso_segs ? : 1; | ||
4608 | /* multiply data chunks by size of headers */ | ||
4609 | bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; | ||
4610 | |||
4611 | tx_ring->buffer_info[i].skb = skb; | ||
4612 | tx_ring->buffer_info[i].segs = segs; | ||
4613 | tx_ring->buffer_info[i].bytecount = bytecount; | ||
4614 | tx_ring->buffer_info[first].next_to_watch = i; | ||
4615 | |||
4616 | return count; | ||
4617 | |||
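	/*
	 * Unwind path: if a DMA mapping failed, walk backwards from the
	 * current position and release the buffers mapped so far, then
	 * return 0 so the caller drops the skb and rewinds next_to_use.
	 */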
4618 | dma_error: | ||
4619 | dev_err(&pdev->dev, "Tx DMA map failed\n"); | ||
4620 | buffer_info->dma = 0; | ||
4621 | if (count) | ||
4622 | count--; | ||
4623 | |||
4624 | while (count--) { | ||
4625 | if (i == 0) | ||
4626 | i += tx_ring->count; | ||
4627 | i--; | ||
4628 | buffer_info = &tx_ring->buffer_info[i]; | ||
4629 | e1000_put_txbuf(adapter, buffer_info); | ||
4630 | } | ||
4631 | |||
4632 | return 0; | ||
4633 | } | ||
4634 | |||
4635 | static void e1000_tx_queue(struct e1000_adapter *adapter, | ||
4636 | int tx_flags, int count) | ||
4637 | { | ||
4638 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
4639 | struct e1000_tx_desc *tx_desc = NULL; | ||
4640 | struct e1000_buffer *buffer_info; | ||
4641 | u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; | ||
4642 | unsigned int i; | ||
4643 | |||
4644 | if (tx_flags & E1000_TX_FLAGS_TSO) { | ||
4645 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | | ||
4646 | E1000_TXD_CMD_TSE; | ||
4647 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; | ||
4648 | |||
4649 | if (tx_flags & E1000_TX_FLAGS_IPV4) | ||
4650 | txd_upper |= E1000_TXD_POPTS_IXSM << 8; | ||
4651 | } | ||
4652 | |||
4653 | if (tx_flags & E1000_TX_FLAGS_CSUM) { | ||
4654 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; | ||
4655 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; | ||
4656 | } | ||
4657 | |||
4658 | if (tx_flags & E1000_TX_FLAGS_VLAN) { | ||
4659 | txd_lower |= E1000_TXD_CMD_VLE; | ||
4660 | txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); | ||
4661 | } | ||
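	/*
	 * DEXT + DTYP_D mark these as extended data descriptors; the POPTS
	 * TXSM/IXSM bits request TCP/UDP and IP checksum insertion based on
	 * the preceding context descriptor, and CMD_VLE plus the tag in the
	 * upper dword requests VLAN tag insertion on transmit.
	 */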
4662 | |||
4663 | i = tx_ring->next_to_use; | ||
4664 | |||
4665 | do { | ||
4666 | buffer_info = &tx_ring->buffer_info[i]; | ||
4667 | tx_desc = E1000_TX_DESC(*tx_ring, i); | ||
4668 | tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | ||
4669 | tx_desc->lower.data = | ||
4670 | cpu_to_le32(txd_lower | buffer_info->length); | ||
4671 | tx_desc->upper.data = cpu_to_le32(txd_upper); | ||
4672 | |||
4673 | i++; | ||
4674 | if (i == tx_ring->count) | ||
4675 | i = 0; | ||
4676 | } while (--count > 0); | ||
4677 | |||
4678 | tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); | ||
4679 | |||
4680 | /* | ||
4681 | * Force memory writes to complete before letting h/w | ||
4682 | * know there are new descriptors to fetch. (Only | ||
4683 | * applicable for weak-ordered memory model archs, | ||
4684 | * such as IA-64). | ||
4685 | */ | ||
4686 | wmb(); | ||
4687 | |||
4688 | tx_ring->next_to_use = i; | ||
4689 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | ||
4690 | /* | ||
4691 | * we need this if more than one processor can write to our tail | ||
4692 | * at a time, it synchronizes IO on IA64/Altix systems | ||
4693 | */ | ||
4694 | mmiowb(); | ||
4695 | } | ||
4696 | |||
4697 | #define MINIMUM_DHCP_PACKET_SIZE 282 | ||
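/*
 * 282 bytes is roughly the smallest valid DHCP frame:
 * 14 (Ethernet) + 20 (IPv4) + 8 (UDP) + 236 (BOOTP fixed fields) +
 * 4 (DHCP magic cookie).
 */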
4698 | static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, | ||
4699 | struct sk_buff *skb) | ||
4700 | { | ||
4701 | struct e1000_hw *hw = &adapter->hw; | ||
4702 | u16 length, offset; | ||
4703 | |||
4704 | if (vlan_tx_tag_present(skb)) { | ||
4705 | if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && | ||
4706 | (adapter->hw.mng_cookie.status & | ||
4707 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) | ||
4708 | return 0; | ||
4709 | } | ||
4710 | |||
4711 | if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) | ||
4712 | return 0; | ||
4713 | |||
4714 | if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) | ||
4715 | return 0; | ||
4716 | |||
4717 | { | ||
4718 | const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); | ||
4719 | struct udphdr *udp; | ||
4720 | |||
4721 | if (ip->protocol != IPPROTO_UDP) | ||
4722 | return 0; | ||
4723 | |||
4724 | udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); | ||
4725 | if (ntohs(udp->dest) != 67) | ||
4726 | return 0; | ||
4727 | |||
4728 | offset = (u8 *)udp + 8 - skb->data; | ||
4729 | length = skb->len - offset; | ||
4730 | return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); | ||
4731 | } | ||
4732 | |||
4733 | return 0; | ||
4734 | } | ||
4735 | |||
4736 | static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) | ||
4737 | { | ||
4738 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
4739 | |||
4740 | netif_stop_queue(netdev); | ||
4741 | /* | ||
4742 | * Herbert's original patch had: | ||
4743 | * smp_mb__after_netif_stop_queue(); | ||
4744 | * but since that doesn't exist yet, just open code it. | ||
4745 | */ | ||
4746 | smp_mb(); | ||
4747 | |||
4748 | /* | ||
4749 | * We need to check again in a case another CPU has just | ||
4750 | * made room available. | ||
4751 | */ | ||
4752 | if (e1000_desc_unused(adapter->tx_ring) < size) | ||
4753 | return -EBUSY; | ||
4754 | |||
4755 | /* A reprieve! */ | ||
4756 | netif_start_queue(netdev); | ||
4757 | ++adapter->restart_queue; | ||
4758 | return 0; | ||
4759 | } | ||
4760 | |||
4761 | static int e1000_maybe_stop_tx(struct net_device *netdev, int size) | ||
4762 | { | ||
4763 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
4764 | |||
4765 | if (e1000_desc_unused(adapter->tx_ring) >= size) | ||
4766 | return 0; | ||
4767 | return __e1000_maybe_stop_tx(netdev, size); | ||
4768 | } | ||
4769 | |||
4770 | #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) | ||
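/*
 * TXD_USE_COUNT() conservatively estimates descriptors per buffer in
 * 2^max_txd_pwr byte units, e.g. with max_txd_pwr == 12 a 3000-byte
 * fragment counts as (3000 >> 12) + 1 = 1 descriptor and a 9000-byte
 * one as 3.
 */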
4771 | static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | ||
4772 | struct net_device *netdev) | ||
4773 | { | ||
4774 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
4775 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
4776 | unsigned int first; | ||
4777 | unsigned int max_per_txd = E1000_MAX_PER_TXD; | ||
4778 | unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; | ||
4779 | unsigned int tx_flags = 0; | ||
4780 | unsigned int len = skb_headlen(skb); | ||
4781 | unsigned int nr_frags; | ||
4782 | unsigned int mss; | ||
4783 | int count = 0; | ||
4784 | int tso; | ||
4785 | unsigned int f; | ||
4786 | |||
4787 | if (test_bit(__E1000_DOWN, &adapter->state)) { | ||
4788 | dev_kfree_skb_any(skb); | ||
4789 | return NETDEV_TX_OK; | ||
4790 | } | ||
4791 | |||
4792 | if (skb->len <= 0) { | ||
4793 | dev_kfree_skb_any(skb); | ||
4794 | return NETDEV_TX_OK; | ||
4795 | } | ||
4796 | |||
4797 | mss = skb_shinfo(skb)->gso_size; | ||
4798 | /* | ||
4799 | * The controller does a simple calculation to | ||
4800 | * make sure there is enough room in the FIFO before | ||
4801 | 	 * initiating the DMA for each buffer: a buffer may span at most | ||
4802 | 	 * 4 MSS-sized chunks, i.e. ceil(buffer len / mss) <= 4.  To make | ||
4803 | 	 * sure we don't overrun the FIFO, cap the max buffer len when mss | ||
4804 | 	 * drops. | ||
4805 | */ | ||
4806 | if (mss) { | ||
4807 | u8 hdr_len; | ||
4808 | max_per_txd = min(mss << 2, max_per_txd); | ||
4809 | max_txd_pwr = fls(max_per_txd) - 1; | ||
4810 | |||
4811 | /* | ||
4812 | * TSO Workaround for 82571/2/3 Controllers -- if skb->data | ||
4813 | * points to just header, pull a few bytes of payload from | ||
4814 | * frags into skb->data | ||
4815 | */ | ||
4816 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | ||
4817 | /* | ||
4818 | 		 * we do this workaround for ES2LAN, but it is unnecessary; | ||
4819 | * avoiding it could save a lot of cycles | ||
4820 | */ | ||
4821 | if (skb->data_len && (hdr_len == len)) { | ||
4822 | unsigned int pull_size; | ||
4823 | |||
4824 | pull_size = min((unsigned int)4, skb->data_len); | ||
4825 | if (!__pskb_pull_tail(skb, pull_size)) { | ||
4826 | e_err("__pskb_pull_tail failed.\n"); | ||
4827 | dev_kfree_skb_any(skb); | ||
4828 | return NETDEV_TX_OK; | ||
4829 | } | ||
4830 | len = skb_headlen(skb); | ||
4831 | } | ||
4832 | } | ||
4833 | |||
4834 | /* reserve a descriptor for the offload context */ | ||
4835 | if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) | ||
4836 | count++; | ||
4837 | count++; | ||
4838 | |||
4839 | count += TXD_USE_COUNT(len, max_txd_pwr); | ||
4840 | |||
4841 | nr_frags = skb_shinfo(skb)->nr_frags; | ||
4842 | for (f = 0; f < nr_frags; f++) | ||
4843 | count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, | ||
4844 | max_txd_pwr); | ||
4845 | |||
4846 | if (adapter->hw.mac.tx_pkt_filtering) | ||
4847 | e1000_transfer_dhcp_info(adapter, skb); | ||
4848 | |||
4849 | /* | ||
4850 | * need: count + 2 desc gap to keep tail from touching | ||
4851 | * head, otherwise try next time | ||
4852 | */ | ||
4853 | if (e1000_maybe_stop_tx(netdev, count + 2)) | ||
4854 | return NETDEV_TX_BUSY; | ||
4855 | |||
4856 | if (vlan_tx_tag_present(skb)) { | ||
4857 | tx_flags |= E1000_TX_FLAGS_VLAN; | ||
4858 | tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); | ||
4859 | } | ||
4860 | |||
4861 | first = tx_ring->next_to_use; | ||
4862 | |||
4863 | tso = e1000_tso(adapter, skb); | ||
4864 | if (tso < 0) { | ||
4865 | dev_kfree_skb_any(skb); | ||
4866 | return NETDEV_TX_OK; | ||
4867 | } | ||
4868 | |||
4869 | if (tso) | ||
4870 | tx_flags |= E1000_TX_FLAGS_TSO; | ||
4871 | else if (e1000_tx_csum(adapter, skb)) | ||
4872 | tx_flags |= E1000_TX_FLAGS_CSUM; | ||
4873 | |||
4874 | /* | ||
4875 | 	 * The old method was to assume an IPv4 packet by default if TSO was | ||
4876 | 	 * enabled, but 82571 hardware supports TSO for IPv6 as well, so we | ||
4877 | 	 * must check the protocol instead of assuming. | ||
4878 | */ | ||
4879 | if (skb->protocol == htons(ETH_P_IP)) | ||
4880 | tx_flags |= E1000_TX_FLAGS_IPV4; | ||
4881 | |||
4882 | /* if count is 0 then mapping error has occurred */ | ||
4883 | count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss); | ||
4884 | if (count) { | ||
4885 | e1000_tx_queue(adapter, tx_flags, count); | ||
4886 | /* Make sure there is space in the ring for the next send. */ | ||
4887 | e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); | ||
4888 | |||
4889 | } else { | ||
4890 | dev_kfree_skb_any(skb); | ||
4891 | tx_ring->buffer_info[first].time_stamp = 0; | ||
4892 | tx_ring->next_to_use = first; | ||
4893 | } | ||
4894 | |||
4895 | return NETDEV_TX_OK; | ||
4896 | } | ||
4897 | |||
4898 | /** | ||
4899 | * e1000_tx_timeout - Respond to a Tx Hang | ||
4900 | * @netdev: network interface device structure | ||
4901 | **/ | ||
4902 | static void e1000_tx_timeout(struct net_device *netdev) | ||
4903 | { | ||
4904 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
4905 | |||
4906 | /* Do the reset outside of interrupt context */ | ||
4907 | adapter->tx_timeout_count++; | ||
4908 | schedule_work(&adapter->reset_task); | ||
4909 | } | ||
4910 | |||
4911 | static void e1000_reset_task(struct work_struct *work) | ||
4912 | { | ||
4913 | struct e1000_adapter *adapter; | ||
4914 | adapter = container_of(work, struct e1000_adapter, reset_task); | ||
4915 | |||
4916 | /* don't run the task if already down */ | ||
4917 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
4918 | return; | ||
4919 | |||
4920 | if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && | ||
4921 | (adapter->flags & FLAG_RX_RESTART_NOW))) { | ||
4922 | e1000e_dump(adapter); | ||
4923 | e_err("Reset adapter\n"); | ||
4924 | } | ||
4925 | e1000e_reinit_locked(adapter); | ||
4926 | } | ||
4927 | |||
4928 | /** | ||
4929 | * e1000_get_stats64 - Get System Network Statistics | ||
4930 | * @netdev: network interface device structure | ||
4931 | * @stats: rtnl_link_stats64 pointer | ||
4932 | * | ||
4933 | * Returns the address of the device statistics structure. | ||
4934 | **/ | ||
4935 | struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, | ||
4936 | struct rtnl_link_stats64 *stats) | ||
4937 | { | ||
4938 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
4939 | |||
4940 | memset(stats, 0, sizeof(struct rtnl_link_stats64)); | ||
4941 | spin_lock(&adapter->stats64_lock); | ||
4942 | e1000e_update_stats(adapter); | ||
4943 | /* Fill out the OS statistics structure */ | ||
4944 | stats->rx_bytes = adapter->stats.gorc; | ||
4945 | stats->rx_packets = adapter->stats.gprc; | ||
4946 | stats->tx_bytes = adapter->stats.gotc; | ||
4947 | stats->tx_packets = adapter->stats.gptc; | ||
4948 | stats->multicast = adapter->stats.mprc; | ||
4949 | stats->collisions = adapter->stats.colc; | ||
4950 | |||
4951 | /* Rx Errors */ | ||
4952 | |||
4953 | /* | ||
4954 | * RLEC on some newer hardware can be incorrect so build | ||
4955 | * our own version based on RUC and ROC | ||
4956 | */ | ||
4957 | stats->rx_errors = adapter->stats.rxerrc + | ||
4958 | adapter->stats.crcerrs + adapter->stats.algnerrc + | ||
4959 | adapter->stats.ruc + adapter->stats.roc + | ||
4960 | adapter->stats.cexterr; | ||
4961 | stats->rx_length_errors = adapter->stats.ruc + | ||
4962 | adapter->stats.roc; | ||
4963 | stats->rx_crc_errors = adapter->stats.crcerrs; | ||
4964 | stats->rx_frame_errors = adapter->stats.algnerrc; | ||
4965 | stats->rx_missed_errors = adapter->stats.mpc; | ||
4966 | |||
4967 | /* Tx Errors */ | ||
4968 | stats->tx_errors = adapter->stats.ecol + | ||
4969 | adapter->stats.latecol; | ||
4970 | stats->tx_aborted_errors = adapter->stats.ecol; | ||
4971 | stats->tx_window_errors = adapter->stats.latecol; | ||
4972 | stats->tx_carrier_errors = adapter->stats.tncrs; | ||
4973 | |||
4974 | /* Tx Dropped needs to be maintained elsewhere */ | ||
4975 | |||
4976 | spin_unlock(&adapter->stats64_lock); | ||
4977 | return stats; | ||
4978 | } | ||
4979 | |||
4980 | /** | ||
4981 | * e1000_change_mtu - Change the Maximum Transfer Unit | ||
4982 | * @netdev: network interface device structure | ||
4983 | * @new_mtu: new value for maximum frame size | ||
4984 | * | ||
4985 | * Returns 0 on success, negative on failure | ||
4986 | **/ | ||
4987 | static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | ||
4988 | { | ||
4989 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
4990 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; | ||
4991 | |||
4992 | /* Jumbo frame support */ | ||
4993 | if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && | ||
4994 | !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { | ||
4995 | e_err("Jumbo Frames not supported.\n"); | ||
4996 | return -EINVAL; | ||
4997 | } | ||
4998 | |||
4999 | /* Supported frame sizes */ | ||
5000 | if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || | ||
5001 | (max_frame > adapter->max_hw_frame_size)) { | ||
5002 | e_err("Unsupported MTU setting\n"); | ||
5003 | return -EINVAL; | ||
5004 | } | ||
5005 | |||
5006 | /* Jumbo frame workaround on 82579 requires CRC be stripped */ | ||
5007 | if ((adapter->hw.mac.type == e1000_pch2lan) && | ||
5008 | !(adapter->flags2 & FLAG2_CRC_STRIPPING) && | ||
5009 | (new_mtu > ETH_DATA_LEN)) { | ||
5010 | e_err("Jumbo Frames not supported on 82579 when CRC " | ||
5011 | "stripping is disabled.\n"); | ||
5012 | return -EINVAL; | ||
5013 | } | ||
5014 | |||
5015 | /* 82573 Errata 17 */ | ||
5016 | if (((adapter->hw.mac.type == e1000_82573) || | ||
5017 | (adapter->hw.mac.type == e1000_82574)) && | ||
5018 | (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) { | ||
5019 | adapter->flags2 |= FLAG2_DISABLE_ASPM_L1; | ||
5020 | e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1); | ||
5021 | } | ||
5022 | |||
5023 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | ||
5024 | usleep_range(1000, 2000); | ||
5025 | /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ | ||
5026 | adapter->max_frame_size = max_frame; | ||
5027 | e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); | ||
5028 | netdev->mtu = new_mtu; | ||
5029 | if (netif_running(netdev)) | ||
5030 | e1000e_down(adapter); | ||
5031 | |||
5032 | /* | ||
5033 | * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | ||
5034 | * means we reserve 2 more, this pushes us to allocate from the next | ||
5035 | * larger slab size. | ||
5036 | * i.e. RXBUFFER_2048 --> size-4096 slab | ||
5037 | * However with the new *_jumbo_rx* routines, jumbo receives will use | ||
5038 | * fragmented skbs | ||
5039 | */ | ||
5040 | |||
5041 | if (max_frame <= 2048) | ||
5042 | adapter->rx_buffer_len = 2048; | ||
5043 | else | ||
5044 | adapter->rx_buffer_len = 4096; | ||
5045 | |||
5046 | /* adjust allocation if LPE protects us, and we aren't using SBP */ | ||
5047 | if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || | ||
5048 | (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) | ||
5049 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN | ||
5050 | + ETH_FCS_LEN; | ||
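	/*
	 * Example: the default 1500-byte MTU gives max_frame = 1518, so
	 * rx_buffer_len is set to 2048 and then trimmed above to 1522
	 * (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN); a 9000-byte MTU gives
	 * max_frame = 9018 and 4096-byte buffers, with jumbo receives built
	 * from page fragments as noted earlier.
	 */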
5051 | |||
5052 | if (netif_running(netdev)) | ||
5053 | e1000e_up(adapter); | ||
5054 | else | ||
5055 | e1000e_reset(adapter); | ||
5056 | |||
5057 | clear_bit(__E1000_RESETTING, &adapter->state); | ||
5058 | |||
5059 | return 0; | ||
5060 | } | ||
5061 | |||
5062 | static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, | ||
5063 | int cmd) | ||
5064 | { | ||
5065 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
5066 | struct mii_ioctl_data *data = if_mii(ifr); | ||
5067 | |||
5068 | if (adapter->hw.phy.media_type != e1000_media_type_copper) | ||
5069 | return -EOPNOTSUPP; | ||
5070 | |||
5071 | switch (cmd) { | ||
5072 | case SIOCGMIIPHY: | ||
5073 | data->phy_id = adapter->hw.phy.addr; | ||
5074 | break; | ||
5075 | case SIOCGMIIREG: | ||
5076 | e1000_phy_read_status(adapter); | ||
5077 | |||
5078 | switch (data->reg_num & 0x1F) { | ||
5079 | case MII_BMCR: | ||
5080 | data->val_out = adapter->phy_regs.bmcr; | ||
5081 | break; | ||
5082 | case MII_BMSR: | ||
5083 | data->val_out = adapter->phy_regs.bmsr; | ||
5084 | break; | ||
5085 | case MII_PHYSID1: | ||
5086 | data->val_out = (adapter->hw.phy.id >> 16); | ||
5087 | break; | ||
5088 | case MII_PHYSID2: | ||
5089 | data->val_out = (adapter->hw.phy.id & 0xFFFF); | ||
5090 | break; | ||
5091 | case MII_ADVERTISE: | ||
5092 | data->val_out = adapter->phy_regs.advertise; | ||
5093 | break; | ||
5094 | case MII_LPA: | ||
5095 | data->val_out = adapter->phy_regs.lpa; | ||
5096 | break; | ||
5097 | case MII_EXPANSION: | ||
5098 | data->val_out = adapter->phy_regs.expansion; | ||
5099 | break; | ||
5100 | case MII_CTRL1000: | ||
5101 | data->val_out = adapter->phy_regs.ctrl1000; | ||
5102 | break; | ||
5103 | case MII_STAT1000: | ||
5104 | data->val_out = adapter->phy_regs.stat1000; | ||
5105 | break; | ||
5106 | case MII_ESTATUS: | ||
5107 | data->val_out = adapter->phy_regs.estatus; | ||
5108 | break; | ||
5109 | default: | ||
5110 | return -EIO; | ||
5111 | } | ||
5112 | break; | ||
5113 | case SIOCSMIIREG: | ||
5114 | default: | ||
5115 | return -EOPNOTSUPP; | ||
5116 | } | ||
5117 | return 0; | ||
5118 | } | ||
5119 | |||
5120 | static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | ||
5121 | { | ||
5122 | switch (cmd) { | ||
5123 | case SIOCGMIIPHY: | ||
5124 | case SIOCGMIIREG: | ||
5125 | case SIOCSMIIREG: | ||
5126 | return e1000_mii_ioctl(netdev, ifr, cmd); | ||
5127 | default: | ||
5128 | return -EOPNOTSUPP; | ||
5129 | } | ||
5130 | } | ||
5131 | |||
5132 | static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) | ||
5133 | { | ||
5134 | struct e1000_hw *hw = &adapter->hw; | ||
5135 | u32 i, mac_reg; | ||
5136 | u16 phy_reg, wuc_enable; | ||
5137 | int retval = 0; | ||
5138 | |||
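	/*
	 * On parts with PHY-based wakeup (FLAG2_HAS_PHY_WAKEUP) the MAC
	 * cannot match wake packets while the device is suspended, so the
	 * receive address filters (RARs/MTA) and the relevant RCTL bits are
	 * mirrored into the PHY's BM_WUC wakeup page below and the PHY
	 * itself is armed to assert PME.
	 */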
5139 | /* copy MAC RARs to PHY RARs */ | ||
5140 | e1000_copy_rx_addrs_to_phy_ich8lan(hw); | ||
5141 | |||
5142 | retval = hw->phy.ops.acquire(hw); | ||
5143 | if (retval) { | ||
5144 | e_err("Could not acquire PHY\n"); | ||
5145 | return retval; | ||
5146 | } | ||
5147 | |||
5148 | /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ | ||
5149 | retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); | ||
5150 | if (retval) | ||
5151 | goto out; | ||
5152 | |||
5153 | /* copy MAC MTA to PHY MTA - only needed for pchlan */ | ||
5154 | for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { | ||
5155 | mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); | ||
5156 | hw->phy.ops.write_reg_page(hw, BM_MTA(i), | ||
5157 | (u16)(mac_reg & 0xFFFF)); | ||
5158 | hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, | ||
5159 | (u16)((mac_reg >> 16) & 0xFFFF)); | ||
5160 | } | ||
5161 | |||
5162 | /* configure PHY Rx Control register */ | ||
5163 | hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); | ||
5164 | mac_reg = er32(RCTL); | ||
5165 | if (mac_reg & E1000_RCTL_UPE) | ||
5166 | phy_reg |= BM_RCTL_UPE; | ||
5167 | if (mac_reg & E1000_RCTL_MPE) | ||
5168 | phy_reg |= BM_RCTL_MPE; | ||
5169 | phy_reg &= ~(BM_RCTL_MO_MASK); | ||
5170 | if (mac_reg & E1000_RCTL_MO_3) | ||
5171 | phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) | ||
5172 | << BM_RCTL_MO_SHIFT); | ||
5173 | if (mac_reg & E1000_RCTL_BAM) | ||
5174 | phy_reg |= BM_RCTL_BAM; | ||
5175 | if (mac_reg & E1000_RCTL_PMCF) | ||
5176 | phy_reg |= BM_RCTL_PMCF; | ||
5177 | mac_reg = er32(CTRL); | ||
5178 | if (mac_reg & E1000_CTRL_RFCE) | ||
5179 | phy_reg |= BM_RCTL_RFCE; | ||
5180 | hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); | ||
5181 | |||
5182 | /* enable PHY wakeup in MAC register */ | ||
5183 | ew32(WUFC, wufc); | ||
5184 | ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); | ||
5185 | |||
5186 | /* configure and enable PHY wakeup in PHY registers */ | ||
5187 | hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); | ||
5188 | hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); | ||
5189 | |||
5190 | /* activate PHY wakeup */ | ||
5191 | wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; | ||
5192 | retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); | ||
5193 | if (retval) | ||
5194 | e_err("Could not set PHY Host Wakeup bit\n"); | ||
5195 | out: | ||
5196 | hw->phy.ops.release(hw); | ||
5197 | |||
5198 | return retval; | ||
5199 | } | ||
5200 | |||
5201 | static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, | ||
5202 | bool runtime) | ||
5203 | { | ||
5204 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
5205 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
5206 | struct e1000_hw *hw = &adapter->hw; | ||
5207 | u32 ctrl, ctrl_ext, rctl, status; | ||
5208 | /* Runtime suspend should only enable wakeup for link changes */ | ||
5209 | u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; | ||
5210 | int retval = 0; | ||
5211 | |||
5212 | netif_device_detach(netdev); | ||
5213 | |||
5214 | if (netif_running(netdev)) { | ||
5215 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); | ||
5216 | e1000e_down(adapter); | ||
5217 | e1000_free_irq(adapter); | ||
5218 | } | ||
5219 | e1000e_reset_interrupt_capability(adapter); | ||
5220 | |||
5221 | retval = pci_save_state(pdev); | ||
5222 | if (retval) | ||
5223 | return retval; | ||
5224 | |||
5225 | status = er32(STATUS); | ||
5226 | if (status & E1000_STATUS_LU) | ||
5227 | wufc &= ~E1000_WUFC_LNKC; | ||
5228 | |||
5229 | if (wufc) { | ||
5230 | e1000_setup_rctl(adapter); | ||
5231 | e1000_set_multi(netdev); | ||
5232 | |||
5233 | /* turn on all-multi mode if wake on multicast is enabled */ | ||
5234 | if (wufc & E1000_WUFC_MC) { | ||
5235 | rctl = er32(RCTL); | ||
5236 | rctl |= E1000_RCTL_MPE; | ||
5237 | ew32(RCTL, rctl); | ||
5238 | } | ||
5239 | |||
5240 | ctrl = er32(CTRL); | ||
5241 | /* advertise wake from D3Cold */ | ||
5242 | #define E1000_CTRL_ADVD3WUC 0x00100000 | ||
5243 | /* phy power management enable */ | ||
5244 | #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 | ||
5245 | ctrl |= E1000_CTRL_ADVD3WUC; | ||
5246 | if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) | ||
5247 | ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; | ||
5248 | ew32(CTRL, ctrl); | ||
5249 | |||
5250 | if (adapter->hw.phy.media_type == e1000_media_type_fiber || | ||
5251 | adapter->hw.phy.media_type == | ||
5252 | e1000_media_type_internal_serdes) { | ||
5253 | /* keep the laser running in D3 */ | ||
5254 | ctrl_ext = er32(CTRL_EXT); | ||
5255 | ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; | ||
5256 | ew32(CTRL_EXT, ctrl_ext); | ||
5257 | } | ||
5258 | |||
5259 | if (adapter->flags & FLAG_IS_ICH) | ||
5260 | e1000_suspend_workarounds_ich8lan(&adapter->hw); | ||
5261 | |||
5262 | /* Allow time for pending master requests to run */ | ||
5263 | e1000e_disable_pcie_master(&adapter->hw); | ||
5264 | |||
5265 | if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { | ||
5266 | /* enable wakeup by the PHY */ | ||
5267 | retval = e1000_init_phy_wakeup(adapter, wufc); | ||
5268 | if (retval) | ||
5269 | return retval; | ||
5270 | } else { | ||
5271 | /* enable wakeup by the MAC */ | ||
5272 | ew32(WUFC, wufc); | ||
5273 | ew32(WUC, E1000_WUC_PME_EN); | ||
5274 | } | ||
5275 | } else { | ||
5276 | ew32(WUC, 0); | ||
5277 | ew32(WUFC, 0); | ||
5278 | } | ||
5279 | |||
5280 | *enable_wake = !!wufc; | ||
5281 | |||
5282 | /* make sure adapter isn't asleep if manageability is enabled */ | ||
5283 | if ((adapter->flags & FLAG_MNG_PT_ENABLED) || | ||
5284 | (hw->mac.ops.check_mng_mode(hw))) | ||
5285 | *enable_wake = true; | ||
5286 | |||
5287 | if (adapter->hw.phy.type == e1000_phy_igp_3) | ||
5288 | e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); | ||
5289 | |||
5290 | /* | ||
5291 | * Release control of h/w to f/w. If f/w is AMT enabled, this | ||
5292 | * would have already happened in close and is redundant. | ||
5293 | */ | ||
5294 | e1000e_release_hw_control(adapter); | ||
5295 | |||
5296 | pci_disable_device(pdev); | ||
5297 | |||
5298 | return 0; | ||
5299 | } | ||
5300 | |||
5301 | static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) | ||
5302 | { | ||
5303 | if (sleep && wake) { | ||
5304 | pci_prepare_to_sleep(pdev); | ||
5305 | return; | ||
5306 | } | ||
5307 | |||
5308 | pci_wake_from_d3(pdev, wake); | ||
5309 | pci_set_power_state(pdev, PCI_D3hot); | ||
5310 | } | ||
5311 | |||
5312 | static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, | ||
5313 | bool wake) | ||
5314 | { | ||
5315 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
5316 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
5317 | |||
5318 | /* | ||
5319 | * The pci-e switch on some quad port adapters will report a | ||
5320 | * correctable error when the MAC transitions from D0 to D3. To | ||
5321 | * prevent this we need to mask off the correctable errors on the | ||
5322 | * downstream port of the pci-e switch. | ||
5323 | */ | ||
5324 | if (adapter->flags & FLAG_IS_QUAD_PORT) { | ||
5325 | struct pci_dev *us_dev = pdev->bus->self; | ||
5326 | int pos = pci_pcie_cap(us_dev); | ||
5327 | u16 devctl; | ||
5328 | |||
5329 | pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl); | ||
5330 | pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, | ||
5331 | (devctl & ~PCI_EXP_DEVCTL_CERE)); | ||
5332 | |||
5333 | e1000_power_off(pdev, sleep, wake); | ||
5334 | |||
5335 | pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); | ||
5336 | } else { | ||
5337 | e1000_power_off(pdev, sleep, wake); | ||
5338 | } | ||
5339 | } | ||
5340 | |||
5341 | #ifdef CONFIG_PCIEASPM | ||
5342 | static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | ||
5343 | { | ||
5344 | pci_disable_link_state_locked(pdev, state); | ||
5345 | } | ||
5346 | #else | ||
5347 | static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | ||
5348 | { | ||
5349 | int pos; | ||
5350 | u16 reg16; | ||
5351 | |||
5352 | /* | ||
5353 | * Both device and parent should have the same ASPM setting. | ||
5354 | * Disable ASPM in downstream component first and then upstream. | ||
5355 | */ | ||
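	/*
	 * This open-coded fallback relies on PCIE_LINK_STATE_L0S/L1 lining
	 * up with the ASPM control bits in the Link Control register, so
	 * clearing "state" directly disables the requested states.
	 */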
5356 | pos = pci_pcie_cap(pdev); | ||
5357 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); | ||
5358 | reg16 &= ~state; | ||
5359 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); | ||
5360 | |||
5361 | if (!pdev->bus->self) | ||
5362 | return; | ||
5363 | |||
5364 | pos = pci_pcie_cap(pdev->bus->self); | ||
5365 | pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, ®16); | ||
5366 | reg16 &= ~state; | ||
5367 | pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); | ||
5368 | } | ||
5369 | #endif | ||
5370 | static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | ||
5371 | { | ||
5372 | dev_info(&pdev->dev, "Disabling ASPM %s %s\n", | ||
5373 | (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", | ||
5374 | (state & PCIE_LINK_STATE_L1) ? "L1" : ""); | ||
5375 | |||
5376 | __e1000e_disable_aspm(pdev, state); | ||
5377 | } | ||
5378 | |||
5379 | #ifdef CONFIG_PM | ||
5380 | static bool e1000e_pm_ready(struct e1000_adapter *adapter) | ||
5381 | { | ||
5382 | return !!adapter->tx_ring->buffer_info; | ||
5383 | } | ||
5384 | |||
5385 | static int __e1000_resume(struct pci_dev *pdev) | ||
5386 | { | ||
5387 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
5388 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
5389 | struct e1000_hw *hw = &adapter->hw; | ||
5390 | u16 aspm_disable_flag = 0; | ||
5391 | u32 err; | ||
5392 | |||
5393 | if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) | ||
5394 | aspm_disable_flag = PCIE_LINK_STATE_L0S; | ||
5395 | if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) | ||
5396 | aspm_disable_flag |= PCIE_LINK_STATE_L1; | ||
5397 | if (aspm_disable_flag) | ||
5398 | e1000e_disable_aspm(pdev, aspm_disable_flag); | ||
5399 | |||
5400 | pci_set_power_state(pdev, PCI_D0); | ||
5401 | pci_restore_state(pdev); | ||
5402 | pci_save_state(pdev); | ||
5403 | |||
5404 | e1000e_set_interrupt_capability(adapter); | ||
5405 | if (netif_running(netdev)) { | ||
5406 | err = e1000_request_irq(adapter); | ||
5407 | if (err) | ||
5408 | return err; | ||
5409 | } | ||
5410 | |||
5411 | if (hw->mac.type == e1000_pch2lan) | ||
5412 | e1000_resume_workarounds_pchlan(&adapter->hw); | ||
5413 | |||
5414 | e1000e_power_up_phy(adapter); | ||
5415 | |||
5416 | /* report the system wakeup cause from S3/S4 */ | ||
5417 | if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { | ||
5418 | u16 phy_data; | ||
5419 | |||
5420 | e1e_rphy(&adapter->hw, BM_WUS, &phy_data); | ||
5421 | if (phy_data) { | ||
5422 | e_info("PHY Wakeup cause - %s\n", | ||
5423 | phy_data & E1000_WUS_EX ? "Unicast Packet" : | ||
5424 | phy_data & E1000_WUS_MC ? "Multicast Packet" : | ||
5425 | phy_data & E1000_WUS_BC ? "Broadcast Packet" : | ||
5426 | phy_data & E1000_WUS_MAG ? "Magic Packet" : | ||
5427 | 			       phy_data & E1000_WUS_LNKC ? "Link Status" | ||
5428 | 			       " Change" : "other"); | ||
5429 | } | ||
5430 | e1e_wphy(&adapter->hw, BM_WUS, ~0); | ||
5431 | } else { | ||
5432 | u32 wus = er32(WUS); | ||
5433 | if (wus) { | ||
5434 | e_info("MAC Wakeup cause - %s\n", | ||
5435 | wus & E1000_WUS_EX ? "Unicast Packet" : | ||
5436 | wus & E1000_WUS_MC ? "Multicast Packet" : | ||
5437 | wus & E1000_WUS_BC ? "Broadcast Packet" : | ||
5438 | wus & E1000_WUS_MAG ? "Magic Packet" : | ||
5439 | wus & E1000_WUS_LNKC ? "Link Status Change" : | ||
5440 | "other"); | ||
5441 | } | ||
5442 | ew32(WUS, ~0); | ||
5443 | } | ||
5444 | |||
5445 | e1000e_reset(adapter); | ||
5446 | |||
5447 | e1000_init_manageability_pt(adapter); | ||
5448 | |||
5449 | if (netif_running(netdev)) | ||
5450 | e1000e_up(adapter); | ||
5451 | |||
5452 | netif_device_attach(netdev); | ||
5453 | |||
5454 | /* | ||
5455 | * If the controller has AMT, do not set DRV_LOAD until the interface | ||
5456 | * is up. For all other cases, let the f/w know that the h/w is now | ||
5457 | * under the control of the driver. | ||
5458 | */ | ||
5459 | if (!(adapter->flags & FLAG_HAS_AMT)) | ||
5460 | e1000e_get_hw_control(adapter); | ||
5461 | |||
5462 | return 0; | ||
5463 | } | ||
5464 | |||
5465 | #ifdef CONFIG_PM_SLEEP | ||
5466 | static int e1000_suspend(struct device *dev) | ||
5467 | { | ||
5468 | struct pci_dev *pdev = to_pci_dev(dev); | ||
5469 | int retval; | ||
5470 | bool wake; | ||
5471 | |||
5472 | retval = __e1000_shutdown(pdev, &wake, false); | ||
5473 | if (!retval) | ||
5474 | e1000_complete_shutdown(pdev, true, wake); | ||
5475 | |||
5476 | return retval; | ||
5477 | } | ||
5478 | |||
5479 | static int e1000_resume(struct device *dev) | ||
5480 | { | ||
5481 | struct pci_dev *pdev = to_pci_dev(dev); | ||
5482 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
5483 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
5484 | |||
5485 | if (e1000e_pm_ready(adapter)) | ||
5486 | adapter->idle_check = true; | ||
5487 | |||
5488 | return __e1000_resume(pdev); | ||
5489 | } | ||
5490 | #endif /* CONFIG_PM_SLEEP */ | ||
5491 | |||
5492 | #ifdef CONFIG_PM_RUNTIME | ||
5493 | static int e1000_runtime_suspend(struct device *dev) | ||
5494 | { | ||
5495 | struct pci_dev *pdev = to_pci_dev(dev); | ||
5496 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
5497 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
5498 | |||
5499 | if (e1000e_pm_ready(adapter)) { | ||
5500 | bool wake; | ||
5501 | |||
5502 | __e1000_shutdown(pdev, &wake, true); | ||
5503 | } | ||
5504 | |||
5505 | return 0; | ||
5506 | } | ||
5507 | |||
5508 | static int e1000_idle(struct device *dev) | ||
5509 | { | ||
5510 | struct pci_dev *pdev = to_pci_dev(dev); | ||
5511 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
5512 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
5513 | |||
5514 | if (!e1000e_pm_ready(adapter)) | ||
5515 | return 0; | ||
5516 | |||
5517 | if (adapter->idle_check) { | ||
5518 | adapter->idle_check = false; | ||
5519 | if (!e1000e_has_link(adapter)) | ||
5520 | pm_schedule_suspend(dev, MSEC_PER_SEC); | ||
5521 | } | ||
5522 | |||
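	/*
	 * Returning -EBUSY prevents the PM core from suspending the device
	 * right away; when the idle check above finds no link, a delayed
	 * suspend has already been scheduled instead.
	 */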
5523 | return -EBUSY; | ||
5524 | } | ||
5525 | |||
5526 | static int e1000_runtime_resume(struct device *dev) | ||
5527 | { | ||
5528 | struct pci_dev *pdev = to_pci_dev(dev); | ||
5529 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
5530 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
5531 | |||
5532 | if (!e1000e_pm_ready(adapter)) | ||
5533 | return 0; | ||
5534 | |||
5535 | adapter->idle_check = !dev->power.runtime_auto; | ||
5536 | return __e1000_resume(pdev); | ||
5537 | } | ||
5538 | #endif /* CONFIG_PM_RUNTIME */ | ||
5539 | #endif /* CONFIG_PM */ | ||
5540 | |||
5541 | static void e1000_shutdown(struct pci_dev *pdev) | ||
5542 | { | ||
5543 | bool wake = false; | ||
5544 | |||
5545 | __e1000_shutdown(pdev, &wake, false); | ||
5546 | |||
5547 | if (system_state == SYSTEM_POWER_OFF) | ||
5548 | e1000_complete_shutdown(pdev, false, wake); | ||
5549 | } | ||
5550 | |||
5551 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
5552 | |||
5553 | static irqreturn_t e1000_intr_msix(int irq, void *data) | ||
5554 | { | ||
5555 | struct net_device *netdev = data; | ||
5556 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
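	/*
	 * With MSI-X the driver registers three vectors: Rx, Tx and "other"
	 * (link and management events), in that order, so poll each of them
	 * here with its IRQ temporarily disabled.
	 */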
5557 | |||
5558 | if (adapter->msix_entries) { | ||
5559 | int vector, msix_irq; | ||
5560 | |||
5561 | vector = 0; | ||
5562 | msix_irq = adapter->msix_entries[vector].vector; | ||
5563 | disable_irq(msix_irq); | ||
5564 | e1000_intr_msix_rx(msix_irq, netdev); | ||
5565 | enable_irq(msix_irq); | ||
5566 | |||
5567 | vector++; | ||
5568 | msix_irq = adapter->msix_entries[vector].vector; | ||
5569 | disable_irq(msix_irq); | ||
5570 | e1000_intr_msix_tx(msix_irq, netdev); | ||
5571 | enable_irq(msix_irq); | ||
5572 | |||
5573 | vector++; | ||
5574 | msix_irq = adapter->msix_entries[vector].vector; | ||
5575 | disable_irq(msix_irq); | ||
5576 | e1000_msix_other(msix_irq, netdev); | ||
5577 | enable_irq(msix_irq); | ||
5578 | } | ||
5579 | |||
5580 | return IRQ_HANDLED; | ||
5581 | } | ||
5582 | |||
5583 | /* | ||
5584 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
5585 | * without having to re-enable interrupts. It's not called while | ||
5586 | * the interrupt routine is executing. | ||
5587 | */ | ||
5588 | static void e1000_netpoll(struct net_device *netdev) | ||
5589 | { | ||
5590 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
5591 | |||
5592 | switch (adapter->int_mode) { | ||
5593 | case E1000E_INT_MODE_MSIX: | ||
5594 | e1000_intr_msix(adapter->pdev->irq, netdev); | ||
5595 | break; | ||
5596 | case E1000E_INT_MODE_MSI: | ||
5597 | disable_irq(adapter->pdev->irq); | ||
5598 | e1000_intr_msi(adapter->pdev->irq, netdev); | ||
5599 | enable_irq(adapter->pdev->irq); | ||
5600 | break; | ||
5601 | default: /* E1000E_INT_MODE_LEGACY */ | ||
5602 | disable_irq(adapter->pdev->irq); | ||
5603 | e1000_intr(adapter->pdev->irq, netdev); | ||
5604 | enable_irq(adapter->pdev->irq); | ||
5605 | break; | ||
5606 | } | ||
5607 | } | ||
5608 | #endif | ||
5609 | |||
5610 | /** | ||
5611 | * e1000_io_error_detected - called when PCI error is detected | ||
5612 | * @pdev: Pointer to PCI device | ||
5613 | * @state: The current pci connection state | ||
5614 | * | ||
5615 | * This function is called after a PCI bus error affecting | ||
5616 | * this device has been detected. | ||
5617 | */ | ||
5618 | static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, | ||
5619 | pci_channel_state_t state) | ||
5620 | { | ||
5621 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
5622 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
5623 | |||
5624 | netif_device_detach(netdev); | ||
5625 | |||
5626 | if (state == pci_channel_io_perm_failure) | ||
5627 | return PCI_ERS_RESULT_DISCONNECT; | ||
5628 | |||
5629 | if (netif_running(netdev)) | ||
5630 | e1000e_down(adapter); | ||
5631 | pci_disable_device(pdev); | ||
5632 | |||
5633 | 	/* Request a slot reset. */ | ||
5634 | return PCI_ERS_RESULT_NEED_RESET; | ||
5635 | } | ||
5636 | |||
5637 | /** | ||
5638 | * e1000_io_slot_reset - called after the pci bus has been reset. | ||
5639 | * @pdev: Pointer to PCI device | ||
5640 | * | ||
5641 | * Restart the card from scratch, as if from a cold-boot. Implementation | ||
5642 | * resembles the first-half of the e1000_resume routine. | ||
5643 | */ | ||
5644 | static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | ||
5645 | { | ||
5646 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
5647 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
5648 | struct e1000_hw *hw = &adapter->hw; | ||
5649 | u16 aspm_disable_flag = 0; | ||
5650 | int err; | ||
5651 | pci_ers_result_t result; | ||
5652 | |||
5653 | if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) | ||
5654 | aspm_disable_flag = PCIE_LINK_STATE_L0S; | ||
5655 | if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) | ||
5656 | aspm_disable_flag |= PCIE_LINK_STATE_L1; | ||
5657 | if (aspm_disable_flag) | ||
5658 | e1000e_disable_aspm(pdev, aspm_disable_flag); | ||
5659 | |||
5660 | err = pci_enable_device_mem(pdev); | ||
5661 | if (err) { | ||
5662 | dev_err(&pdev->dev, | ||
5663 | "Cannot re-enable PCI device after reset.\n"); | ||
5664 | result = PCI_ERS_RESULT_DISCONNECT; | ||
5665 | } else { | ||
5666 | pci_set_master(pdev); | ||
5667 | pdev->state_saved = true; | ||
5668 | pci_restore_state(pdev); | ||
5669 | |||
5670 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
5671 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
5672 | |||
5673 | e1000e_reset(adapter); | ||
5674 | ew32(WUS, ~0); | ||
5675 | result = PCI_ERS_RESULT_RECOVERED; | ||
5676 | } | ||
5677 | |||
5678 | pci_cleanup_aer_uncorrect_error_status(pdev); | ||
5679 | |||
5680 | return result; | ||
5681 | } | ||
5682 | |||
5683 | /** | ||
5684 | * e1000_io_resume - called when traffic can start flowing again. | ||
5685 | * @pdev: Pointer to PCI device | ||
5686 | * | ||
5687 | * This callback is called when the error recovery driver tells us that | ||
5688 |  * it's OK to resume normal operation. Implementation resembles the | ||
5689 | * second-half of the e1000_resume routine. | ||
5690 | */ | ||
5691 | static void e1000_io_resume(struct pci_dev *pdev) | ||
5692 | { | ||
5693 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
5694 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
5695 | |||
5696 | e1000_init_manageability_pt(adapter); | ||
5697 | |||
5698 | if (netif_running(netdev)) { | ||
5699 | if (e1000e_up(adapter)) { | ||
5700 | dev_err(&pdev->dev, | ||
5701 | "can't bring device back up after reset\n"); | ||
5702 | return; | ||
5703 | } | ||
5704 | } | ||
5705 | |||
5706 | netif_device_attach(netdev); | ||
5707 | |||
5708 | /* | ||
5709 | * If the controller has AMT, do not set DRV_LOAD until the interface | ||
5710 | * is up. For all other cases, let the f/w know that the h/w is now | ||
5711 | * under the control of the driver. | ||
5712 | */ | ||
5713 | if (!(adapter->flags & FLAG_HAS_AMT)) | ||
5714 | e1000e_get_hw_control(adapter); | ||
5715 | |||
5716 | } | ||
5717 | |||
5718 | static void e1000_print_device_info(struct e1000_adapter *adapter) | ||
5719 | { | ||
5720 | struct e1000_hw *hw = &adapter->hw; | ||
5721 | struct net_device *netdev = adapter->netdev; | ||
5722 | u32 ret_val; | ||
5723 | u8 pba_str[E1000_PBANUM_LENGTH]; | ||
5724 | |||
5725 | /* print bus type/speed/width info */ | ||
5726 | e_info("(PCI Express:2.5GT/s:%s) %pM\n", | ||
5727 | /* bus width */ | ||
5728 | ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : | ||
5729 | "Width x1"), | ||
5730 | /* MAC address */ | ||
5731 | netdev->dev_addr); | ||
5732 | e_info("Intel(R) PRO/%s Network Connection\n", | ||
5733 | (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000"); | ||
5734 | ret_val = e1000_read_pba_string_generic(hw, pba_str, | ||
5735 | E1000_PBANUM_LENGTH); | ||
5736 | if (ret_val) | ||
5737 | strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1); | ||
5738 | e_info("MAC: %d, PHY: %d, PBA No: %s\n", | ||
5739 | hw->mac.type, hw->phy.type, pba_str); | ||
5740 | } | ||
5741 | |||
5742 | static void e1000_eeprom_checks(struct e1000_adapter *adapter) | ||
5743 | { | ||
5744 | struct e1000_hw *hw = &adapter->hw; | ||
5745 | int ret_val; | ||
5746 | u16 buf = 0; | ||
5747 | |||
5748 | if (hw->mac.type != e1000_82573) | ||
5749 | return; | ||
5750 | |||
5751 | ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); | ||
5752 | if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) { | ||
5753 | /* Deep Smart Power Down (DSPD) */ | ||
5754 | dev_warn(&adapter->pdev->dev, | ||
5755 | "Warning: detected DSPD enabled in EEPROM\n"); | ||
5756 | } | ||
5757 | } | ||
5758 | |||
5759 | static const struct net_device_ops e1000e_netdev_ops = { | ||
5760 | .ndo_open = e1000_open, | ||
5761 | .ndo_stop = e1000_close, | ||
5762 | .ndo_start_xmit = e1000_xmit_frame, | ||
5763 | .ndo_get_stats64 = e1000e_get_stats64, | ||
5764 | .ndo_set_multicast_list = e1000_set_multi, | ||
5765 | .ndo_set_mac_address = e1000_set_mac, | ||
5766 | .ndo_change_mtu = e1000_change_mtu, | ||
5767 | .ndo_do_ioctl = e1000_ioctl, | ||
5768 | .ndo_tx_timeout = e1000_tx_timeout, | ||
5769 | .ndo_validate_addr = eth_validate_addr, | ||
5770 | |||
5771 | .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, | ||
5772 | .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, | ||
5773 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
5774 | .ndo_poll_controller = e1000_netpoll, | ||
5775 | #endif | ||
5776 | }; | ||
5777 | |||
5778 | /** | ||
5779 | * e1000_probe - Device Initialization Routine | ||
5780 | * @pdev: PCI device information struct | ||
5781 | * @ent: entry in e1000_pci_tbl | ||
5782 | * | ||
5783 | * Returns 0 on success, negative on failure | ||
5784 | * | ||
5785 | * e1000_probe initializes an adapter identified by a pci_dev structure. | ||
5786 | * The OS initialization, configuring of the adapter private structure, | ||
5787 | * and a hardware reset occur. | ||
5788 | **/ | ||
5789 | static int __devinit e1000_probe(struct pci_dev *pdev, | ||
5790 | const struct pci_device_id *ent) | ||
5791 | { | ||
5792 | struct net_device *netdev; | ||
5793 | struct e1000_adapter *adapter; | ||
5794 | struct e1000_hw *hw; | ||
5795 | const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; | ||
5796 | resource_size_t mmio_start, mmio_len; | ||
5797 | resource_size_t flash_start, flash_len; | ||
5798 | |||
5799 | static int cards_found; | ||
5800 | u16 aspm_disable_flag = 0; | ||
5801 | int i, err, pci_using_dac; | ||
5802 | u16 eeprom_data = 0; | ||
5803 | u16 eeprom_apme_mask = E1000_EEPROM_APME; | ||
5804 | |||
5805 | if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) | ||
5806 | aspm_disable_flag = PCIE_LINK_STATE_L0S; | ||
5807 | if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) | ||
5808 | aspm_disable_flag |= PCIE_LINK_STATE_L1; | ||
5809 | if (aspm_disable_flag) | ||
5810 | e1000e_disable_aspm(pdev, aspm_disable_flag); | ||
5811 | |||
5812 | err = pci_enable_device_mem(pdev); | ||
5813 | if (err) | ||
5814 | return err; | ||
5815 | |||
5816 | pci_using_dac = 0; | ||
5817 | err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); | ||
5818 | if (!err) { | ||
5819 | err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); | ||
5820 | if (!err) | ||
5821 | pci_using_dac = 1; | ||
5822 | } else { | ||
5823 | err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); | ||
5824 | if (err) { | ||
5825 | err = dma_set_coherent_mask(&pdev->dev, | ||
5826 | DMA_BIT_MASK(32)); | ||
5827 | if (err) { | ||
5828 | dev_err(&pdev->dev, "No usable DMA " | ||
5829 | "configuration, aborting\n"); | ||
5830 | goto err_dma; | ||
5831 | } | ||
5832 | } | ||
5833 | } | ||
5834 | |||
5835 | err = pci_request_selected_regions_exclusive(pdev, | ||
5836 | pci_select_bars(pdev, IORESOURCE_MEM), | ||
5837 | e1000e_driver_name); | ||
5838 | if (err) | ||
5839 | goto err_pci_reg; | ||
5840 | |||
5841 | /* AER (Advanced Error Reporting) hooks */ | ||
5842 | pci_enable_pcie_error_reporting(pdev); | ||
5843 | |||
5844 | pci_set_master(pdev); | ||
5845 | /* PCI config space info */ | ||
5846 | err = pci_save_state(pdev); | ||
5847 | if (err) | ||
5848 | goto err_alloc_etherdev; | ||
5849 | |||
5850 | err = -ENOMEM; | ||
5851 | netdev = alloc_etherdev(sizeof(struct e1000_adapter)); | ||
5852 | if (!netdev) | ||
5853 | goto err_alloc_etherdev; | ||
5854 | |||
5855 | SET_NETDEV_DEV(netdev, &pdev->dev); | ||
5856 | |||
5857 | netdev->irq = pdev->irq; | ||
5858 | |||
5859 | pci_set_drvdata(pdev, netdev); | ||
5860 | adapter = netdev_priv(netdev); | ||
5861 | hw = &adapter->hw; | ||
5862 | adapter->netdev = netdev; | ||
5863 | adapter->pdev = pdev; | ||
5864 | adapter->ei = ei; | ||
5865 | adapter->pba = ei->pba; | ||
5866 | adapter->flags = ei->flags; | ||
5867 | adapter->flags2 = ei->flags2; | ||
5868 | adapter->hw.adapter = adapter; | ||
5869 | adapter->hw.mac.type = ei->mac; | ||
5870 | adapter->max_hw_frame_size = ei->max_hw_frame_size; | ||
5871 | adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; | ||
5872 | |||
5873 | mmio_start = pci_resource_start(pdev, 0); | ||
5874 | mmio_len = pci_resource_len(pdev, 0); | ||
5875 | |||
5876 | err = -EIO; | ||
5877 | adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); | ||
5878 | if (!adapter->hw.hw_addr) | ||
5879 | goto err_ioremap; | ||
5880 | |||
5881 | if ((adapter->flags & FLAG_HAS_FLASH) && | ||
5882 | (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { | ||
5883 | flash_start = pci_resource_start(pdev, 1); | ||
5884 | flash_len = pci_resource_len(pdev, 1); | ||
5885 | adapter->hw.flash_address = ioremap(flash_start, flash_len); | ||
5886 | if (!adapter->hw.flash_address) | ||
5887 | goto err_flashmap; | ||
5888 | } | ||
5889 | |||
5890 | /* construct the net_device struct */ | ||
5891 | netdev->netdev_ops = &e1000e_netdev_ops; | ||
5892 | e1000e_set_ethtool_ops(netdev); | ||
5893 | netdev->watchdog_timeo = 5 * HZ; | ||
5894 | netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); | ||
5895 | strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); | ||
5896 | |||
5897 | netdev->mem_start = mmio_start; | ||
5898 | netdev->mem_end = mmio_start + mmio_len; | ||
5899 | |||
5900 | adapter->bd_number = cards_found++; | ||
5901 | |||
5902 | e1000e_check_options(adapter); | ||
5903 | |||
5904 | /* setup adapter struct */ | ||
5905 | err = e1000_sw_init(adapter); | ||
5906 | if (err) | ||
5907 | goto err_sw_init; | ||
5908 | |||
5909 | memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); | ||
5910 | memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); | ||
5911 | memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); | ||
5912 | |||
5913 | err = ei->get_variants(adapter); | ||
5914 | if (err) | ||
5915 | goto err_hw_init; | ||
5916 | |||
5917 | if ((adapter->flags & FLAG_IS_ICH) && | ||
5918 | (adapter->flags & FLAG_READ_ONLY_NVM)) | ||
5919 | e1000e_write_protect_nvm_ich8lan(&adapter->hw); | ||
5920 | |||
5921 | hw->mac.ops.get_bus_info(&adapter->hw); | ||
5922 | |||
5923 | adapter->hw.phy.autoneg_wait_to_complete = 0; | ||
5924 | |||
5925 | /* Copper options */ | ||
5926 | if (adapter->hw.phy.media_type == e1000_media_type_copper) { | ||
5927 | adapter->hw.phy.mdix = AUTO_ALL_MODES; | ||
5928 | adapter->hw.phy.disable_polarity_correction = 0; | ||
5929 | adapter->hw.phy.ms_type = e1000_ms_hw_default; | ||
5930 | } | ||
5931 | |||
5932 | if (e1000_check_reset_block(&adapter->hw)) | ||
5933 | e_info("PHY reset is blocked due to SOL/IDER session.\n"); | ||
5934 | |||
5935 | netdev->features = NETIF_F_SG | | ||
5936 | NETIF_F_HW_CSUM | | ||
5937 | NETIF_F_HW_VLAN_TX | | ||
5938 | NETIF_F_HW_VLAN_RX; | ||
5939 | |||
5940 | if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) | ||
5941 | netdev->features |= NETIF_F_HW_VLAN_FILTER; | ||
5942 | |||
5943 | netdev->features |= NETIF_F_TSO; | ||
5944 | netdev->features |= NETIF_F_TSO6; | ||
5945 | |||
5946 | netdev->vlan_features |= NETIF_F_TSO; | ||
5947 | netdev->vlan_features |= NETIF_F_TSO6; | ||
5948 | netdev->vlan_features |= NETIF_F_HW_CSUM; | ||
5949 | netdev->vlan_features |= NETIF_F_SG; | ||
5950 | |||
5951 | if (pci_using_dac) { | ||
5952 | netdev->features |= NETIF_F_HIGHDMA; | ||
5953 | netdev->vlan_features |= NETIF_F_HIGHDMA; | ||
5954 | } | ||
5955 | |||
5956 | if (e1000e_enable_mng_pass_thru(&adapter->hw)) | ||
5957 | adapter->flags |= FLAG_MNG_PT_ENABLED; | ||
5958 | |||
5959 | /* | ||
5960 | * before reading the NVM, reset the controller to | ||
5961 | * put the device in a known good starting state | ||
5962 | */ | ||
5963 | adapter->hw.mac.ops.reset_hw(&adapter->hw); | ||
5964 | |||
5965 | /* | ||
5966 | * systems with ASPM and others may see the checksum fail on the first | ||
5967 | * attempt. Let's give it a few tries | ||
5968 | */ | ||
5969 | for (i = 0;; i++) { | ||
5970 | if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) | ||
5971 | break; | ||
5972 | if (i == 2) { | ||
5973 | e_err("The NVM Checksum Is Not Valid\n"); | ||
5974 | err = -EIO; | ||
5975 | goto err_eeprom; | ||
5976 | } | ||
5977 | } | ||
5978 | |||
5979 | e1000_eeprom_checks(adapter); | ||
5980 | |||
5981 | /* copy the MAC address */ | ||
5982 | if (e1000e_read_mac_addr(&adapter->hw)) | ||
5983 | e_err("NVM Read Error while reading MAC address\n"); | ||
5984 | |||
5985 | memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); | ||
5986 | memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); | ||
5987 | |||
5988 | if (!is_valid_ether_addr(netdev->perm_addr)) { | ||
5989 | e_err("Invalid MAC Address: %pM\n", netdev->perm_addr); | ||
5990 | err = -EIO; | ||
5991 | goto err_eeprom; | ||
5992 | } | ||
5993 | |||
5994 | init_timer(&adapter->watchdog_timer); | ||
5995 | adapter->watchdog_timer.function = e1000_watchdog; | ||
5996 | adapter->watchdog_timer.data = (unsigned long) adapter; | ||
5997 | |||
5998 | init_timer(&adapter->phy_info_timer); | ||
5999 | adapter->phy_info_timer.function = e1000_update_phy_info; | ||
6000 | adapter->phy_info_timer.data = (unsigned long) adapter; | ||
6001 | |||
6002 | INIT_WORK(&adapter->reset_task, e1000_reset_task); | ||
6003 | INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); | ||
6004 | INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); | ||
6005 | INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); | ||
6006 | INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); | ||
6007 | |||
6008 | /* Initialize link parameters. User can change them with ethtool */ | ||
6009 | adapter->hw.mac.autoneg = 1; | ||
6010 | adapter->fc_autoneg = 1; | ||
6011 | adapter->hw.fc.requested_mode = e1000_fc_default; | ||
6012 | adapter->hw.fc.current_mode = e1000_fc_default; | ||
6013 | adapter->hw.phy.autoneg_advertised = 0x2f; | ||
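The 0x2f advertisement mask is easier to read when decoded against the usual e1000 ADVERTISE_* bit definitions (those defines live elsewhere in the driver, not in this hunk, so this is a hedged reading rather than quoted source):

	/* 0x2f, assuming the standard e1000 advertise bits, is:
	 *   ADVERTISE_10_HALF  (0x01) | ADVERTISE_10_FULL  (0x02) |
	 *   ADVERTISE_100_HALF (0x04) | ADVERTISE_100_FULL (0x08) |
	 *   ADVERTISE_1000_FULL (0x20)
	 * i.e. every supported speed/duplex except 1000 half-duplex.
	 */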
6014 | |||
6015 | /* ring size defaults */ | ||
6016 | adapter->rx_ring->count = 256; | ||
6017 | adapter->tx_ring->count = 256; | ||
6018 | |||
6019 | /* | ||
6020 | * Initial Wake on LAN setting - If APM wake is enabled in | ||
6021 | * the EEPROM, enable the ACPI Magic Packet filter | ||
6022 | */ | ||
6023 | if (adapter->flags & FLAG_APME_IN_WUC) { | ||
6024 | /* APME bit in EEPROM is mapped to WUC.APME */ | ||
6025 | eeprom_data = er32(WUC); | ||
6026 | eeprom_apme_mask = E1000_WUC_APME; | ||
6027 | if ((hw->mac.type > e1000_ich10lan) && | ||
6028 | (eeprom_data & E1000_WUC_PHY_WAKE)) | ||
6029 | adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; | ||
6030 | } else if (adapter->flags & FLAG_APME_IN_CTRL3) { | ||
6031 | if (adapter->flags & FLAG_APME_CHECK_PORT_B && | ||
6032 | (adapter->hw.bus.func == 1)) | ||
6033 | e1000_read_nvm(&adapter->hw, | ||
6034 | NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); | ||
6035 | else | ||
6036 | e1000_read_nvm(&adapter->hw, | ||
6037 | NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); | ||
6038 | } | ||
6039 | |||
6040 | /* fetch WoL from EEPROM */ | ||
6041 | if (eeprom_data & eeprom_apme_mask) | ||
6042 | adapter->eeprom_wol |= E1000_WUFC_MAG; | ||
6043 | |||
6044 | /* | ||
6045 | * now that we have the eeprom settings, apply the special cases | ||
6046 | * where the eeprom may be wrong or the board simply won't support | ||
6047 | * wake on lan on a particular port | ||
6048 | */ | ||
6049 | if (!(adapter->flags & FLAG_HAS_WOL)) | ||
6050 | adapter->eeprom_wol = 0; | ||
6051 | |||
6052 | /* initialize the wol settings based on the eeprom settings */ | ||
6053 | adapter->wol = adapter->eeprom_wol; | ||
6054 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); | ||
6055 | |||
6056 | /* save off EEPROM version number */ | ||
6057 | e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); | ||
6058 | |||
6059 | /* reset the hardware with the new settings */ | ||
6060 | e1000e_reset(adapter); | ||
6061 | |||
6062 | /* | ||
6063 | * If the controller has AMT, do not set DRV_LOAD until the interface | ||
6064 | * is up. For all other cases, let the f/w know that the h/w is now | ||
6065 | * under the control of the driver. | ||
6066 | */ | ||
6067 | if (!(adapter->flags & FLAG_HAS_AMT)) | ||
6068 | e1000e_get_hw_control(adapter); | ||
6069 | |||
6070 | strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1); | ||
6071 | err = register_netdev(netdev); | ||
6072 | if (err) | ||
6073 | goto err_register; | ||
6074 | |||
6075 | /* carrier off reporting is important to ethtool even BEFORE open */ | ||
6076 | netif_carrier_off(netdev); | ||
6077 | |||
6078 | e1000_print_device_info(adapter); | ||
6079 | |||
6080 | if (pci_dev_run_wake(pdev)) | ||
6081 | pm_runtime_put_noidle(&pdev->dev); | ||
6082 | |||
6083 | return 0; | ||
6084 | |||
6085 | err_register: | ||
6086 | if (!(adapter->flags & FLAG_HAS_AMT)) | ||
6087 | e1000e_release_hw_control(adapter); | ||
6088 | err_eeprom: | ||
6089 | if (!e1000_check_reset_block(&adapter->hw)) | ||
6090 | e1000_phy_hw_reset(&adapter->hw); | ||
6091 | err_hw_init: | ||
6092 | kfree(adapter->tx_ring); | ||
6093 | kfree(adapter->rx_ring); | ||
6094 | err_sw_init: | ||
6095 | if (adapter->hw.flash_address) | ||
6096 | iounmap(adapter->hw.flash_address); | ||
6097 | e1000e_reset_interrupt_capability(adapter); | ||
6098 | err_flashmap: | ||
6099 | iounmap(adapter->hw.hw_addr); | ||
6100 | err_ioremap: | ||
6101 | free_netdev(netdev); | ||
6102 | err_alloc_etherdev: | ||
6103 | pci_release_selected_regions(pdev, | ||
6104 | pci_select_bars(pdev, IORESOURCE_MEM)); | ||
6105 | err_pci_reg: | ||
6106 | err_dma: | ||
6107 | pci_disable_device(pdev); | ||
6108 | return err; | ||
6109 | } | ||
6110 | |||
6111 | /** | ||
6112 | * e1000_remove - Device Removal Routine | ||
6113 | * @pdev: PCI device information struct | ||
6114 | * | ||
6115 | * e1000_remove is called by the PCI subsystem to alert the driver | ||
6116 | * that it should release a PCI device. This could be caused by a | ||
6117 | * Hot-Plug event, or because the driver is going to be removed from | ||
6118 | * memory. | ||
6119 | **/ | ||
6120 | static void __devexit e1000_remove(struct pci_dev *pdev) | ||
6121 | { | ||
6122 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
6123 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
6124 | bool down = test_bit(__E1000_DOWN, &adapter->state); | ||
6125 | |||
6126 | /* | ||
6127 | * The timers may be rescheduled, so explicitly disable them | ||
6128 | * from being rescheduled. | ||
6129 | */ | ||
6130 | if (!down) | ||
6131 | set_bit(__E1000_DOWN, &adapter->state); | ||
6132 | del_timer_sync(&adapter->watchdog_timer); | ||
6133 | del_timer_sync(&adapter->phy_info_timer); | ||
6134 | |||
6135 | cancel_work_sync(&adapter->reset_task); | ||
6136 | cancel_work_sync(&adapter->watchdog_task); | ||
6137 | cancel_work_sync(&adapter->downshift_task); | ||
6138 | cancel_work_sync(&adapter->update_phy_task); | ||
6139 | cancel_work_sync(&adapter->print_hang_task); | ||
6140 | |||
6141 | if (!(netdev->flags & IFF_UP)) | ||
6142 | e1000_power_down_phy(adapter); | ||
6143 | |||
6144 | /* Don't lie to e1000_close() down the road. */ | ||
6145 | if (!down) | ||
6146 | clear_bit(__E1000_DOWN, &adapter->state); | ||
6147 | unregister_netdev(netdev); | ||
6148 | |||
6149 | if (pci_dev_run_wake(pdev)) | ||
6150 | pm_runtime_get_noresume(&pdev->dev); | ||
6151 | |||
6152 | /* | ||
6153 | * Release control of h/w to f/w. If f/w is AMT enabled, this | ||
6154 | * would have already happened in close and is redundant. | ||
6155 | */ | ||
6156 | e1000e_release_hw_control(adapter); | ||
6157 | |||
6158 | e1000e_reset_interrupt_capability(adapter); | ||
6159 | kfree(adapter->tx_ring); | ||
6160 | kfree(adapter->rx_ring); | ||
6161 | |||
6162 | iounmap(adapter->hw.hw_addr); | ||
6163 | if (adapter->hw.flash_address) | ||
6164 | iounmap(adapter->hw.flash_address); | ||
6165 | pci_release_selected_regions(pdev, | ||
6166 | pci_select_bars(pdev, IORESOURCE_MEM)); | ||
6167 | |||
6168 | free_netdev(netdev); | ||
6169 | |||
6170 | /* AER disable */ | ||
6171 | pci_disable_pcie_error_reporting(pdev); | ||
6172 | |||
6173 | pci_disable_device(pdev); | ||
6174 | } | ||
6175 | |||
6176 | /* PCI Error Recovery (ERS) */ | ||
6177 | static struct pci_error_handlers e1000_err_handler = { | ||
6178 | .error_detected = e1000_io_error_detected, | ||
6179 | .slot_reset = e1000_io_slot_reset, | ||
6180 | .resume = e1000_io_resume, | ||
6181 | }; | ||
6182 | |||
6183 | static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { | ||
6184 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, | ||
6185 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, | ||
6186 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, | ||
6187 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, | ||
6188 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, | ||
6189 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, | ||
6190 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, | ||
6191 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, | ||
6192 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, | ||
6193 | |||
6194 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, | ||
6195 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, | ||
6196 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, | ||
6197 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, | ||
6198 | |||
6199 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, | ||
6200 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, | ||
6201 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, | ||
6202 | |||
6203 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }, | ||
6204 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 }, | ||
6205 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 }, | ||
6206 | |||
6207 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), | ||
6208 | board_80003es2lan }, | ||
6209 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), | ||
6210 | board_80003es2lan }, | ||
6211 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), | ||
6212 | board_80003es2lan }, | ||
6213 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), | ||
6214 | board_80003es2lan }, | ||
6215 | |||
6216 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, | ||
6217 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, | ||
6218 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, | ||
6219 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan }, | ||
6220 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, | ||
6221 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, | ||
6222 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, | ||
6223 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan }, | ||
6224 | |||
6225 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, | ||
6226 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, | ||
6227 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, | ||
6228 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, | ||
6229 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, | ||
6230 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan }, | ||
6231 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, | ||
6232 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, | ||
6233 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, | ||
6234 | |||
6235 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, | ||
6236 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, | ||
6237 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, | ||
6238 | |||
6239 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, | ||
6240 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, | ||
6241 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan }, | ||
6242 | |||
6243 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan }, | ||
6244 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan }, | ||
6245 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan }, | ||
6246 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan }, | ||
6247 | |||
6248 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, | ||
6249 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, | ||
6250 | |||
6251 | { } /* terminate list */ | ||
6252 | }; | ||
6253 | MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); | ||
6254 | |||
6255 | #ifdef CONFIG_PM | ||
6256 | static const struct dev_pm_ops e1000_pm_ops = { | ||
6257 | SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) | ||
6258 | SET_RUNTIME_PM_OPS(e1000_runtime_suspend, | ||
6259 | e1000_runtime_resume, e1000_idle) | ||
6260 | }; | ||
6261 | #endif | ||
6262 | |||
6263 | /* PCI Device API Driver */ | ||
6264 | static struct pci_driver e1000_driver = { | ||
6265 | .name = e1000e_driver_name, | ||
6266 | .id_table = e1000_pci_tbl, | ||
6267 | .probe = e1000_probe, | ||
6268 | .remove = __devexit_p(e1000_remove), | ||
6269 | #ifdef CONFIG_PM | ||
6270 | .driver.pm = &e1000_pm_ops, | ||
6271 | #endif | ||
6272 | .shutdown = e1000_shutdown, | ||
6273 | .err_handler = &e1000_err_handler | ||
6274 | }; | ||
6275 | |||
6276 | /** | ||
6277 | * e1000_init_module - Driver Registration Routine | ||
6278 | * | ||
6279 | * e1000_init_module is the first routine called when the driver is | ||
6280 | * loaded. All it does is register with the PCI subsystem. | ||
6281 | **/ | ||
6282 | static int __init e1000_init_module(void) | ||
6283 | { | ||
6284 | int ret; | ||
6285 | pr_info("Intel(R) PRO/1000 Network Driver - %s\n", | ||
6286 | e1000e_driver_version); | ||
6287 | pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n"); | ||
6288 | ret = pci_register_driver(&e1000_driver); | ||
6289 | |||
6290 | return ret; | ||
6291 | } | ||
6292 | module_init(e1000_init_module); | ||
6293 | |||
6294 | /** | ||
6295 | * e1000_exit_module - Driver Exit Cleanup Routine | ||
6296 | * | ||
6297 | * e1000_exit_module is called just before the driver is removed | ||
6298 | * from memory. | ||
6299 | **/ | ||
6300 | static void __exit e1000_exit_module(void) | ||
6301 | { | ||
6302 | pci_unregister_driver(&e1000_driver); | ||
6303 | } | ||
6304 | module_exit(e1000_exit_module); | ||
6305 | |||
6306 | |||
6307 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); | ||
6308 | MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); | ||
6309 | MODULE_LICENSE("GPL"); | ||
6310 | MODULE_VERSION(DRV_VERSION); | ||
6311 | |||
6312 | /* e1000_main.c */ | ||
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c new file mode 100644 index 000000000000..4dd9b63273f6 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/param.c | |||
@@ -0,0 +1,478 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/1000 Linux driver | ||
4 | Copyright(c) 1999 - 2011 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #include <linux/netdevice.h> | ||
30 | #include <linux/pci.h> | ||
31 | |||
32 | #include "e1000.h" | ||
33 | |||
34 | /* | ||
35 | * This is the only thing that needs to be changed to adjust the | ||
36 | * maximum number of ports that the driver can manage. | ||
37 | */ | ||
38 | |||
39 | #define E1000_MAX_NIC 32 | ||
40 | |||
41 | #define OPTION_UNSET -1 | ||
42 | #define OPTION_DISABLED 0 | ||
43 | #define OPTION_ENABLED 1 | ||
44 | |||
45 | #define COPYBREAK_DEFAULT 256 | ||
46 | unsigned int copybreak = COPYBREAK_DEFAULT; | ||
47 | module_param(copybreak, uint, 0644); | ||
48 | MODULE_PARM_DESC(copybreak, | ||
49 | "Maximum size of packet that is copied to a new buffer on receive"); | ||
50 | |||
51 | /* | ||
52 | * All parameters are treated the same, as an integer array of values. | ||
53 | * This macro just reduces the need to repeat the same declaration code | ||
54 | * over and over (plus this helps to avoid typo bugs). | ||
55 | */ | ||
56 | |||
57 | #define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } | ||
58 | #define E1000_PARAM(X, desc) \ | ||
59 | static int __devinitdata X[E1000_MAX_NIC+1] \ | ||
60 | = E1000_PARAM_INIT; \ | ||
61 | static unsigned int num_##X; \ | ||
62 | module_param_array_named(X, X, int, &num_##X, 0); \ | ||
63 | MODULE_PARM_DESC(X, desc); | ||
64 | |||
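To make the array-of-values convention concrete, this is roughly what a single invocation of the macro above expands to, using TxIntDelay as the example (a hand expansion for illustration only; the macro definition above is authoritative):

	/* E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay") becomes: */
	static int __devinitdata TxIntDelay[E1000_MAX_NIC+1] =
		{ [0 ... E1000_MAX_NIC] = OPTION_UNSET };
	static unsigned int num_TxIntDelay;
	module_param_array_named(TxIntDelay, TxIntDelay, int, &num_TxIntDelay, 0);
	MODULE_PARM_DESC(TxIntDelay, "Transmit Interrupt Delay");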
65 | /* | ||
66 | * Transmit Interrupt Delay in units of 1.024 microseconds | ||
67 | * Tx interrupt delay typically needs to be set to something non-zero | ||
68 | * | ||
69 | * Valid Range: 0-65535 | ||
70 | */ | ||
71 | E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); | ||
72 | #define DEFAULT_TIDV 8 | ||
73 | #define MAX_TXDELAY 0xFFFF | ||
74 | #define MIN_TXDELAY 0 | ||
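As a quick check on the 1.024 microsecond units (arithmetic added here, not from the driver), the default and maximum transmit delays work out as follows:

	/* DEFAULT_TIDV = 8      ->      8 * 1.024 us =  8.192 us
	 * MAX_TXDELAY  = 0xFFFF -> 65535 * 1.024 us ~= 67.1 ms
	 */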
75 | |||
76 | /* | ||
77 | * Transmit Absolute Interrupt Delay in units of 1.024 microseconds | ||
78 | * | ||
79 | * Valid Range: 0-65535 | ||
80 | */ | ||
81 | E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); | ||
82 | #define DEFAULT_TADV 32 | ||
83 | #define MAX_TXABSDELAY 0xFFFF | ||
84 | #define MIN_TXABSDELAY 0 | ||
85 | |||
86 | /* | ||
87 | * Receive Interrupt Delay in units of 1.024 microseconds | ||
88 | * The hardware will likely hang if you set this to anything but zero. | ||
89 | * | ||
90 | * Valid Range: 0-65535 | ||
91 | */ | ||
92 | E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); | ||
93 | #define MAX_RXDELAY 0xFFFF | ||
94 | #define MIN_RXDELAY 0 | ||
95 | |||
96 | /* | ||
97 | * Receive Absolute Interrupt Delay in units of 1.024 microseconds | ||
98 | * | ||
99 | * Valid Range: 0-65535 | ||
100 | */ | ||
101 | E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); | ||
102 | #define MAX_RXABSDELAY 0xFFFF | ||
103 | #define MIN_RXABSDELAY 0 | ||
104 | |||
105 | /* | ||
106 | * Interrupt Throttle Rate (interrupts/sec) | ||
107 | * | ||
108 | * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative, 4=simplified) | ||
109 | */ | ||
110 | E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); | ||
111 | #define DEFAULT_ITR 3 | ||
112 | #define MAX_ITR 100000 | ||
113 | #define MIN_ITR 100 | ||
114 | |||
115 | /* IntMode (Interrupt Mode) | ||
116 | * | ||
117 | * Valid Range: 0 - 2 | ||
118 | * | ||
119 | * Default Value: 2 (MSI-X) | ||
120 | */ | ||
121 | E1000_PARAM(IntMode, "Interrupt Mode"); | ||
122 | #define MAX_INTMODE 2 | ||
123 | #define MIN_INTMODE 0 | ||
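For reference, the three IntMode values map onto the driver's interrupt-mode constants roughly as follows; the E1000E_INT_MODE_* names come from e1000.h and the legacy/MSI values are an assumption here, since only the MSI-X default appears in this hunk:

	/* IntMode=0 -> E1000E_INT_MODE_LEGACY (line-based interrupts)
	 * IntMode=1 -> E1000E_INT_MODE_MSI
	 * IntMode=2 -> E1000E_INT_MODE_MSIX   (default, per .def above)
	 */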
124 | |||
125 | /* | ||
126 | * Enable Smart Power Down of the PHY | ||
127 | * | ||
128 | * Valid Range: 0, 1 | ||
129 | * | ||
130 | * Default Value: 0 (disabled) | ||
131 | */ | ||
132 | E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); | ||
133 | |||
134 | /* | ||
135 | * Enable Kumeran Lock Loss workaround | ||
136 | * | ||
137 | * Valid Range: 0, 1 | ||
138 | * | ||
139 | * Default Value: 1 (enabled) | ||
140 | */ | ||
141 | E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); | ||
142 | |||
143 | /* | ||
144 | * Write Protect NVM | ||
145 | * | ||
146 | * Valid Range: 0, 1 | ||
147 | * | ||
148 | * Default Value: 1 (enabled) | ||
149 | */ | ||
150 | E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); | ||
151 | |||
152 | /* | ||
153 | * Enable CRC Stripping | ||
154 | * | ||
155 | * Valid Range: 0, 1 | ||
156 | * | ||
157 | * Default Value: 1 (enabled) | ||
158 | */ | ||
159 | E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \ | ||
160 | "the CRC"); | ||
161 | |||
162 | struct e1000_option { | ||
163 | enum { enable_option, range_option, list_option } type; | ||
164 | const char *name; | ||
165 | const char *err; | ||
166 | int def; | ||
167 | union { | ||
168 | struct { /* range_option info */ | ||
169 | int min; | ||
170 | int max; | ||
171 | } r; | ||
172 | struct { /* list_option info */ | ||
173 | int nr; | ||
174 | struct e1000_opt_list { int i; char *str; } *p; | ||
175 | } l; | ||
176 | } arg; | ||
177 | }; | ||
178 | |||
179 | static int __devinit e1000_validate_option(unsigned int *value, | ||
180 | const struct e1000_option *opt, | ||
181 | struct e1000_adapter *adapter) | ||
182 | { | ||
183 | if (*value == OPTION_UNSET) { | ||
184 | *value = opt->def; | ||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | switch (opt->type) { | ||
189 | case enable_option: | ||
190 | switch (*value) { | ||
191 | case OPTION_ENABLED: | ||
192 | e_info("%s Enabled\n", opt->name); | ||
193 | return 0; | ||
194 | case OPTION_DISABLED: | ||
195 | e_info("%s Disabled\n", opt->name); | ||
196 | return 0; | ||
197 | } | ||
198 | break; | ||
199 | case range_option: | ||
200 | if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { | ||
201 | e_info("%s set to %i\n", opt->name, *value); | ||
202 | return 0; | ||
203 | } | ||
204 | break; | ||
205 | case list_option: { | ||
206 | int i; | ||
207 | struct e1000_opt_list *ent; | ||
208 | |||
209 | for (i = 0; i < opt->arg.l.nr; i++) { | ||
210 | ent = &opt->arg.l.p[i]; | ||
211 | if (*value == ent->i) { | ||
212 | if (ent->str[0] != '\0') | ||
213 | e_info("%s\n", ent->str); | ||
214 | return 0; | ||
215 | } | ||
216 | } | ||
217 | } | ||
218 | break; | ||
219 | default: | ||
220 | BUG(); | ||
221 | } | ||
222 | |||
223 | e_info("Invalid %s value specified (%i) %s\n", opt->name, *value, | ||
224 | opt->err); | ||
225 | *value = opt->def; | ||
226 | return -1; | ||
227 | } | ||
228 | |||
229 | /** | ||
230 | * e1000e_check_options - Range Checking for Command Line Parameters | ||
231 | * @adapter: board private structure | ||
232 | * | ||
233 | * This routine checks all command line parameters for valid user | ||
234 | * input. If an invalid value is given, or if no user specified | ||
235 | * value exists, a default value is used. The final value is stored | ||
236 | * in a variable in the adapter structure. | ||
237 | **/ | ||
238 | void __devinit e1000e_check_options(struct e1000_adapter *adapter) | ||
239 | { | ||
240 | struct e1000_hw *hw = &adapter->hw; | ||
241 | int bd = adapter->bd_number; | ||
242 | |||
243 | if (bd >= E1000_MAX_NIC) { | ||
244 | e_notice("Warning: no configuration for board #%i\n", bd); | ||
245 | e_notice("Using defaults for all values\n"); | ||
246 | } | ||
247 | |||
248 | { /* Transmit Interrupt Delay */ | ||
249 | static const struct e1000_option opt = { | ||
250 | .type = range_option, | ||
251 | .name = "Transmit Interrupt Delay", | ||
252 | .err = "using default of " | ||
253 | __MODULE_STRING(DEFAULT_TIDV), | ||
254 | .def = DEFAULT_TIDV, | ||
255 | .arg = { .r = { .min = MIN_TXDELAY, | ||
256 | .max = MAX_TXDELAY } } | ||
257 | }; | ||
258 | |||
259 | if (num_TxIntDelay > bd) { | ||
260 | adapter->tx_int_delay = TxIntDelay[bd]; | ||
261 | e1000_validate_option(&adapter->tx_int_delay, &opt, | ||
262 | adapter); | ||
263 | } else { | ||
264 | adapter->tx_int_delay = opt.def; | ||
265 | } | ||
266 | } | ||
267 | { /* Transmit Absolute Interrupt Delay */ | ||
268 | static const struct e1000_option opt = { | ||
269 | .type = range_option, | ||
270 | .name = "Transmit Absolute Interrupt Delay", | ||
271 | .err = "using default of " | ||
272 | __MODULE_STRING(DEFAULT_TADV), | ||
273 | .def = DEFAULT_TADV, | ||
274 | .arg = { .r = { .min = MIN_TXABSDELAY, | ||
275 | .max = MAX_TXABSDELAY } } | ||
276 | }; | ||
277 | |||
278 | if (num_TxAbsIntDelay > bd) { | ||
279 | adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; | ||
280 | e1000_validate_option(&adapter->tx_abs_int_delay, &opt, | ||
281 | adapter); | ||
282 | } else { | ||
283 | adapter->tx_abs_int_delay = opt.def; | ||
284 | } | ||
285 | } | ||
286 | { /* Receive Interrupt Delay */ | ||
287 | static struct e1000_option opt = { | ||
288 | .type = range_option, | ||
289 | .name = "Receive Interrupt Delay", | ||
290 | .err = "using default of " | ||
291 | __MODULE_STRING(DEFAULT_RDTR), | ||
292 | .def = DEFAULT_RDTR, | ||
293 | .arg = { .r = { .min = MIN_RXDELAY, | ||
294 | .max = MAX_RXDELAY } } | ||
295 | }; | ||
296 | |||
297 | if (num_RxIntDelay > bd) { | ||
298 | adapter->rx_int_delay = RxIntDelay[bd]; | ||
299 | e1000_validate_option(&adapter->rx_int_delay, &opt, | ||
300 | adapter); | ||
301 | } else { | ||
302 | adapter->rx_int_delay = opt.def; | ||
303 | } | ||
304 | } | ||
305 | { /* Receive Absolute Interrupt Delay */ | ||
306 | static const struct e1000_option opt = { | ||
307 | .type = range_option, | ||
308 | .name = "Receive Absolute Interrupt Delay", | ||
309 | .err = "using default of " | ||
310 | __MODULE_STRING(DEFAULT_RADV), | ||
311 | .def = DEFAULT_RADV, | ||
312 | .arg = { .r = { .min = MIN_RXABSDELAY, | ||
313 | .max = MAX_RXABSDELAY } } | ||
314 | }; | ||
315 | |||
316 | if (num_RxAbsIntDelay > bd) { | ||
317 | adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; | ||
318 | e1000_validate_option(&adapter->rx_abs_int_delay, &opt, | ||
319 | adapter); | ||
320 | } else { | ||
321 | adapter->rx_abs_int_delay = opt.def; | ||
322 | } | ||
323 | } | ||
324 | { /* Interrupt Throttling Rate */ | ||
325 | static const struct e1000_option opt = { | ||
326 | .type = range_option, | ||
327 | .name = "Interrupt Throttling Rate (ints/sec)", | ||
328 | .err = "using default of " | ||
329 | __MODULE_STRING(DEFAULT_ITR), | ||
330 | .def = DEFAULT_ITR, | ||
331 | .arg = { .r = { .min = MIN_ITR, | ||
332 | .max = MAX_ITR } } | ||
333 | }; | ||
334 | |||
335 | if (num_InterruptThrottleRate > bd) { | ||
336 | adapter->itr = InterruptThrottleRate[bd]; | ||
337 | switch (adapter->itr) { | ||
338 | case 0: | ||
339 | e_info("%s turned off\n", opt.name); | ||
340 | break; | ||
341 | case 1: | ||
342 | e_info("%s set to dynamic mode\n", opt.name); | ||
343 | adapter->itr_setting = adapter->itr; | ||
344 | adapter->itr = 20000; | ||
345 | break; | ||
346 | case 3: | ||
347 | e_info("%s set to dynamic conservative mode\n", | ||
348 | opt.name); | ||
349 | adapter->itr_setting = adapter->itr; | ||
350 | adapter->itr = 20000; | ||
351 | break; | ||
352 | case 4: | ||
353 | e_info("%s set to simplified (2000-8000 ints) " | ||
354 | "mode\n", opt.name); | ||
355 | adapter->itr_setting = 4; | ||
356 | break; | ||
357 | default: | ||
358 | /* | ||
359 | * Save the setting, because the dynamic bits | ||
360 | * change itr. | ||
361 | */ | ||
362 | if (e1000_validate_option(&adapter->itr, &opt, | ||
363 | adapter) && | ||
364 | (adapter->itr == 3)) { | ||
365 | /* | ||
366 | * In case of invalid user value, | ||
367 | * default to conservative mode. | ||
368 | */ | ||
369 | adapter->itr_setting = adapter->itr; | ||
370 | adapter->itr = 20000; | ||
371 | } else { | ||
372 | /* | ||
373 | * Clear the lower two bits because | ||
374 | * they are used as control. | ||
375 | */ | ||
376 | adapter->itr_setting = | ||
377 | adapter->itr & ~3; | ||
378 | } | ||
379 | break; | ||
380 | } | ||
381 | } else { | ||
382 | adapter->itr_setting = opt.def; | ||
383 | adapter->itr = 20000; | ||
384 | } | ||
385 | } | ||
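Reading the switch above, the effective settings for a hypothetical single-port load come out as summarized below (a derived summary of this code, not from Intel documentation):

	/* InterruptThrottleRate=0   -> itr_setting 0, throttling off
	 * InterruptThrottleRate=1   -> itr_setting 1, itr starts at 20000 (dynamic)
	 * InterruptThrottleRate=3   -> itr_setting 3, itr starts at 20000 (conservative)
	 * InterruptThrottleRate=4   -> itr_setting 4, simplified 2000-8000 ints/sec
	 * InterruptThrottleRate=N in 100..100000 -> itr_setting N & ~3, itr fixed at N
	 * parameter not given       -> itr_setting DEFAULT_ITR (3), itr 20000
	 */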
386 | { /* Interrupt Mode */ | ||
387 | static struct e1000_option opt = { | ||
388 | .type = range_option, | ||
389 | .name = "Interrupt Mode", | ||
390 | .err = "defaulting to 2 (MSI-X)", | ||
391 | .def = E1000E_INT_MODE_MSIX, | ||
392 | .arg = { .r = { .min = MIN_INTMODE, | ||
393 | .max = MAX_INTMODE } } | ||
394 | }; | ||
395 | |||
396 | if (num_IntMode > bd) { | ||
397 | unsigned int int_mode = IntMode[bd]; | ||
398 | e1000_validate_option(&int_mode, &opt, adapter); | ||
399 | adapter->int_mode = int_mode; | ||
400 | } else { | ||
401 | adapter->int_mode = opt.def; | ||
402 | } | ||
403 | } | ||
404 | { /* Smart Power Down */ | ||
405 | static const struct e1000_option opt = { | ||
406 | .type = enable_option, | ||
407 | .name = "PHY Smart Power Down", | ||
408 | .err = "defaulting to Disabled", | ||
409 | .def = OPTION_DISABLED | ||
410 | }; | ||
411 | |||
412 | if (num_SmartPowerDownEnable > bd) { | ||
413 | unsigned int spd = SmartPowerDownEnable[bd]; | ||
414 | e1000_validate_option(&spd, &opt, adapter); | ||
415 | if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) | ||
416 | && spd) | ||
417 | adapter->flags |= FLAG_SMART_POWER_DOWN; | ||
418 | } | ||
419 | } | ||
420 | { /* CRC Stripping */ | ||
421 | static const struct e1000_option opt = { | ||
422 | .type = enable_option, | ||
423 | .name = "CRC Stripping", | ||
424 | .err = "defaulting to Enabled", | ||
425 | .def = OPTION_ENABLED | ||
426 | }; | ||
427 | |||
428 | if (num_CrcStripping > bd) { | ||
429 | unsigned int crc_stripping = CrcStripping[bd]; | ||
430 | e1000_validate_option(&crc_stripping, &opt, adapter); | ||
431 | if (crc_stripping == OPTION_ENABLED) | ||
432 | adapter->flags2 |= FLAG2_CRC_STRIPPING; | ||
433 | } else { | ||
434 | adapter->flags2 |= FLAG2_CRC_STRIPPING; | ||
435 | } | ||
436 | } | ||
437 | { /* Kumeran Lock Loss Workaround */ | ||
438 | static const struct e1000_option opt = { | ||
439 | .type = enable_option, | ||
440 | .name = "Kumeran Lock Loss Workaround", | ||
441 | .err = "defaulting to Enabled", | ||
442 | .def = OPTION_ENABLED | ||
443 | }; | ||
444 | |||
445 | if (num_KumeranLockLoss > bd) { | ||
446 | unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; | ||
447 | e1000_validate_option(&kmrn_lock_loss, &opt, adapter); | ||
448 | if (hw->mac.type == e1000_ich8lan) | ||
449 | e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, | ||
450 | kmrn_lock_loss); | ||
451 | } else { | ||
452 | if (hw->mac.type == e1000_ich8lan) | ||
453 | e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, | ||
454 | opt.def); | ||
455 | } | ||
456 | } | ||
457 | { /* Write-protect NVM */ | ||
458 | static const struct e1000_option opt = { | ||
459 | .type = enable_option, | ||
460 | .name = "Write-protect NVM", | ||
461 | .err = "defaulting to Enabled", | ||
462 | .def = OPTION_ENABLED | ||
463 | }; | ||
464 | |||
465 | if (adapter->flags & FLAG_IS_ICH) { | ||
466 | if (num_WriteProtectNVM > bd) { | ||
467 | unsigned int write_protect_nvm = WriteProtectNVM[bd]; | ||
468 | e1000_validate_option(&write_protect_nvm, &opt, | ||
469 | adapter); | ||
470 | if (write_protect_nvm) | ||
471 | adapter->flags |= FLAG_READ_ONLY_NVM; | ||
472 | } else { | ||
473 | if (opt.def) | ||
474 | adapter->flags |= FLAG_READ_ONLY_NVM; | ||
475 | } | ||
476 | } | ||
477 | } | ||
478 | } | ||
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c new file mode 100644 index 000000000000..8666476cb9be --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/phy.c | |||
@@ -0,0 +1,3377 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/1000 Linux driver | ||
4 | Copyright(c) 1999 - 2011 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #include <linux/delay.h> | ||
30 | |||
31 | #include "e1000.h" | ||
32 | |||
33 | static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); | ||
34 | static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); | ||
35 | static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); | ||
36 | static s32 e1000_wait_autoneg(struct e1000_hw *hw); | ||
37 | static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg); | ||
38 | static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | ||
39 | u16 *data, bool read, bool page_set); | ||
40 | static u32 e1000_get_phy_addr_for_hv_page(u32 page); | ||
41 | static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | ||
42 | u16 *data, bool read); | ||
43 | |||
44 | /* Cable length tables */ | ||
45 | static const u16 e1000_m88_cable_length_table[] = { | ||
46 | 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; | ||
47 | #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ | ||
48 | ARRAY_SIZE(e1000_m88_cable_length_table) | ||
49 | |||
50 | static const u16 e1000_igp_2_cable_length_table[] = { | ||
51 | 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, | ||
52 | 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, | ||
53 | 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, | ||
54 | 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, | ||
55 | 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, | ||
56 | 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, | ||
57 | 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, | ||
58 | 124}; | ||
59 | #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ | ||
60 | ARRAY_SIZE(e1000_igp_2_cable_length_table) | ||
61 | |||
62 | #define BM_PHY_REG_PAGE(offset) \ | ||
63 | ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF)) | ||
64 | #define BM_PHY_REG_NUM(offset) \ | ||
65 | ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\ | ||
66 | (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\ | ||
67 | ~MAX_PHY_REG_ADDRESS))) | ||
68 | |||
69 | #define HV_INTC_FC_PAGE_START 768 | ||
70 | #define I82578_ADDR_REG 29 | ||
71 | #define I82577_ADDR_REG 16 | ||
72 | #define I82577_CFG_REG 22 | ||
73 | #define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) | ||
74 | #define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ | ||
75 | #define I82577_CTRL_REG 23 | ||
76 | |||
77 | /* 82577 specific PHY registers */ | ||
78 | #define I82577_PHY_CTRL_2 18 | ||
79 | #define I82577_PHY_STATUS_2 26 | ||
80 | #define I82577_PHY_DIAG_STATUS 31 | ||
81 | |||
82 | /* I82577 PHY Status 2 */ | ||
83 | #define I82577_PHY_STATUS2_REV_POLARITY 0x0400 | ||
84 | #define I82577_PHY_STATUS2_MDIX 0x0800 | ||
85 | #define I82577_PHY_STATUS2_SPEED_MASK 0x0300 | ||
86 | #define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 | ||
87 | |||
88 | /* I82577 PHY Control 2 */ | ||
89 | #define I82577_PHY_CTRL2_AUTO_MDIX 0x0400 | ||
90 | #define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 | ||
91 | |||
92 | /* I82577 PHY Diagnostics Status */ | ||
93 | #define I82577_DSTATUS_CABLE_LENGTH 0x03FC | ||
94 | #define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 | ||
95 | |||
96 | /* BM PHY Copper Specific Control 1 */ | ||
97 | #define BM_CS_CTRL1 16 | ||
98 | |||
99 | #define HV_MUX_DATA_CTRL PHY_REG(776, 16) | ||
100 | #define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 | ||
101 | #define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 | ||
102 | |||
103 | /** | ||
104 | * e1000e_check_reset_block_generic - Check if PHY reset is blocked | ||
105 | * @hw: pointer to the HW structure | ||
106 | * | ||
107 | * Read the PHY management control register and check whether a PHY reset | ||
108 | * is blocked. If a reset is not blocked return 0, otherwise | ||
109 | * return E1000_BLK_PHY_RESET (12). | ||
110 | **/ | ||
111 | s32 e1000e_check_reset_block_generic(struct e1000_hw *hw) | ||
112 | { | ||
113 | u32 manc; | ||
114 | |||
115 | manc = er32(MANC); | ||
116 | |||
117 | return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? | ||
118 | E1000_BLK_PHY_RESET : 0; | ||
119 | } | ||
120 | |||
121 | /** | ||
122 | * e1000e_get_phy_id - Retrieve the PHY ID and revision | ||
123 | * @hw: pointer to the HW structure | ||
124 | * | ||
125 | * Reads the PHY registers and stores the PHY ID and possibly the PHY | ||
126 | * revision in the hardware structure. | ||
127 | **/ | ||
128 | s32 e1000e_get_phy_id(struct e1000_hw *hw) | ||
129 | { | ||
130 | struct e1000_phy_info *phy = &hw->phy; | ||
131 | s32 ret_val = 0; | ||
132 | u16 phy_id; | ||
133 | u16 retry_count = 0; | ||
134 | |||
135 | if (!(phy->ops.read_reg)) | ||
136 | goto out; | ||
137 | |||
138 | while (retry_count < 2) { | ||
139 | ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); | ||
140 | if (ret_val) | ||
141 | goto out; | ||
142 | |||
143 | phy->id = (u32)(phy_id << 16); | ||
144 | udelay(20); | ||
145 | ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); | ||
146 | if (ret_val) | ||
147 | goto out; | ||
148 | |||
149 | phy->id |= (u32)(phy_id & PHY_REVISION_MASK); | ||
150 | phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); | ||
151 | |||
152 | if (phy->id != 0 && phy->id != PHY_REVISION_MASK) | ||
153 | goto out; | ||
154 | |||
155 | retry_count++; | ||
156 | } | ||
157 | out: | ||
158 | return ret_val; | ||
159 | } | ||
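With hypothetical ID register values (chosen for illustration, not taken from a real PHY) and PHY_REVISION_MASK assumed to clear the low four revision bits, the two reads above combine like this:

	/* Suppose PHY_ID1 reads 0x0141 and PHY_ID2 reads 0x0CB1:
	 *   phy->id       = (0x0141 << 16) | (0x0CB1 & ~0xF) = 0x01410CB0
	 *   phy->revision =  0x0CB1 & 0xF                     = 0x1
	 */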
160 | |||
161 | /** | ||
162 | * e1000e_phy_reset_dsp - Reset PHY DSP | ||
163 | * @hw: pointer to the HW structure | ||
164 | * | ||
165 | * Reset the digital signal processor. | ||
166 | **/ | ||
167 | s32 e1000e_phy_reset_dsp(struct e1000_hw *hw) | ||
168 | { | ||
169 | s32 ret_val; | ||
170 | |||
171 | ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); | ||
172 | if (ret_val) | ||
173 | return ret_val; | ||
174 | |||
175 | return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0); | ||
176 | } | ||
177 | |||
178 | /** | ||
179 | * e1000e_read_phy_reg_mdic - Read MDI control register | ||
180 | * @hw: pointer to the HW structure | ||
181 | * @offset: register offset to be read | ||
182 | * @data: pointer to the read data | ||
183 | * | ||
184 | * Reads the MDI control register in the PHY at offset and stores the | ||
185 | * information read to data. | ||
186 | **/ | ||
187 | s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) | ||
188 | { | ||
189 | struct e1000_phy_info *phy = &hw->phy; | ||
190 | u32 i, mdic = 0; | ||
191 | |||
192 | if (offset > MAX_PHY_REG_ADDRESS) { | ||
193 | e_dbg("PHY Address %d is out of range\n", offset); | ||
194 | return -E1000_ERR_PARAM; | ||
195 | } | ||
196 | |||
197 | /* | ||
198 | * Set up Op-code, Phy Address, and register offset in the MDI | ||
199 | * Control register. The MAC will take care of interfacing with the | ||
200 | * PHY to retrieve the desired data. | ||
201 | */ | ||
202 | mdic = ((offset << E1000_MDIC_REG_SHIFT) | | ||
203 | (phy->addr << E1000_MDIC_PHY_SHIFT) | | ||
204 | (E1000_MDIC_OP_READ)); | ||
205 | |||
206 | ew32(MDIC, mdic); | ||
207 | |||
208 | /* | ||
209 | * Poll the ready bit to see if the MDI read completed. The | ||
210 | * timeout was increased because testing showed failures with | ||
211 | * the lower timeout. | ||
212 | */ | ||
213 | for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { | ||
214 | udelay(50); | ||
215 | mdic = er32(MDIC); | ||
216 | if (mdic & E1000_MDIC_READY) | ||
217 | break; | ||
218 | } | ||
219 | if (!(mdic & E1000_MDIC_READY)) { | ||
220 | e_dbg("MDI Read did not complete\n"); | ||
221 | return -E1000_ERR_PHY; | ||
222 | } | ||
223 | if (mdic & E1000_MDIC_ERROR) { | ||
224 | e_dbg("MDI Error\n"); | ||
225 | return -E1000_ERR_PHY; | ||
226 | } | ||
227 | *data = (u16) mdic; | ||
228 | |||
229 | /* | ||
230 | * Allow some time after each MDIC transaction to avoid | ||
231 | * reading duplicate data in the next MDIC transaction. | ||
232 | */ | ||
233 | if (hw->mac.type == e1000_pch2lan) | ||
234 | udelay(100); | ||
235 | |||
236 | return 0; | ||
237 | } | ||
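For a concrete feel of how the MDIC word is put together, a read of PHY register 2 (PHY_ID1) from PHY address 1 would build the value sketched below; the shift and flag macros are the same ones used in the function above.

	/* mdic = (2 << E1000_MDIC_REG_SHIFT) |    register offset
	 *        (1 << E1000_MDIC_PHY_SHIFT) |   PHY address
	 *        E1000_MDIC_OP_READ;             read op-code
	 *
	 * Once E1000_MDIC_READY is set, the 16-bit result sits in the low
	 * bits of MDIC, which is why the function casts mdic to u16.
	 */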
238 | |||
239 | /** | ||
240 | * e1000e_write_phy_reg_mdic - Write MDI control register | ||
241 | * @hw: pointer to the HW structure | ||
242 | * @offset: register offset to write to | ||
243 | * @data: data to write to register at offset | ||
244 | * | ||
245 | * Writes data to MDI control register in the PHY at offset. | ||
246 | **/ | ||
247 | s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) | ||
248 | { | ||
249 | struct e1000_phy_info *phy = &hw->phy; | ||
250 | u32 i, mdic = 0; | ||
251 | |||
252 | if (offset > MAX_PHY_REG_ADDRESS) { | ||
253 | e_dbg("PHY Address %d is out of range\n", offset); | ||
254 | return -E1000_ERR_PARAM; | ||
255 | } | ||
256 | |||
257 | /* | ||
258 | * Set up Op-code, Phy Address, and register offset in the MDI | ||
259 | * Control register. The MAC will take care of interfacing with the | ||
260 | * PHY to write the desired data. | ||
261 | */ | ||
262 | mdic = (((u32)data) | | ||
263 | (offset << E1000_MDIC_REG_SHIFT) | | ||
264 | (phy->addr << E1000_MDIC_PHY_SHIFT) | | ||
265 | (E1000_MDIC_OP_WRITE)); | ||
266 | |||
267 | ew32(MDIC, mdic); | ||
268 | |||
269 | /* | ||
270 | * Poll the ready bit to see if the MDI write completed. The | ||
271 | * timeout was increased because testing showed failures with | ||
272 | * the lower timeout. | ||
273 | */ | ||
274 | for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { | ||
275 | udelay(50); | ||
276 | mdic = er32(MDIC); | ||
277 | if (mdic & E1000_MDIC_READY) | ||
278 | break; | ||
279 | } | ||
280 | if (!(mdic & E1000_MDIC_READY)) { | ||
281 | e_dbg("MDI Write did not complete\n"); | ||
282 | return -E1000_ERR_PHY; | ||
283 | } | ||
284 | if (mdic & E1000_MDIC_ERROR) { | ||
285 | e_dbg("MDI Error\n"); | ||
286 | return -E1000_ERR_PHY; | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * Allow some time after each MDIC transaction to avoid | ||
291 | * reading duplicate data in the next MDIC transaction. | ||
292 | */ | ||
293 | if (hw->mac.type == e1000_pch2lan) | ||
294 | udelay(100); | ||
295 | |||
296 | return 0; | ||
297 | } | ||
298 | |||
299 | /** | ||
300 | * e1000e_read_phy_reg_m88 - Read m88 PHY register | ||
301 | * @hw: pointer to the HW structure | ||
302 | * @offset: register offset to be read | ||
303 | * @data: pointer to the read data | ||
304 | * | ||
305 | * Acquires semaphore, if necessary, then reads the PHY register at offset | ||
306 | * and stores the retrieved information in data. Release any acquired | ||
307 | * semaphores before exiting. | ||
308 | **/ | ||
309 | s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) | ||
310 | { | ||
311 | s32 ret_val; | ||
312 | |||
313 | ret_val = hw->phy.ops.acquire(hw); | ||
314 | if (ret_val) | ||
315 | return ret_val; | ||
316 | |||
317 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | ||
318 | data); | ||
319 | |||
320 | hw->phy.ops.release(hw); | ||
321 | |||
322 | return ret_val; | ||
323 | } | ||
324 | |||
325 | /** | ||
326 | * e1000e_write_phy_reg_m88 - Write m88 PHY register | ||
327 | * @hw: pointer to the HW structure | ||
328 | * @offset: register offset to write to | ||
329 | * @data: data to write at register offset | ||
330 | * | ||
331 | * Acquires semaphore, if necessary, then writes the data to PHY register | ||
332 | * at the offset. Release any acquired semaphores before exiting. | ||
333 | **/ | ||
334 | s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) | ||
335 | { | ||
336 | s32 ret_val; | ||
337 | |||
338 | ret_val = hw->phy.ops.acquire(hw); | ||
339 | if (ret_val) | ||
340 | return ret_val; | ||
341 | |||
342 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | ||
343 | data); | ||
344 | |||
345 | hw->phy.ops.release(hw); | ||
346 | |||
347 | return ret_val; | ||
348 | } | ||
349 | |||
350 | /** | ||
351 | * e1000_set_page_igp - Set page as on IGP-like PHY(s) | ||
352 | * @hw: pointer to the HW structure | ||
353 | * @page: page to set (shifted left when necessary) | ||
354 | * | ||
355 | * Sets PHY page required for PHY register access. Assumes semaphore is | ||
356 | * already acquired. Note, this function sets phy.addr to 1 so the caller | ||
357 | * must set it appropriately (if necessary) after this function returns. | ||
358 | **/ | ||
359 | s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) | ||
360 | { | ||
361 | e_dbg("Setting page 0x%x\n", page); | ||
362 | |||
363 | hw->phy.addr = 1; | ||
364 | |||
365 | return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); | ||
366 | } | ||
367 | |||
368 | /** | ||
369 | * __e1000e_read_phy_reg_igp - Read igp PHY register | ||
370 | * @hw: pointer to the HW structure | ||
371 | * @offset: register offset to be read | ||
372 | * @data: pointer to the read data | ||
373 | * @locked: semaphore has already been acquired or not | ||
374 | * | ||
375 | * Acquires semaphore, if necessary, then reads the PHY register at offset | ||
376 | * and stores the retrieved information in data. Release any acquired | ||
377 | * semaphores before exiting. | ||
378 | **/ | ||
379 | static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, | ||
380 | bool locked) | ||
381 | { | ||
382 | s32 ret_val = 0; | ||
383 | |||
384 | if (!locked) { | ||
385 | if (!(hw->phy.ops.acquire)) | ||
386 | goto out; | ||
387 | |||
388 | ret_val = hw->phy.ops.acquire(hw); | ||
389 | if (ret_val) | ||
390 | goto out; | ||
391 | } | ||
392 | |||
393 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | ||
394 | ret_val = e1000e_write_phy_reg_mdic(hw, | ||
395 | IGP01E1000_PHY_PAGE_SELECT, | ||
396 | (u16)offset); | ||
397 | if (ret_val) | ||
398 | goto release; | ||
399 | } | ||
400 | |||
401 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | ||
402 | data); | ||
403 | |||
404 | release: | ||
405 | if (!locked) | ||
406 | hw->phy.ops.release(hw); | ||
407 | out: | ||
408 | return ret_val; | ||
409 | } | ||
410 | |||
411 | /** | ||
412 | * e1000e_read_phy_reg_igp - Read igp PHY register | ||
413 | * @hw: pointer to the HW structure | ||
414 | * @offset: register offset to be read | ||
415 | * @data: pointer to the read data | ||
416 | * | ||
417 | * Acquires semaphore then reads the PHY register at offset and stores the | ||
418 | * retrieved information in data. | ||
419 | * Release the acquired semaphore before exiting. | ||
420 | **/ | ||
421 | s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) | ||
422 | { | ||
423 | return __e1000e_read_phy_reg_igp(hw, offset, data, false); | ||
424 | } | ||
425 | |||
426 | /** | ||
427 | * e1000e_read_phy_reg_igp_locked - Read igp PHY register | ||
428 | * @hw: pointer to the HW structure | ||
429 | * @offset: register offset to be read | ||
430 | * @data: pointer to the read data | ||
431 | * | ||
432 | * Reads the PHY register at offset and stores the retrieved information | ||
433 | * in data. Assumes semaphore already acquired. | ||
434 | **/ | ||
435 | s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) | ||
436 | { | ||
437 | return __e1000e_read_phy_reg_igp(hw, offset, data, true); | ||
438 | } | ||
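A minimal sketch of how the _locked variant is meant to be used, with a hypothetical caller that holds the PHY semaphore across several accesses (example only, not driver code; PHY_CONTROL and PHY_STATUS are the standard register defines from the driver headers):

	static s32 example_read_ctrl_and_status(struct e1000_hw *hw,
						u16 *ctrl, u16 *status)
	{
		s32 ret_val;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		/* both reads happen under one acquire/release pair */
		ret_val = e1000e_read_phy_reg_igp_locked(hw, PHY_CONTROL, ctrl);
		if (!ret_val)
			ret_val = e1000e_read_phy_reg_igp_locked(hw, PHY_STATUS,
								 status);

		hw->phy.ops.release(hw);
		return ret_val;
	}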
439 | |||
440 | /** | ||
441 | * __e1000e_write_phy_reg_igp - Write igp PHY register | ||
442 | * @hw: pointer to the HW structure | ||
443 | * @offset: register offset to write to | ||
444 | * @data: data to write at register offset | ||
445 | * @locked: semaphore has already been acquired or not | ||
446 | * | ||
447 | * Acquires semaphore, if necessary, then writes the data to PHY register | ||
448 | * at the offset. Release any acquired semaphores before exiting. | ||
449 | **/ | ||
450 | static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, | ||
451 | bool locked) | ||
452 | { | ||
453 | s32 ret_val = 0; | ||
454 | |||
455 | if (!locked) { | ||
456 | if (!(hw->phy.ops.acquire)) | ||
457 | goto out; | ||
458 | |||
459 | ret_val = hw->phy.ops.acquire(hw); | ||
460 | if (ret_val) | ||
461 | goto out; | ||
462 | } | ||
463 | |||
464 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | ||
465 | ret_val = e1000e_write_phy_reg_mdic(hw, | ||
466 | IGP01E1000_PHY_PAGE_SELECT, | ||
467 | (u16)offset); | ||
468 | if (ret_val) | ||
469 | goto release; | ||
470 | } | ||
471 | |||
472 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | ||
473 | data); | ||
474 | |||
475 | release: | ||
476 | if (!locked) | ||
477 | hw->phy.ops.release(hw); | ||
478 | |||
479 | out: | ||
480 | return ret_val; | ||
481 | } | ||
482 | |||
483 | /** | ||
484 | * e1000e_write_phy_reg_igp - Write igp PHY register | ||
485 | * @hw: pointer to the HW structure | ||
486 | * @offset: register offset to write to | ||
487 | * @data: data to write at register offset | ||
488 | * | ||
489 | * Acquires semaphore then writes the data to PHY register | ||
490 | * at the offset. Release any acquired semaphores before exiting. | ||
491 | **/ | ||
492 | s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) | ||
493 | { | ||
494 | return __e1000e_write_phy_reg_igp(hw, offset, data, false); | ||
495 | } | ||
496 | |||
497 | /** | ||
498 | * e1000e_write_phy_reg_igp_locked - Write igp PHY register | ||
499 | * @hw: pointer to the HW structure | ||
500 | * @offset: register offset to write to | ||
501 | * @data: data to write at register offset | ||
502 | * | ||
503 | * Writes the data to PHY register at the offset. | ||
504 | * Assumes semaphore already acquired. | ||
505 | **/ | ||
506 | s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) | ||
507 | { | ||
508 | return __e1000e_write_phy_reg_igp(hw, offset, data, true); | ||
509 | } | ||
510 | |||
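/*
 * Illustrative sketch, not part of the driver: how a caller might batch
 * several IGP PHY accesses under one semaphore acquisition by using the
 * *_locked accessors above, instead of letting the unlocked wrappers
 * acquire and release per access.  Assumes the surrounding e1000.h
 * context; the helper name is hypothetical.
 */
static s32 example_igp_read_modify_write(struct e1000_hw *hw, u32 offset,
					 u16 set_bits)
{
	u16 val;
	s32 ret_val;

	/* Hold the PHY semaphore across both the read and the write. */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1000e_read_phy_reg_igp_locked(hw, offset, &val);
	if (!ret_val)
		ret_val = e1000e_write_phy_reg_igp_locked(hw, offset,
							  val | set_bits);

	hw->phy.ops.release(hw);

	return ret_val;
}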
511 | /** | ||
512 | * __e1000_read_kmrn_reg - Read kumeran register | ||
513 | * @hw: pointer to the HW structure | ||
514 | * @offset: register offset to be read | ||
515 | * @data: pointer to the read data | ||
516 | * @locked: semaphore has already been acquired or not | ||
517 | * | ||
518 | * Acquires semaphore, if necessary. Then reads the PHY register at offset | ||
519 | * using the kumeran interface. The information retrieved is stored in data. | ||
520 | * Release any acquired semaphores before exiting. | ||
521 | **/ | ||
522 | static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, | ||
523 | bool locked) | ||
524 | { | ||
525 | u32 kmrnctrlsta; | ||
526 | s32 ret_val = 0; | ||
527 | |||
528 | if (!locked) { | ||
529 | if (!(hw->phy.ops.acquire)) | ||
530 | goto out; | ||
531 | |||
532 | ret_val = hw->phy.ops.acquire(hw); | ||
533 | if (ret_val) | ||
534 | goto out; | ||
535 | } | ||
536 | |||
537 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & | ||
538 | E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; | ||
539 | ew32(KMRNCTRLSTA, kmrnctrlsta); | ||
540 | e1e_flush(); | ||
541 | |||
542 | udelay(2); | ||
543 | |||
544 | kmrnctrlsta = er32(KMRNCTRLSTA); | ||
545 | *data = (u16)kmrnctrlsta; | ||
546 | |||
547 | if (!locked) | ||
548 | hw->phy.ops.release(hw); | ||
549 | |||
550 | out: | ||
551 | return ret_val; | ||
552 | } | ||
553 | |||
554 | /** | ||
555 | * e1000e_read_kmrn_reg - Read kumeran register | ||
556 | * @hw: pointer to the HW structure | ||
557 | * @offset: register offset to be read | ||
558 | * @data: pointer to the read data | ||
559 | * | ||
560 | * Acquires semaphore then reads the PHY register at offset using the | ||
561 | * kumeran interface. The information retrieved is stored in data. | ||
562 | * Release the acquired semaphore before exiting. | ||
563 | **/ | ||
564 | s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) | ||
565 | { | ||
566 | return __e1000_read_kmrn_reg(hw, offset, data, false); | ||
567 | } | ||
568 | |||
569 | /** | ||
570 | * e1000e_read_kmrn_reg_locked - Read kumeran register | ||
571 | * @hw: pointer to the HW structure | ||
572 | * @offset: register offset to be read | ||
573 | * @data: pointer to the read data | ||
574 | * | ||
575 | * Reads the PHY register at offset using the kumeran interface. The | ||
576 | * information retrieved is stored in data. | ||
577 | * Assumes semaphore already acquired. | ||
578 | **/ | ||
579 | s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) | ||
580 | { | ||
581 | return __e1000_read_kmrn_reg(hw, offset, data, true); | ||
582 | } | ||
583 | |||
584 | /** | ||
585 | * __e1000_write_kmrn_reg - Write kumeran register | ||
586 | * @hw: pointer to the HW structure | ||
587 | * @offset: register offset to write to | ||
588 | * @data: data to write at register offset | ||
589 | * @locked: semaphore has already been acquired or not | ||
590 | * | ||
591 | * Acquires semaphore, if necessary. Then write the data to PHY register | ||
592 | * at the offset using the kumeran interface. Release any acquired semaphores | ||
593 | * before exiting. | ||
594 | **/ | ||
595 | static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, | ||
596 | bool locked) | ||
597 | { | ||
598 | u32 kmrnctrlsta; | ||
599 | s32 ret_val = 0; | ||
600 | |||
601 | if (!locked) { | ||
602 | if (!(hw->phy.ops.acquire)) | ||
603 | goto out; | ||
604 | |||
605 | ret_val = hw->phy.ops.acquire(hw); | ||
606 | if (ret_val) | ||
607 | goto out; | ||
608 | } | ||
609 | |||
610 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & | ||
611 | E1000_KMRNCTRLSTA_OFFSET) | data; | ||
612 | ew32(KMRNCTRLSTA, kmrnctrlsta); | ||
613 | e1e_flush(); | ||
614 | |||
615 | udelay(2); | ||
616 | |||
617 | if (!locked) | ||
618 | hw->phy.ops.release(hw); | ||
619 | |||
620 | out: | ||
621 | return ret_val; | ||
622 | } | ||
623 | |||
624 | /** | ||
625 | * e1000e_write_kmrn_reg - Write kumeran register | ||
626 | * @hw: pointer to the HW structure | ||
627 | * @offset: register offset to write to | ||
628 | * @data: data to write at register offset | ||
629 | * | ||
630 | * Acquires semaphore then writes the data to the PHY register at the offset | ||
631 | * using the kumeran interface. Release the acquired semaphore before exiting. | ||
632 | **/ | ||
633 | s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) | ||
634 | { | ||
635 | return __e1000_write_kmrn_reg(hw, offset, data, false); | ||
636 | } | ||
637 | |||
638 | /** | ||
639 | * e1000e_write_kmrn_reg_locked - Write kumeran register | ||
640 | * @hw: pointer to the HW structure | ||
641 | * @offset: register offset to write to | ||
642 | * @data: data to write at register offset | ||
643 | * | ||
644 | * Write the data to PHY register at the offset using the kumeran interface. | ||
645 | * Assumes semaphore already acquired. | ||
646 | **/ | ||
647 | s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) | ||
648 | { | ||
649 | return __e1000_write_kmrn_reg(hw, offset, data, true); | ||
650 | } | ||
651 | |||
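/*
 * Illustrative sketch, not part of the driver: a read-modify-write of a
 * Kumeran register using the unlocked wrappers above.  Each call acquires
 * and releases the PHY semaphore internally; the *_locked variants should
 * be used instead when the caller already holds it.  The helper name and
 * the mask/value parameters are hypothetical.
 */
static s32 example_kmrn_update(struct e1000_hw *hw, u32 offset, u16 mask,
			       u16 value)
{
	u16 kmrn_data;
	s32 ret_val;

	ret_val = e1000e_read_kmrn_reg(hw, offset, &kmrn_data);
	if (ret_val)
		return ret_val;

	/* Clear the masked field and merge in the new value. */
	kmrn_data = (kmrn_data & ~mask) | (value & mask);

	return e1000e_write_kmrn_reg(hw, offset, kmrn_data);
}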
652 | /** | ||
653 | * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link | ||
654 | * @hw: pointer to the HW structure | ||
655 | * | ||
656 | * Sets up Carrier-sense on Transmit and downshift values. | ||
657 | **/ | ||
658 | s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) | ||
659 | { | ||
660 | s32 ret_val; | ||
661 | u16 phy_data; | ||
662 | |||
663 | /* Enable CRS on Tx. This must be set for half-duplex operation. */ | ||
664 | ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); | ||
665 | if (ret_val) | ||
666 | goto out; | ||
667 | |||
668 | phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; | ||
669 | |||
670 | /* Enable downshift */ | ||
671 | phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; | ||
672 | |||
673 | ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data); | ||
674 | |||
675 | out: | ||
676 | return ret_val; | ||
677 | } | ||
678 | |||
679 | /** | ||
680 | * e1000e_copper_link_setup_m88 - Setup m88 PHYs for copper link | ||
681 | * @hw: pointer to the HW structure | ||
682 | * | ||
683 | * Sets up MDI/MDI-X and polarity for m88 PHYs. If necessary, the transmit | ||
684 | * clock and downshift values are also set. | ||
685 | **/ | ||
686 | s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) | ||
687 | { | ||
688 | struct e1000_phy_info *phy = &hw->phy; | ||
689 | s32 ret_val; | ||
690 | u16 phy_data; | ||
691 | |||
692 | /* Enable CRS on Tx. This must be set for half-duplex operation. */ | ||
693 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | ||
694 | if (ret_val) | ||
695 | return ret_val; | ||
696 | |||
697 | /* For BM PHY this bit is downshift enable */ | ||
698 | if (phy->type != e1000_phy_bm) | ||
699 | phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; | ||
700 | |||
701 | /* | ||
702 | * Options: | ||
703 | * MDI/MDI-X = 0 (default) | ||
704 | * 0 - Auto for all speeds | ||
705 | * 1 - MDI mode | ||
706 | * 2 - MDI-X mode | ||
707 | * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) | ||
708 | */ | ||
709 | phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; | ||
710 | |||
711 | switch (phy->mdix) { | ||
712 | case 1: | ||
713 | phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; | ||
714 | break; | ||
715 | case 2: | ||
716 | phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; | ||
717 | break; | ||
718 | case 3: | ||
719 | phy_data |= M88E1000_PSCR_AUTO_X_1000T; | ||
720 | break; | ||
721 | case 0: | ||
722 | default: | ||
723 | phy_data |= M88E1000_PSCR_AUTO_X_MODE; | ||
724 | break; | ||
725 | } | ||
726 | |||
727 | /* | ||
728 | * Options: | ||
729 | * disable_polarity_correction = 0 (default) | ||
730 | * Automatic Correction for Reversed Cable Polarity | ||
731 | * 0 - Disabled | ||
732 | * 1 - Enabled | ||
733 | */ | ||
734 | phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; | ||
735 | if (phy->disable_polarity_correction == 1) | ||
736 | phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; | ||
737 | |||
738 | /* Enable downshift on BM (disabled by default) */ | ||
739 | if (phy->type == e1000_phy_bm) | ||
740 | phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; | ||
741 | |||
742 | ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); | ||
743 | if (ret_val) | ||
744 | return ret_val; | ||
745 | |||
746 | if ((phy->type == e1000_phy_m88) && | ||
747 | (phy->revision < E1000_REVISION_4) && | ||
748 | (phy->id != BME1000_E_PHY_ID_R2)) { | ||
749 | /* | ||
750 | * Force TX_CLK in the Extended PHY Specific Control Register | ||
751 | * to 25MHz clock. | ||
752 | */ | ||
753 | ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); | ||
754 | if (ret_val) | ||
755 | return ret_val; | ||
756 | |||
757 | phy_data |= M88E1000_EPSCR_TX_CLK_25; | ||
758 | |||
759 | if ((phy->revision == 2) && | ||
760 | (phy->id == M88E1111_I_PHY_ID)) { | ||
761 | /* 82573L PHY - set the downshift counter to 5x. */ | ||
762 | phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; | ||
763 | phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; | ||
764 | } else { | ||
765 | /* Configure Master and Slave downshift values */ | ||
766 | phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | | ||
767 | M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); | ||
768 | phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | | ||
769 | M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); | ||
770 | } | ||
771 | ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); | ||
772 | if (ret_val) | ||
773 | return ret_val; | ||
774 | } | ||
775 | |||
776 | if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) { | ||
777 | /* Set PHY page 0, register 29 to 0x0003 */ | ||
778 | ret_val = e1e_wphy(hw, 29, 0x0003); | ||
779 | if (ret_val) | ||
780 | return ret_val; | ||
781 | |||
782 | /* Set PHY page 0, register 30 to 0x0000 */ | ||
783 | ret_val = e1e_wphy(hw, 30, 0x0000); | ||
784 | if (ret_val) | ||
785 | return ret_val; | ||
786 | } | ||
787 | |||
788 | /* Commit the changes. */ | ||
789 | ret_val = e1000e_commit_phy(hw); | ||
790 | if (ret_val) { | ||
791 | e_dbg("Error committing the PHY changes\n"); | ||
792 | return ret_val; | ||
793 | } | ||
794 | |||
795 | if (phy->type == e1000_phy_82578) { | ||
796 | ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); | ||
797 | if (ret_val) | ||
798 | return ret_val; | ||
799 | |||
800 | /* 82578 PHY - set the downshift count to 1x. */ | ||
801 | phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; | ||
802 | phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; | ||
803 | ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); | ||
804 | if (ret_val) | ||
805 | return ret_val; | ||
806 | } | ||
807 | |||
808 | return 0; | ||
809 | } | ||
810 | |||
811 | /** | ||
812 | * e1000e_copper_link_setup_igp - Setup igp PHYs for copper link | ||
813 | * @hw: pointer to the HW structure | ||
814 | * | ||
815 | * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for | ||
816 | * igp PHYs. | ||
817 | **/ | ||
818 | s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) | ||
819 | { | ||
820 | struct e1000_phy_info *phy = &hw->phy; | ||
821 | s32 ret_val; | ||
822 | u16 data; | ||
823 | |||
824 | ret_val = e1000_phy_hw_reset(hw); | ||
825 | if (ret_val) { | ||
826 | e_dbg("Error resetting the PHY.\n"); | ||
827 | return ret_val; | ||
828 | } | ||
829 | |||
830 | /* | ||
831 | * Wait 100ms for MAC to configure PHY from NVM settings, to avoid | ||
832 | * timeout issues when LFS is enabled. | ||
833 | */ | ||
834 | msleep(100); | ||
835 | |||
836 | /* disable lplu d0 during driver init */ | ||
837 | ret_val = e1000_set_d0_lplu_state(hw, false); | ||
838 | if (ret_val) { | ||
839 | e_dbg("Error Disabling LPLU D0\n"); | ||
840 | return ret_val; | ||
841 | } | ||
842 | /* Configure mdi-mdix settings */ | ||
843 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data); | ||
844 | if (ret_val) | ||
845 | return ret_val; | ||
846 | |||
847 | data &= ~IGP01E1000_PSCR_AUTO_MDIX; | ||
848 | |||
849 | switch (phy->mdix) { | ||
850 | case 1: | ||
851 | data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; | ||
852 | break; | ||
853 | case 2: | ||
854 | data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; | ||
855 | break; | ||
856 | case 0: | ||
857 | default: | ||
858 | data |= IGP01E1000_PSCR_AUTO_MDIX; | ||
859 | break; | ||
860 | } | ||
861 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data); | ||
862 | if (ret_val) | ||
863 | return ret_val; | ||
864 | |||
865 | /* set auto-master slave resolution settings */ | ||
866 | if (hw->mac.autoneg) { | ||
867 | /* | ||
868 | * When the autonegotiation advertisement is 1000Mbps only, disable | ||
869 | * SmartSpeed and enable Auto Master/Slave resolution as the | ||
870 | * hardware default. | ||
871 | */ | ||
872 | if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { | ||
873 | /* Disable SmartSpeed */ | ||
874 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
875 | &data); | ||
876 | if (ret_val) | ||
877 | return ret_val; | ||
878 | |||
879 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | ||
880 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
881 | data); | ||
882 | if (ret_val) | ||
883 | return ret_val; | ||
884 | |||
885 | /* Set auto Master/Slave resolution process */ | ||
886 | ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); | ||
887 | if (ret_val) | ||
888 | return ret_val; | ||
889 | |||
890 | data &= ~CR_1000T_MS_ENABLE; | ||
891 | ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); | ||
892 | if (ret_val) | ||
893 | return ret_val; | ||
894 | } | ||
895 | |||
896 | ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); | ||
897 | if (ret_val) | ||
898 | return ret_val; | ||
899 | |||
900 | /* load defaults for future use */ | ||
901 | phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? | ||
902 | ((data & CR_1000T_MS_VALUE) ? | ||
903 | e1000_ms_force_master : | ||
904 | e1000_ms_force_slave) : | ||
905 | e1000_ms_auto; | ||
906 | |||
907 | switch (phy->ms_type) { | ||
908 | case e1000_ms_force_master: | ||
909 | data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); | ||
910 | break; | ||
911 | case e1000_ms_force_slave: | ||
912 | data |= CR_1000T_MS_ENABLE; | ||
913 | data &= ~(CR_1000T_MS_VALUE); | ||
914 | break; | ||
915 | case e1000_ms_auto: | ||
916 | data &= ~CR_1000T_MS_ENABLE; | ||
917 | default: | ||
918 | break; | ||
919 | } | ||
920 | ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); | ||
921 | } | ||
922 | |||
923 | return ret_val; | ||
924 | } | ||
925 | |||
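/*
 * Illustrative sketch, not part of the driver: the master/slave encoding
 * applied by the ms_type switch above, expressed as a helper that returns
 * the PHY_1000T_CTRL value which would be written for each setting.
 * Assumes the e1000_ms_type enumeration from hw.h; the helper name is
 * hypothetical.
 */
static u16 example_apply_ms_type(u16 ctrl_1000t, enum e1000_ms_type ms_type)
{
	switch (ms_type) {
	case e1000_ms_force_master:
		/* Manual resolution, advertise as master. */
		return ctrl_1000t | CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE;
	case e1000_ms_force_slave:
		/* Manual resolution, advertise as slave. */
		return (ctrl_1000t | CR_1000T_MS_ENABLE) & ~CR_1000T_MS_VALUE;
	case e1000_ms_auto:
		/* Let the hardware auto-resolve master/slave. */
		return ctrl_1000t & ~CR_1000T_MS_ENABLE;
	default:
		/* Any other setting: leave the register untouched. */
		return ctrl_1000t;
	}
}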
926 | /** | ||
927 | * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation | ||
928 | * @hw: pointer to the HW structure | ||
929 | * | ||
930 | * Reads the MII auto-neg advertisement register and/or the 1000T control | ||
931 | * register and if the PHY is already setup for auto-negotiation, then | ||
932 | * return successful. Otherwise, setup advertisement and flow control to | ||
933 | * the appropriate values for the wanted auto-negotiation. | ||
934 | **/ | ||
935 | static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | ||
936 | { | ||
937 | struct e1000_phy_info *phy = &hw->phy; | ||
938 | s32 ret_val; | ||
939 | u16 mii_autoneg_adv_reg; | ||
940 | u16 mii_1000t_ctrl_reg = 0; | ||
941 | |||
942 | phy->autoneg_advertised &= phy->autoneg_mask; | ||
943 | |||
944 | /* Read the MII Auto-Neg Advertisement Register (Address 4). */ | ||
945 | ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); | ||
946 | if (ret_val) | ||
947 | return ret_val; | ||
948 | |||
949 | if (phy->autoneg_mask & ADVERTISE_1000_FULL) { | ||
950 | /* Read the MII 1000Base-T Control Register (Address 9). */ | ||
951 | ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); | ||
952 | if (ret_val) | ||
953 | return ret_val; | ||
954 | } | ||
955 | |||
956 | /* | ||
957 | * Need to parse both autoneg_advertised and fc and set up | ||
958 | * the appropriate PHY registers. First we will parse for | ||
959 | * autoneg_advertised software override. Since we can advertise | ||
960 | * a plethora of combinations, we need to check each bit | ||
961 | * individually. | ||
962 | */ | ||
963 | |||
964 | /* | ||
965 | * First we clear all the 10/100 mb speed bits in the Auto-Neg | ||
966 | * Advertisement Register (Address 4) and the 1000 mb speed bits in | ||
967 | * the 1000Base-T Control Register (Address 9). | ||
968 | */ | ||
969 | mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | | ||
970 | NWAY_AR_100TX_HD_CAPS | | ||
971 | NWAY_AR_10T_FD_CAPS | | ||
972 | NWAY_AR_10T_HD_CAPS); | ||
973 | mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); | ||
974 | |||
975 | e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); | ||
976 | |||
977 | /* Do we want to advertise 10 Mb Half Duplex? */ | ||
978 | if (phy->autoneg_advertised & ADVERTISE_10_HALF) { | ||
979 | e_dbg("Advertise 10mb Half duplex\n"); | ||
980 | mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; | ||
981 | } | ||
982 | |||
983 | /* Do we want to advertise 10 Mb Full Duplex? */ | ||
984 | if (phy->autoneg_advertised & ADVERTISE_10_FULL) { | ||
985 | e_dbg("Advertise 10mb Full duplex\n"); | ||
986 | mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; | ||
987 | } | ||
988 | |||
989 | /* Do we want to advertise 100 Mb Half Duplex? */ | ||
990 | if (phy->autoneg_advertised & ADVERTISE_100_HALF) { | ||
991 | e_dbg("Advertise 100mb Half duplex\n"); | ||
992 | mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; | ||
993 | } | ||
994 | |||
995 | /* Do we want to advertise 100 Mb Full Duplex? */ | ||
996 | if (phy->autoneg_advertised & ADVERTISE_100_FULL) { | ||
997 | e_dbg("Advertise 100mb Full duplex\n"); | ||
998 | mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; | ||
999 | } | ||
1000 | |||
1001 | /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ | ||
1002 | if (phy->autoneg_advertised & ADVERTISE_1000_HALF) | ||
1003 | e_dbg("Advertise 1000mb Half duplex request denied!\n"); | ||
1004 | |||
1005 | /* Do we want to advertise 1000 Mb Full Duplex? */ | ||
1006 | if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { | ||
1007 | e_dbg("Advertise 1000mb Full duplex\n"); | ||
1008 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; | ||
1009 | } | ||
1010 | |||
1011 | /* | ||
1012 | * Check for a software override of the flow control settings, and | ||
1013 | * setup the PHY advertisement registers accordingly. If | ||
1014 | * auto-negotiation is enabled, then software will have to set the | ||
1015 | * "PAUSE" bits to the correct value in the Auto-Negotiation | ||
1016 | * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- | ||
1017 | * negotiation. | ||
1018 | * | ||
1019 | * The possible values of the "fc" parameter are: | ||
1020 | * 0: Flow control is completely disabled | ||
1021 | * 1: Rx flow control is enabled (we can receive pause frames | ||
1022 | * but not send pause frames). | ||
1023 | * 2: Tx flow control is enabled (we can send pause frames | ||
1024 | * but we do not support receiving pause frames). | ||
1025 | * 3: Both Rx and Tx flow control (symmetric) are enabled. | ||
1026 | * other: No software override. The flow control configuration | ||
1027 | * in the EEPROM is used. | ||
1028 | */ | ||
1029 | switch (hw->fc.current_mode) { | ||
1030 | case e1000_fc_none: | ||
1031 | /* | ||
1032 | * Flow control (Rx & Tx) is completely disabled by a | ||
1033 | * software over-ride. | ||
1034 | */ | ||
1035 | mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | ||
1036 | break; | ||
1037 | case e1000_fc_rx_pause: | ||
1038 | /* | ||
1039 | * Rx Flow control is enabled, and Tx Flow control is | ||
1040 | * disabled, by a software over-ride. | ||
1041 | * | ||
1042 | * Since there really isn't a way to advertise that we are | ||
1043 | * capable of Rx Pause ONLY, we will advertise that we | ||
1044 | * support both symmetric and asymmetric Rx PAUSE. Later | ||
1045 | * (in e1000e_config_fc_after_link_up) we will disable the | ||
1046 | * hw's ability to send PAUSE frames. | ||
1047 | */ | ||
1048 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | ||
1049 | break; | ||
1050 | case e1000_fc_tx_pause: | ||
1051 | /* | ||
1052 | * Tx Flow control is enabled, and Rx Flow control is | ||
1053 | * disabled, by a software over-ride. | ||
1054 | */ | ||
1055 | mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; | ||
1056 | mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; | ||
1057 | break; | ||
1058 | case e1000_fc_full: | ||
1059 | /* | ||
1060 | * Flow control (both Rx and Tx) is enabled by a software | ||
1061 | * over-ride. | ||
1062 | */ | ||
1063 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | ||
1064 | break; | ||
1065 | default: | ||
1066 | e_dbg("Flow control param set incorrectly\n"); | ||
1067 | ret_val = -E1000_ERR_CONFIG; | ||
1068 | return ret_val; | ||
1069 | } | ||
1070 | |||
1071 | ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); | ||
1072 | if (ret_val) | ||
1073 | return ret_val; | ||
1074 | |||
1075 | e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); | ||
1076 | |||
1077 | if (phy->autoneg_mask & ADVERTISE_1000_FULL) | ||
1078 | ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); | ||
1079 | |||
1080 | return ret_val; | ||
1081 | } | ||
1082 | |||
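/*
 * Illustrative sketch, not part of the driver: the PAUSE/ASM_DIR
 * advertisement encoding used by the flow-control switch above, expressed
 * as a helper that rewrites the relevant bits of PHY_AUTONEG_ADV.  The
 * helper name is hypothetical, and an unknown fc mode is simply left with
 * both bits clear here rather than treated as an error.
 */
static u16 example_fc_to_nway_adv(u16 adv_reg, enum e1000_fc_mode fc)
{
	adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);

	switch (fc) {
	case e1000_fc_none:
		/* No pause bits advertised. */
		break;
	case e1000_fc_rx_pause:
	case e1000_fc_full:
		/* Rx-only and symmetric both advertise symmetric+asymmetric. */
		adv_reg |= NWAY_AR_ASM_DIR | NWAY_AR_PAUSE;
		break;
	case e1000_fc_tx_pause:
		/* Asymmetric Tx pause only. */
		adv_reg |= NWAY_AR_ASM_DIR;
		break;
	default:
		break;
	}

	return adv_reg;
}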
1083 | /** | ||
1084 | * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link | ||
1085 | * @hw: pointer to the HW structure | ||
1086 | * | ||
1087 | * Performs initial bounds checking on the autoneg advertisement parameter, | ||
1088 | * then configures the PHY to advertise the full capability. Sets up the PHY | ||
1089 | * for autoneg and restarts negotiation with the link partner. If | ||
1090 | * autoneg_wait_to_complete is set, waits for autoneg to complete before exiting. | ||
1091 | **/ | ||
1092 | static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) | ||
1093 | { | ||
1094 | struct e1000_phy_info *phy = &hw->phy; | ||
1095 | s32 ret_val; | ||
1096 | u16 phy_ctrl; | ||
1097 | |||
1098 | /* | ||
1099 | * Perform some bounds checking on the autoneg advertisement | ||
1100 | * parameter. | ||
1101 | */ | ||
1102 | phy->autoneg_advertised &= phy->autoneg_mask; | ||
1103 | |||
1104 | /* | ||
1105 | * If autoneg_advertised is zero, we assume it was not defaulted | ||
1106 | * by the calling code so we set to advertise full capability. | ||
1107 | */ | ||
1108 | if (phy->autoneg_advertised == 0) | ||
1109 | phy->autoneg_advertised = phy->autoneg_mask; | ||
1110 | |||
1111 | e_dbg("Reconfiguring auto-neg advertisement params\n"); | ||
1112 | ret_val = e1000_phy_setup_autoneg(hw); | ||
1113 | if (ret_val) { | ||
1114 | e_dbg("Error Setting up Auto-Negotiation\n"); | ||
1115 | return ret_val; | ||
1116 | } | ||
1117 | e_dbg("Restarting Auto-Neg\n"); | ||
1118 | |||
1119 | /* | ||
1120 | * Restart auto-negotiation by setting the Auto Neg Enable bit and | ||
1121 | * the Auto Neg Restart bit in the PHY control register. | ||
1122 | */ | ||
1123 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); | ||
1124 | if (ret_val) | ||
1125 | return ret_val; | ||
1126 | |||
1127 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); | ||
1128 | ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl); | ||
1129 | if (ret_val) | ||
1130 | return ret_val; | ||
1131 | |||
1132 | /* | ||
1133 | * Does the user want to wait for Auto-Neg to complete here, or | ||
1134 | * check at a later time (for example, callback routine). | ||
1135 | */ | ||
1136 | if (phy->autoneg_wait_to_complete) { | ||
1137 | ret_val = e1000_wait_autoneg(hw); | ||
1138 | if (ret_val) { | ||
1139 | e_dbg("Error while waiting for " | ||
1140 | "autoneg to complete\n"); | ||
1141 | return ret_val; | ||
1142 | } | ||
1143 | } | ||
1144 | |||
1145 | hw->mac.get_link_status = 1; | ||
1146 | |||
1147 | return ret_val; | ||
1148 | } | ||
1149 | |||
1150 | /** | ||
1151 | * e1000e_setup_copper_link - Configure copper link settings | ||
1152 | * @hw: pointer to the HW structure | ||
1153 | * | ||
1154 | * Calls the appropriate function to configure the link for auto-neg or forced | ||
1155 | * speed and duplex. Then we check for link; once link is established, the | ||
1156 | * collision distance and flow control are configured. If link is | ||
1157 | * not established, we return -E1000_ERR_PHY (-2). | ||
1158 | **/ | ||
1159 | s32 e1000e_setup_copper_link(struct e1000_hw *hw) | ||
1160 | { | ||
1161 | s32 ret_val; | ||
1162 | bool link; | ||
1163 | |||
1164 | if (hw->mac.autoneg) { | ||
1165 | /* | ||
1166 | * Setup autoneg and flow control advertisement and perform | ||
1167 | * autonegotiation. | ||
1168 | */ | ||
1169 | ret_val = e1000_copper_link_autoneg(hw); | ||
1170 | if (ret_val) | ||
1171 | return ret_val; | ||
1172 | } else { | ||
1173 | /* | ||
1174 | * PHY will be set to 10H, 10F, 100H or 100F | ||
1175 | * depending on user settings. | ||
1176 | */ | ||
1177 | e_dbg("Forcing Speed and Duplex\n"); | ||
1178 | ret_val = e1000_phy_force_speed_duplex(hw); | ||
1179 | if (ret_val) { | ||
1180 | e_dbg("Error Forcing Speed and Duplex\n"); | ||
1181 | return ret_val; | ||
1182 | } | ||
1183 | } | ||
1184 | |||
1185 | /* | ||
1186 | * Check link status. Wait up to 100 microseconds for link to become | ||
1187 | * valid. | ||
1188 | */ | ||
1189 | ret_val = e1000e_phy_has_link_generic(hw, | ||
1190 | COPPER_LINK_UP_LIMIT, | ||
1191 | 10, | ||
1192 | &link); | ||
1193 | if (ret_val) | ||
1194 | return ret_val; | ||
1195 | |||
1196 | if (link) { | ||
1197 | e_dbg("Valid link established!!!\n"); | ||
1198 | e1000e_config_collision_dist(hw); | ||
1199 | ret_val = e1000e_config_fc_after_link_up(hw); | ||
1200 | } else { | ||
1201 | e_dbg("Unable to establish link!!!\n"); | ||
1202 | } | ||
1203 | |||
1204 | return ret_val; | ||
1205 | } | ||
1206 | |||
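/*
 * Illustrative sketch, not part of the driver: how a MAC-specific link
 * setup path might select between autoneg and forced operation before
 * delegating to e1000e_setup_copper_link().  The field usage mirrors the
 * function above, but the wrapper itself is hypothetical.
 */
static s32 example_setup_link_copper(struct e1000_hw *hw, bool want_autoneg,
				     u16 forced_speed_duplex)
{
	hw->mac.autoneg = want_autoneg;
	if (!want_autoneg)
		hw->mac.forced_speed_duplex = forced_speed_duplex;

	/* Configures autoneg or forced speed/duplex, then checks for link. */
	return e1000e_setup_copper_link(hw);
}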
1207 | /** | ||
1208 | * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY | ||
1209 | * @hw: pointer to the HW structure | ||
1210 | * | ||
1211 | * Calls the PHY setup function to force speed and duplex. Clears the | ||
1212 | * auto-crossover to force MDI manually. Waits for link and returns | ||
1213 | * successful if link up is successful, else -E1000_ERR_PHY (-2). | ||
1214 | **/ | ||
1215 | s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) | ||
1216 | { | ||
1217 | struct e1000_phy_info *phy = &hw->phy; | ||
1218 | s32 ret_val; | ||
1219 | u16 phy_data; | ||
1220 | bool link; | ||
1221 | |||
1222 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); | ||
1223 | if (ret_val) | ||
1224 | return ret_val; | ||
1225 | |||
1226 | e1000e_phy_force_speed_duplex_setup(hw, &phy_data); | ||
1227 | |||
1228 | ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); | ||
1229 | if (ret_val) | ||
1230 | return ret_val; | ||
1231 | |||
1232 | /* | ||
1233 | * Clear Auto-Crossover to force MDI manually. IGP requires MDI | ||
1234 | * forced whenever speed and duplex are forced. | ||
1235 | */ | ||
1236 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); | ||
1237 | if (ret_val) | ||
1238 | return ret_val; | ||
1239 | |||
1240 | phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; | ||
1241 | phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; | ||
1242 | |||
1243 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); | ||
1244 | if (ret_val) | ||
1245 | return ret_val; | ||
1246 | |||
1247 | e_dbg("IGP PSCR: %X\n", phy_data); | ||
1248 | |||
1249 | udelay(1); | ||
1250 | |||
1251 | if (phy->autoneg_wait_to_complete) { | ||
1252 | e_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); | ||
1253 | |||
1254 | ret_val = e1000e_phy_has_link_generic(hw, | ||
1255 | PHY_FORCE_LIMIT, | ||
1256 | 100000, | ||
1257 | &link); | ||
1258 | if (ret_val) | ||
1259 | return ret_val; | ||
1260 | |||
1261 | if (!link) | ||
1262 | e_dbg("Link taking longer than expected.\n"); | ||
1263 | |||
1264 | /* Try once more */ | ||
1265 | ret_val = e1000e_phy_has_link_generic(hw, | ||
1266 | PHY_FORCE_LIMIT, | ||
1267 | 100000, | ||
1268 | &link); | ||
1269 | if (ret_val) | ||
1270 | return ret_val; | ||
1271 | } | ||
1272 | |||
1273 | return ret_val; | ||
1274 | } | ||
1275 | |||
1276 | /** | ||
1277 | * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY | ||
1278 | * @hw: pointer to the HW structure | ||
1279 | * | ||
1280 | * Calls the PHY setup function to force speed and duplex. Clears the | ||
1281 | * auto-crossover to force MDI manually. Resets the PHY to commit the | ||
1282 | * changes. If time expires while waiting for link up, we reset the DSP. | ||
1283 | * After reset, TX_CLK and CRS on Tx must be set. Return successful upon | ||
1284 | * successful completion, else return corresponding error code. | ||
1285 | **/ | ||
1286 | s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | ||
1287 | { | ||
1288 | struct e1000_phy_info *phy = &hw->phy; | ||
1289 | s32 ret_val; | ||
1290 | u16 phy_data; | ||
1291 | bool link; | ||
1292 | |||
1293 | /* | ||
1294 | * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI | ||
1295 | * forced whenever speed and duplex are forced. | ||
1296 | */ | ||
1297 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | ||
1298 | if (ret_val) | ||
1299 | return ret_val; | ||
1300 | |||
1301 | phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; | ||
1302 | ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); | ||
1303 | if (ret_val) | ||
1304 | return ret_val; | ||
1305 | |||
1306 | e_dbg("M88E1000 PSCR: %X\n", phy_data); | ||
1307 | |||
1308 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); | ||
1309 | if (ret_val) | ||
1310 | return ret_val; | ||
1311 | |||
1312 | e1000e_phy_force_speed_duplex_setup(hw, &phy_data); | ||
1313 | |||
1314 | ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); | ||
1315 | if (ret_val) | ||
1316 | return ret_val; | ||
1317 | |||
1318 | /* Reset the phy to commit changes. */ | ||
1319 | ret_val = e1000e_commit_phy(hw); | ||
1320 | if (ret_val) | ||
1321 | return ret_val; | ||
1322 | |||
1323 | if (phy->autoneg_wait_to_complete) { | ||
1324 | e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); | ||
1325 | |||
1326 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | ||
1327 | 100000, &link); | ||
1328 | if (ret_val) | ||
1329 | return ret_val; | ||
1330 | |||
1331 | if (!link) { | ||
1332 | if (hw->phy.type != e1000_phy_m88) { | ||
1333 | e_dbg("Link taking longer than expected.\n"); | ||
1334 | } else { | ||
1335 | /* | ||
1336 | * We didn't get link. | ||
1337 | * Reset the DSP and cross our fingers. | ||
1338 | */ | ||
1339 | ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, | ||
1340 | 0x001d); | ||
1341 | if (ret_val) | ||
1342 | return ret_val; | ||
1343 | ret_val = e1000e_phy_reset_dsp(hw); | ||
1344 | if (ret_val) | ||
1345 | return ret_val; | ||
1346 | } | ||
1347 | } | ||
1348 | |||
1349 | /* Try once more */ | ||
1350 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | ||
1351 | 100000, &link); | ||
1352 | if (ret_val) | ||
1353 | return ret_val; | ||
1354 | } | ||
1355 | |||
1356 | if (hw->phy.type != e1000_phy_m88) | ||
1357 | return 0; | ||
1358 | |||
1359 | ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); | ||
1360 | if (ret_val) | ||
1361 | return ret_val; | ||
1362 | |||
1363 | /* | ||
1364 | * Resetting the phy means we need to re-force TX_CLK in the | ||
1365 | * Extended PHY Specific Control Register to 25MHz clock from | ||
1366 | * the reset value of 2.5MHz. | ||
1367 | */ | ||
1368 | phy_data |= M88E1000_EPSCR_TX_CLK_25; | ||
1369 | ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); | ||
1370 | if (ret_val) | ||
1371 | return ret_val; | ||
1372 | |||
1373 | /* | ||
1374 | * In addition, we must re-enable CRS on Tx for both half and full | ||
1375 | * duplex. | ||
1376 | */ | ||
1377 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | ||
1378 | if (ret_val) | ||
1379 | return ret_val; | ||
1380 | |||
1381 | phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; | ||
1382 | ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); | ||
1383 | |||
1384 | return ret_val; | ||
1385 | } | ||
1386 | |||
1387 | /** | ||
1388 | * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex | ||
1389 | * @hw: pointer to the HW structure | ||
1390 | * | ||
1391 | * Forces the speed and duplex settings of the PHY. | ||
1392 | * This is a function pointer entry point only called by | ||
1393 | * PHY setup routines. | ||
1394 | **/ | ||
1395 | s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) | ||
1396 | { | ||
1397 | struct e1000_phy_info *phy = &hw->phy; | ||
1398 | s32 ret_val; | ||
1399 | u16 data; | ||
1400 | bool link; | ||
1401 | |||
1402 | ret_val = e1e_rphy(hw, PHY_CONTROL, &data); | ||
1403 | if (ret_val) | ||
1404 | goto out; | ||
1405 | |||
1406 | e1000e_phy_force_speed_duplex_setup(hw, &data); | ||
1407 | |||
1408 | ret_val = e1e_wphy(hw, PHY_CONTROL, data); | ||
1409 | if (ret_val) | ||
1410 | goto out; | ||
1411 | |||
1412 | /* Disable MDI-X support for 10/100 */ | ||
1413 | ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); | ||
1414 | if (ret_val) | ||
1415 | goto out; | ||
1416 | |||
1417 | data &= ~IFE_PMC_AUTO_MDIX; | ||
1418 | data &= ~IFE_PMC_FORCE_MDIX; | ||
1419 | |||
1420 | ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); | ||
1421 | if (ret_val) | ||
1422 | goto out; | ||
1423 | |||
1424 | e_dbg("IFE PMC: %X\n", data); | ||
1425 | |||
1426 | udelay(1); | ||
1427 | |||
1428 | if (phy->autoneg_wait_to_complete) { | ||
1429 | e_dbg("Waiting for forced speed/duplex link on IFE phy.\n"); | ||
1430 | |||
1431 | ret_val = e1000e_phy_has_link_generic(hw, | ||
1432 | PHY_FORCE_LIMIT, | ||
1433 | 100000, | ||
1434 | &link); | ||
1435 | if (ret_val) | ||
1436 | goto out; | ||
1437 | |||
1438 | if (!link) | ||
1439 | e_dbg("Link taking longer than expected.\n"); | ||
1440 | |||
1441 | /* Try once more */ | ||
1442 | ret_val = e1000e_phy_has_link_generic(hw, | ||
1443 | PHY_FORCE_LIMIT, | ||
1444 | 100000, | ||
1445 | &link); | ||
1446 | if (ret_val) | ||
1447 | goto out; | ||
1448 | } | ||
1449 | |||
1450 | out: | ||
1451 | return ret_val; | ||
1452 | } | ||
1453 | |||
1454 | /** | ||
1455 | * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex | ||
1456 | * @hw: pointer to the HW structure | ||
1457 | * @phy_ctrl: pointer to current value of PHY_CONTROL | ||
1458 | * | ||
1459 | * Forces speed and duplex on the PHY by doing the following: disable flow | ||
1460 | * control, force speed/duplex on the MAC, disable auto speed detection, | ||
1461 | * disable auto-negotiation, configure duplex, configure speed, configure | ||
1462 | * the collision distance, write configuration to CTRL register. The | ||
1463 | * caller must write to the PHY_CONTROL register for these settings to | ||
1464 | * take affect. | ||
1465 | **/ | ||
1466 | void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) | ||
1467 | { | ||
1468 | struct e1000_mac_info *mac = &hw->mac; | ||
1469 | u32 ctrl; | ||
1470 | |||
1471 | /* Turn off flow control when forcing speed/duplex */ | ||
1472 | hw->fc.current_mode = e1000_fc_none; | ||
1473 | |||
1474 | /* Force speed/duplex on the mac */ | ||
1475 | ctrl = er32(CTRL); | ||
1476 | ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | ||
1477 | ctrl &= ~E1000_CTRL_SPD_SEL; | ||
1478 | |||
1479 | /* Disable Auto Speed Detection */ | ||
1480 | ctrl &= ~E1000_CTRL_ASDE; | ||
1481 | |||
1482 | /* Disable autoneg on the phy */ | ||
1483 | *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; | ||
1484 | |||
1485 | /* Forcing Full or Half Duplex? */ | ||
1486 | if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { | ||
1487 | ctrl &= ~E1000_CTRL_FD; | ||
1488 | *phy_ctrl &= ~MII_CR_FULL_DUPLEX; | ||
1489 | e_dbg("Half Duplex\n"); | ||
1490 | } else { | ||
1491 | ctrl |= E1000_CTRL_FD; | ||
1492 | *phy_ctrl |= MII_CR_FULL_DUPLEX; | ||
1493 | e_dbg("Full Duplex\n"); | ||
1494 | } | ||
1495 | |||
1496 | /* Forcing 10mb or 100mb? */ | ||
1497 | if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { | ||
1498 | ctrl |= E1000_CTRL_SPD_100; | ||
1499 | *phy_ctrl |= MII_CR_SPEED_100; | ||
1500 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); | ||
1501 | e_dbg("Forcing 100mb\n"); | ||
1502 | } else { | ||
1503 | ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); | ||
1504 | *phy_ctrl |= MII_CR_SPEED_10; | ||
1505 | *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); | ||
1506 | e_dbg("Forcing 10mb\n"); | ||
1507 | } | ||
1508 | |||
1509 | e1000e_config_collision_dist(hw); | ||
1510 | |||
1511 | ew32(CTRL, ctrl); | ||
1512 | } | ||
1513 | |||
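/*
 * Illustrative sketch, not part of the driver: the read/modify/write
 * pattern a caller follows around e1000e_phy_force_speed_duplex_setup(),
 * mirroring the igp/m88/ife routines above, since the helper only edits
 * the in-memory PHY_CONTROL value and the MAC CTRL register.  The wrapper
 * name is hypothetical.
 */
static s32 example_force_speed_duplex(struct e1000_hw *hw)
{
	u16 phy_ctrl;
	s32 ret_val;

	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
	if (ret_val)
		return ret_val;

	/* Adjusts MAC CTRL and the PHY control image for forced operation. */
	e1000e_phy_force_speed_duplex_setup(hw, &phy_ctrl);

	/* The new PHY_CONTROL value only takes effect once written back. */
	return e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
}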
1514 | /** | ||
1515 | * e1000e_set_d3_lplu_state - Sets low power link up state for D3 | ||
1516 | * @hw: pointer to the HW structure | ||
1517 | * @active: boolean used to enable/disable lplu | ||
1518 | * | ||
1519 | * Success returns 0, failure returns a negative error code | ||
1520 | * | ||
1521 | * The low power link up (lplu) state is set to the power management level D3 | ||
1522 | * and SmartSpeed is disabled when active is true, else clear lplu for D3 | ||
1523 | * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU | ||
1524 | * is used during Dx states where the power conservation is most important. | ||
1525 | * During driver activity, SmartSpeed should be enabled so performance is | ||
1526 | * maintained. | ||
1527 | **/ | ||
1528 | s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active) | ||
1529 | { | ||
1530 | struct e1000_phy_info *phy = &hw->phy; | ||
1531 | s32 ret_val; | ||
1532 | u16 data; | ||
1533 | |||
1534 | ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); | ||
1535 | if (ret_val) | ||
1536 | return ret_val; | ||
1537 | |||
1538 | if (!active) { | ||
1539 | data &= ~IGP02E1000_PM_D3_LPLU; | ||
1540 | ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); | ||
1541 | if (ret_val) | ||
1542 | return ret_val; | ||
1543 | /* | ||
1544 | * LPLU and SmartSpeed are mutually exclusive. LPLU is used | ||
1545 | * during Dx states where the power conservation is most | ||
1546 | * important. During driver activity we should enable | ||
1547 | * SmartSpeed, so performance is maintained. | ||
1548 | */ | ||
1549 | if (phy->smart_speed == e1000_smart_speed_on) { | ||
1550 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
1551 | &data); | ||
1552 | if (ret_val) | ||
1553 | return ret_val; | ||
1554 | |||
1555 | data |= IGP01E1000_PSCFR_SMART_SPEED; | ||
1556 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
1557 | data); | ||
1558 | if (ret_val) | ||
1559 | return ret_val; | ||
1560 | } else if (phy->smart_speed == e1000_smart_speed_off) { | ||
1561 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
1562 | &data); | ||
1563 | if (ret_val) | ||
1564 | return ret_val; | ||
1565 | |||
1566 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | ||
1567 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | ||
1568 | data); | ||
1569 | if (ret_val) | ||
1570 | return ret_val; | ||
1571 | } | ||
1572 | } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || | ||
1573 | (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || | ||
1574 | (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { | ||
1575 | data |= IGP02E1000_PM_D3_LPLU; | ||
1576 | ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); | ||
1577 | if (ret_val) | ||
1578 | return ret_val; | ||
1579 | |||
1580 | /* When LPLU is enabled, we should disable SmartSpeed */ | ||
1581 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); | ||
1582 | if (ret_val) | ||
1583 | return ret_val; | ||
1584 | |||
1585 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | ||
1586 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); | ||
1587 | } | ||
1588 | |||
1589 | return ret_val; | ||
1590 | } | ||
1591 | |||
1592 | /** | ||
1593 | * e1000e_check_downshift - Checks whether a downshift in speed occurred | ||
1594 | * @hw: pointer to the HW structure | ||
1595 | * | ||
1596 | * Success returns 0, failure returns a negative error code | ||
1597 | * | ||
1598 | * A downshift is detected by querying the PHY link health. | ||
1599 | **/ | ||
1600 | s32 e1000e_check_downshift(struct e1000_hw *hw) | ||
1601 | { | ||
1602 | struct e1000_phy_info *phy = &hw->phy; | ||
1603 | s32 ret_val; | ||
1604 | u16 phy_data, offset, mask; | ||
1605 | |||
1606 | switch (phy->type) { | ||
1607 | case e1000_phy_m88: | ||
1608 | case e1000_phy_gg82563: | ||
1609 | case e1000_phy_bm: | ||
1610 | case e1000_phy_82578: | ||
1611 | offset = M88E1000_PHY_SPEC_STATUS; | ||
1612 | mask = M88E1000_PSSR_DOWNSHIFT; | ||
1613 | break; | ||
1614 | case e1000_phy_igp_2: | ||
1615 | case e1000_phy_igp_3: | ||
1616 | offset = IGP01E1000_PHY_LINK_HEALTH; | ||
1617 | mask = IGP01E1000_PLHR_SS_DOWNGRADE; | ||
1618 | break; | ||
1619 | default: | ||
1620 | /* speed downshift not supported */ | ||
1621 | phy->speed_downgraded = false; | ||
1622 | return 0; | ||
1623 | } | ||
1624 | |||
1625 | ret_val = e1e_rphy(hw, offset, &phy_data); | ||
1626 | |||
1627 | if (!ret_val) | ||
1628 | phy->speed_downgraded = (phy_data & mask); | ||
1629 | |||
1630 | return ret_val; | ||
1631 | } | ||
1632 | |||
1633 | /** | ||
1634 | * e1000_check_polarity_m88 - Checks the polarity. | ||
1635 | * @hw: pointer to the HW structure | ||
1636 | * | ||
1637 | * Success returns 0, Failure returns -E1000_ERR_PHY (-2) | ||
1638 | * | ||
1639 | * Polarity is determined based on the PHY specific status register. | ||
1640 | **/ | ||
1641 | s32 e1000_check_polarity_m88(struct e1000_hw *hw) | ||
1642 | { | ||
1643 | struct e1000_phy_info *phy = &hw->phy; | ||
1644 | s32 ret_val; | ||
1645 | u16 data; | ||
1646 | |||
1647 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); | ||
1648 | |||
1649 | if (!ret_val) | ||
1650 | phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) | ||
1651 | ? e1000_rev_polarity_reversed | ||
1652 | : e1000_rev_polarity_normal; | ||
1653 | |||
1654 | return ret_val; | ||
1655 | } | ||
1656 | |||
1657 | /** | ||
1658 | * e1000_check_polarity_igp - Checks the polarity. | ||
1659 | * @hw: pointer to the HW structure | ||
1660 | * | ||
1661 | * Success returns 0, Failure returns -E1000_ERR_PHY (-2) | ||
1662 | * | ||
1663 | * Polarity is determined based on the PHY port status register, and the | ||
1664 | * current speed (since there is no polarity at 100Mbps). | ||
1665 | **/ | ||
1666 | s32 e1000_check_polarity_igp(struct e1000_hw *hw) | ||
1667 | { | ||
1668 | struct e1000_phy_info *phy = &hw->phy; | ||
1669 | s32 ret_val; | ||
1670 | u16 data, offset, mask; | ||
1671 | |||
1672 | /* | ||
1673 | * Polarity is determined based on the speed of | ||
1674 | * our connection. | ||
1675 | */ | ||
1676 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); | ||
1677 | if (ret_val) | ||
1678 | return ret_val; | ||
1679 | |||
1680 | if ((data & IGP01E1000_PSSR_SPEED_MASK) == | ||
1681 | IGP01E1000_PSSR_SPEED_1000MBPS) { | ||
1682 | offset = IGP01E1000_PHY_PCS_INIT_REG; | ||
1683 | mask = IGP01E1000_PHY_POLARITY_MASK; | ||
1684 | } else { | ||
1685 | /* | ||
1686 | * This really only applies to 10Mbps since | ||
1687 | * there is no polarity for 100Mbps (always 0). | ||
1688 | */ | ||
1689 | offset = IGP01E1000_PHY_PORT_STATUS; | ||
1690 | mask = IGP01E1000_PSSR_POLARITY_REVERSED; | ||
1691 | } | ||
1692 | |||
1693 | ret_val = e1e_rphy(hw, offset, &data); | ||
1694 | |||
1695 | if (!ret_val) | ||
1696 | phy->cable_polarity = (data & mask) | ||
1697 | ? e1000_rev_polarity_reversed | ||
1698 | : e1000_rev_polarity_normal; | ||
1699 | |||
1700 | return ret_val; | ||
1701 | } | ||
1702 | |||
1703 | /** | ||
1704 | * e1000_check_polarity_ife - Check cable polarity for IFE PHY | ||
1705 | * @hw: pointer to the HW structure | ||
1706 | * | ||
1707 | * Polarity is determined on the polarity reversal feature being enabled. | ||
1708 | **/ | ||
1709 | s32 e1000_check_polarity_ife(struct e1000_hw *hw) | ||
1710 | { | ||
1711 | struct e1000_phy_info *phy = &hw->phy; | ||
1712 | s32 ret_val; | ||
1713 | u16 phy_data, offset, mask; | ||
1714 | |||
1715 | /* | ||
1716 | * Polarity is determined based on the reversal feature being enabled. | ||
1717 | */ | ||
1718 | if (phy->polarity_correction) { | ||
1719 | offset = IFE_PHY_EXTENDED_STATUS_CONTROL; | ||
1720 | mask = IFE_PESC_POLARITY_REVERSED; | ||
1721 | } else { | ||
1722 | offset = IFE_PHY_SPECIAL_CONTROL; | ||
1723 | mask = IFE_PSC_FORCE_POLARITY; | ||
1724 | } | ||
1725 | |||
1726 | ret_val = e1e_rphy(hw, offset, &phy_data); | ||
1727 | |||
1728 | if (!ret_val) | ||
1729 | phy->cable_polarity = (phy_data & mask) | ||
1730 | ? e1000_rev_polarity_reversed | ||
1731 | : e1000_rev_polarity_normal; | ||
1732 | |||
1733 | return ret_val; | ||
1734 | } | ||
1735 | |||
1736 | /** | ||
1737 | * e1000_wait_autoneg - Wait for auto-neg completion | ||
1738 | * @hw: pointer to the HW structure | ||
1739 | * | ||
1740 | * Waits for auto-negotiation to complete or for the auto-negotiation time | ||
1741 | * limit to expire, whichever happens first. | ||
1742 | **/ | ||
1743 | static s32 e1000_wait_autoneg(struct e1000_hw *hw) | ||
1744 | { | ||
1745 | s32 ret_val = 0; | ||
1746 | u16 i, phy_status; | ||
1747 | |||
1748 | /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ | ||
1749 | for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { | ||
1750 | ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); | ||
1751 | if (ret_val) | ||
1752 | break; | ||
1753 | ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); | ||
1754 | if (ret_val) | ||
1755 | break; | ||
1756 | if (phy_status & MII_SR_AUTONEG_COMPLETE) | ||
1757 | break; | ||
1758 | msleep(100); | ||
1759 | } | ||
1760 | |||
1761 | /* | ||
1762 | * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation | ||
1763 | * has completed. | ||
1764 | */ | ||
1765 | return ret_val; | ||
1766 | } | ||
1767 | |||
1768 | /** | ||
1769 | * e1000e_phy_has_link_generic - Polls PHY for link | ||
1770 | * @hw: pointer to the HW structure | ||
1771 | * @iterations: number of times to poll for link | ||
1772 | * @usec_interval: delay between polling attempts | ||
1773 | * @success: pointer to whether polling was successful or not | ||
1774 | * | ||
1775 | * Polls the PHY status register for link, 'iterations' number of times. | ||
1776 | **/ | ||
1777 | s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, | ||
1778 | u32 usec_interval, bool *success) | ||
1779 | { | ||
1780 | s32 ret_val = 0; | ||
1781 | u16 i, phy_status; | ||
1782 | |||
1783 | for (i = 0; i < iterations; i++) { | ||
1784 | /* | ||
1785 | * Some PHYs require the PHY_STATUS register to be read | ||
1786 | * twice due to the link bit being sticky. No harm doing | ||
1787 | * it across the board. | ||
1788 | */ | ||
1789 | ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); | ||
1790 | if (ret_val) | ||
1791 | /* | ||
1792 | * If the first read fails, another entity may have | ||
1793 | * ownership of the resources, wait and try again to | ||
1794 | * see if they have relinquished the resources yet. | ||
1795 | */ | ||
1796 | udelay(usec_interval); | ||
1797 | ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); | ||
1798 | if (ret_val) | ||
1799 | break; | ||
1800 | if (phy_status & MII_SR_LINK_STATUS) | ||
1801 | break; | ||
1802 | if (usec_interval >= 1000) | ||
1803 | mdelay(usec_interval/1000); | ||
1804 | else | ||
1805 | udelay(usec_interval); | ||
1806 | } | ||
1807 | |||
1808 | *success = (i < iterations); | ||
1809 | |||
1810 | return ret_val; | ||
1811 | } | ||
1812 | |||
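/*
 * Illustrative sketch, not part of the driver: polling for link with
 * e1000e_phy_has_link_generic(), as e1000e_setup_copper_link() does above
 * with COPPER_LINK_UP_LIMIT attempts at 10 microsecond intervals.  The
 * wrapper name is hypothetical.
 */
static s32 example_wait_for_link(struct e1000_hw *hw, u32 attempts,
				 u32 usec_between, bool *link_up)
{
	s32 ret_val;

	*link_up = false;
	ret_val = e1000e_phy_has_link_generic(hw, attempts, usec_between,
					      link_up);
	if (ret_val)
		return ret_val;

	if (!*link_up)
		e_dbg("Link did not come up within the polling window.\n");

	return 0;
}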
1813 | /** | ||
1814 | * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY | ||
1815 | * @hw: pointer to the HW structure | ||
1816 | * | ||
1817 | * Reads the PHY specific status register to retrieve the cable length | ||
1818 | * information. The cable length is determined by averaging the minimum and | ||
1819 | * maximum values to get the "average" cable length. The m88 PHY reports one | ||
1820 | * of five possible cable length ranges: | ||
1821 | * Register Value Cable Length | ||
1822 | * 0 < 50 meters | ||
1823 | * 1 50 - 80 meters | ||
1824 | * 2 80 - 110 meters | ||
1825 | * 3 110 - 140 meters | ||
1826 | * 4 > 140 meters | ||
1827 | **/ | ||
1828 | s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) | ||
1829 | { | ||
1830 | struct e1000_phy_info *phy = &hw->phy; | ||
1831 | s32 ret_val; | ||
1832 | u16 phy_data, index; | ||
1833 | |||
1834 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); | ||
1835 | if (ret_val) | ||
1836 | goto out; | ||
1837 | |||
1838 | index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> | ||
1839 | M88E1000_PSSR_CABLE_LENGTH_SHIFT; | ||
1840 | if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { | ||
1841 | ret_val = -E1000_ERR_PHY; | ||
1842 | goto out; | ||
1843 | } | ||
1844 | |||
1845 | phy->min_cable_length = e1000_m88_cable_length_table[index]; | ||
1846 | phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; | ||
1847 | |||
1848 | phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; | ||
1849 | |||
1850 | out: | ||
1851 | return ret_val; | ||
1852 | } | ||
1853 | |||
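/*
 * Illustrative sketch, not part of the driver: decoding the M88 cable
 * length index from the PHY specific status value and turning it into the
 * min/max pair used above.  The helper name is hypothetical; the table and
 * bounds check follow the function above.
 */
static s32 example_m88_cable_range(u16 spec_status, u16 *min_m, u16 *max_m)
{
	u16 index = (spec_status & M88E1000_PSSR_CABLE_LENGTH) >>
		    M88E1000_PSSR_CABLE_LENGTH_SHIFT;

	/* Both index and index + 1 must stay inside the lookup table. */
	if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
		return -E1000_ERR_PHY;

	*min_m = e1000_m88_cable_length_table[index];
	*max_m = e1000_m88_cable_length_table[index + 1];

	return 0;
}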
1854 | /** | ||
1855 | * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY | ||
1856 | * @hw: pointer to the HW structure | ||
1857 | * | ||
1858 | * The automatic gain control (agc) normalizes the amplitude of the | ||
1859 | * received signal, adjusting for the attenuation produced by the | ||
1860 | * cable. By reading the AGC registers, which represent the | ||
1861 | * combination of coarse and fine gain value, the value can be put | ||
1862 | * into a lookup table to obtain the approximate cable length | ||
1863 | * for each channel. | ||
1864 | **/ | ||
1865 | s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) | ||
1866 | { | ||
1867 | struct e1000_phy_info *phy = &hw->phy; | ||
1868 | s32 ret_val; | ||
1869 | u16 phy_data, i, agc_value = 0; | ||
1870 | u16 cur_agc_index, max_agc_index = 0; | ||
1871 | u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; | ||
1872 | static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { | ||
1873 | IGP02E1000_PHY_AGC_A, | ||
1874 | IGP02E1000_PHY_AGC_B, | ||
1875 | IGP02E1000_PHY_AGC_C, | ||
1876 | IGP02E1000_PHY_AGC_D | ||
1877 | }; | ||
1878 | |||
1879 | /* Read the AGC registers for all channels */ | ||
1880 | for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { | ||
1881 | ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data); | ||
1882 | if (ret_val) | ||
1883 | return ret_val; | ||
1884 | |||
1885 | /* | ||
1886 | * Getting bits 15:9, which represent the combination of | ||
1887 | * coarse and fine gain values. The result is a number | ||
1888 | * that can be put into the lookup table to obtain the | ||
1889 | * approximate cable length. | ||
1890 | */ | ||
1891 | cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & | ||
1892 | IGP02E1000_AGC_LENGTH_MASK; | ||
1893 | |||
1894 | /* Array index bound check. */ | ||
1895 | if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || | ||
1896 | (cur_agc_index == 0)) | ||
1897 | return -E1000_ERR_PHY; | ||
1898 | |||
1899 | /* Remove min & max AGC values from calculation. */ | ||
1900 | if (e1000_igp_2_cable_length_table[min_agc_index] > | ||
1901 | e1000_igp_2_cable_length_table[cur_agc_index]) | ||
1902 | min_agc_index = cur_agc_index; | ||
1903 | if (e1000_igp_2_cable_length_table[max_agc_index] < | ||
1904 | e1000_igp_2_cable_length_table[cur_agc_index]) | ||
1905 | max_agc_index = cur_agc_index; | ||
1906 | |||
1907 | agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; | ||
1908 | } | ||
1909 | |||
1910 | agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + | ||
1911 | e1000_igp_2_cable_length_table[max_agc_index]); | ||
1912 | agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); | ||
1913 | |||
1914 | /* Calculate cable length with the error range of +/- 10 meters. */ | ||
1915 | phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? | ||
1916 | (agc_value - IGP02E1000_AGC_RANGE) : 0; | ||
1917 | phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; | ||
1918 | |||
1919 | phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; | ||
1920 | |||
1921 | return ret_val; | ||
1922 | } | ||
1923 | |||
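/*
 * Illustrative sketch, not part of the driver: the averaging performed
 * above for the IGP2 cable length.  Given the four per-channel table
 * values, the smallest and largest are dropped, the remaining two are
 * averaged, and the result is widened by +/- IGP02E1000_AGC_RANGE.  The
 * helper name and the explicit four-entry array are hypothetical.
 */
static void example_igp2_length_range(const u16 chan_len[4], u16 *min_m,
				      u16 *max_m)
{
	u16 i, sum = 0, lo = chan_len[0], hi = chan_len[0];

	for (i = 0; i < 4; i++) {
		sum += chan_len[i];
		if (chan_len[i] < lo)
			lo = chan_len[i];
		if (chan_len[i] > hi)
			hi = chan_len[i];
	}

	/* Drop the extremes and average the two middle channels. */
	sum -= lo + hi;
	sum /= 2;

	*min_m = (sum > IGP02E1000_AGC_RANGE) ? sum - IGP02E1000_AGC_RANGE : 0;
	*max_m = sum + IGP02E1000_AGC_RANGE;
}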
1924 | /** | ||
1925 | * e1000e_get_phy_info_m88 - Retrieve PHY information | ||
1926 | * @hw: pointer to the HW structure | ||
1927 | * | ||
1928 | * Valid only for copper links. Read the PHY status register (sticky read) | ||
1929 | * to verify that link is up. Read the PHY special control register to | ||
1930 | * determine the polarity and 10base-T extended distance. Read the PHY | ||
1931 | * special status register to determine MDI/MDIx and current speed. If | ||
1932 | * speed is 1000, then determine cable length, local and remote receiver. | ||
1933 | **/ | ||
1934 | s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) | ||
1935 | { | ||
1936 | struct e1000_phy_info *phy = &hw->phy; | ||
1937 | s32 ret_val; | ||
1938 | u16 phy_data; | ||
1939 | bool link; | ||
1940 | |||
1941 | if (phy->media_type != e1000_media_type_copper) { | ||
1942 | e_dbg("Phy info is only valid for copper media\n"); | ||
1943 | return -E1000_ERR_CONFIG; | ||
1944 | } | ||
1945 | |||
1946 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | ||
1947 | if (ret_val) | ||
1948 | return ret_val; | ||
1949 | |||
1950 | if (!link) { | ||
1951 | e_dbg("Phy info is only valid if link is up\n"); | ||
1952 | return -E1000_ERR_CONFIG; | ||
1953 | } | ||
1954 | |||
1955 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | ||
1956 | if (ret_val) | ||
1957 | return ret_val; | ||
1958 | |||
1959 | phy->polarity_correction = (phy_data & | ||
1960 | M88E1000_PSCR_POLARITY_REVERSAL); | ||
1961 | |||
1962 | ret_val = e1000_check_polarity_m88(hw); | ||
1963 | if (ret_val) | ||
1964 | return ret_val; | ||
1965 | |||
1966 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); | ||
1967 | if (ret_val) | ||
1968 | return ret_val; | ||
1969 | |||
1970 | phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX); | ||
1971 | |||
1972 | if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { | ||
1973 | ret_val = e1000_get_cable_length(hw); | ||
1974 | if (ret_val) | ||
1975 | return ret_val; | ||
1976 | |||
1977 | ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); | ||
1978 | if (ret_val) | ||
1979 | return ret_val; | ||
1980 | |||
1981 | phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) | ||
1982 | ? e1000_1000t_rx_status_ok | ||
1983 | : e1000_1000t_rx_status_not_ok; | ||
1984 | |||
1985 | phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) | ||
1986 | ? e1000_1000t_rx_status_ok | ||
1987 | : e1000_1000t_rx_status_not_ok; | ||
1988 | } else { | ||
1989 | /* Set values to "undefined" */ | ||
1990 | phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; | ||
1991 | phy->local_rx = e1000_1000t_rx_status_undefined; | ||
1992 | phy->remote_rx = e1000_1000t_rx_status_undefined; | ||
1993 | } | ||
1994 | |||
1995 | return ret_val; | ||
1996 | } | ||
1997 | |||
1998 | /** | ||
1999 | * e1000e_get_phy_info_igp - Retrieve igp PHY information | ||
2000 | * @hw: pointer to the HW structure | ||
2001 | * | ||
2002 | * Read PHY status to determine if link is up. If link is up, then | ||
2003 | * set/determine 10base-T extended distance and polarity correction. Read | ||
2004 | * PHY port status to determine MDI/MDIx and speed. Based on the speed, | ||
2005 | * determine the cable length, local and remote receiver status. | ||
2006 | **/ | ||
2007 | s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) | ||
2008 | { | ||
2009 | struct e1000_phy_info *phy = &hw->phy; | ||
2010 | s32 ret_val; | ||
2011 | u16 data; | ||
2012 | bool link; | ||
2013 | |||
2014 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | ||
2015 | if (ret_val) | ||
2016 | return ret_val; | ||
2017 | |||
2018 | if (!link) { | ||
2019 | e_dbg("Phy info is only valid if link is up\n"); | ||
2020 | return -E1000_ERR_CONFIG; | ||
2021 | } | ||
2022 | |||
2023 | phy->polarity_correction = true; | ||
2024 | |||
2025 | ret_val = e1000_check_polarity_igp(hw); | ||
2026 | if (ret_val) | ||
2027 | return ret_val; | ||
2028 | |||
2029 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); | ||
2030 | if (ret_val) | ||
2031 | return ret_val; | ||
2032 | |||
2033 | phy->is_mdix = (data & IGP01E1000_PSSR_MDIX); | ||
2034 | |||
2035 | if ((data & IGP01E1000_PSSR_SPEED_MASK) == | ||
2036 | IGP01E1000_PSSR_SPEED_1000MBPS) { | ||
2037 | ret_val = e1000_get_cable_length(hw); | ||
2038 | if (ret_val) | ||
2039 | return ret_val; | ||
2040 | |||
2041 | ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); | ||
2042 | if (ret_val) | ||
2043 | return ret_val; | ||
2044 | |||
2045 | phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) | ||
2046 | ? e1000_1000t_rx_status_ok | ||
2047 | : e1000_1000t_rx_status_not_ok; | ||
2048 | |||
2049 | phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) | ||
2050 | ? e1000_1000t_rx_status_ok | ||
2051 | : e1000_1000t_rx_status_not_ok; | ||
2052 | } else { | ||
2053 | phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; | ||
2054 | phy->local_rx = e1000_1000t_rx_status_undefined; | ||
2055 | phy->remote_rx = e1000_1000t_rx_status_undefined; | ||
2056 | } | ||
2057 | |||
2058 | return ret_val; | ||
2059 | } | ||
2060 | |||
2061 | /** | ||
2062 | * e1000_get_phy_info_ife - Retrieves various IFE PHY states | ||
2063 | * @hw: pointer to the HW structure | ||
2064 | * | ||
2065 | * Populates "phy" structure with various feature states. | ||
2066 | **/ | ||
2067 | s32 e1000_get_phy_info_ife(struct e1000_hw *hw) | ||
2068 | { | ||
2069 | struct e1000_phy_info *phy = &hw->phy; | ||
2070 | s32 ret_val; | ||
2071 | u16 data; | ||
2072 | bool link; | ||
2073 | |||
2074 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | ||
2075 | if (ret_val) | ||
2076 | goto out; | ||
2077 | |||
2078 | if (!link) { | ||
2079 | e_dbg("Phy info is only valid if link is up\n"); | ||
2080 | ret_val = -E1000_ERR_CONFIG; | ||
2081 | goto out; | ||
2082 | } | ||
2083 | |||
2084 | ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); | ||
2085 | if (ret_val) | ||
2086 | goto out; | ||
2087 | phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE) | ||
2088 | ? false : true; | ||
2089 | |||
2090 | if (phy->polarity_correction) { | ||
2091 | ret_val = e1000_check_polarity_ife(hw); | ||
2092 | if (ret_val) | ||
2093 | goto out; | ||
2094 | } else { | ||
2095 | /* Polarity is forced */ | ||
2096 | phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) | ||
2097 | ? e1000_rev_polarity_reversed | ||
2098 | : e1000_rev_polarity_normal; | ||
2099 | } | ||
2100 | |||
2101 | ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); | ||
2102 | if (ret_val) | ||
2103 | goto out; | ||
2104 | |||
2105 | phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false; | ||
2106 | |||
2107 | /* The following parameters are undefined for 10/100 operation. */ | ||
2108 | phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; | ||
2109 | phy->local_rx = e1000_1000t_rx_status_undefined; | ||
2110 | phy->remote_rx = e1000_1000t_rx_status_undefined; | ||
2111 | |||
2112 | out: | ||
2113 | return ret_val; | ||
2114 | } | ||
2115 | |||
2116 | /** | ||
2117 | * e1000e_phy_sw_reset - PHY software reset | ||
2118 | * @hw: pointer to the HW structure | ||
2119 | * | ||
2120 | * Does a software reset of the PHY by reading the PHY control register and | ||
2121 | * setting/writing the control register reset bit to the PHY. | ||
2122 | **/ | ||
2123 | s32 e1000e_phy_sw_reset(struct e1000_hw *hw) | ||
2124 | { | ||
2125 | s32 ret_val; | ||
2126 | u16 phy_ctrl; | ||
2127 | |||
2128 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); | ||
2129 | if (ret_val) | ||
2130 | return ret_val; | ||
2131 | |||
2132 | phy_ctrl |= MII_CR_RESET; | ||
2133 | ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl); | ||
2134 | if (ret_val) | ||
2135 | return ret_val; | ||
2136 | |||
2137 | udelay(1); | ||
2138 | |||
2139 | return ret_val; | ||
2140 | } | ||
2141 | |||
2142 | /** | ||
2143 | * e1000e_phy_hw_reset_generic - PHY hardware reset | ||
2144 | * @hw: pointer to the HW structure | ||
2145 | * | ||
2146 | * Verify the reset block is not blocking us from resetting. Acquire | ||
2147 | * semaphore (if necessary) and read/set/write the PHY reset bit in the | ||
2148 | * device control register. Wait the appropriate delay time for the device to | ||
2149 | * reset and release the semaphore (if necessary). | ||
2150 | **/ | ||
2151 | s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) | ||
2152 | { | ||
2153 | struct e1000_phy_info *phy = &hw->phy; | ||
2154 | s32 ret_val; | ||
2155 | u32 ctrl; | ||
2156 | |||
2157 | ret_val = e1000_check_reset_block(hw); | ||
2158 | if (ret_val) | ||
2159 | return 0; | ||
2160 | |||
2161 | ret_val = phy->ops.acquire(hw); | ||
2162 | if (ret_val) | ||
2163 | return ret_val; | ||
2164 | |||
2165 | ctrl = er32(CTRL); | ||
2166 | ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); | ||
2167 | e1e_flush(); | ||
2168 | |||
2169 | udelay(phy->reset_delay_us); | ||
2170 | |||
2171 | ew32(CTRL, ctrl); | ||
2172 | e1e_flush(); | ||
2173 | |||
2174 | udelay(150); | ||
2175 | |||
2176 | phy->ops.release(hw); | ||
2177 | |||
2178 | return e1000_get_phy_cfg_done(hw); | ||
2179 | } | ||
2180 | |||
2181 | /** | ||
2182 | * e1000e_get_cfg_done - Generic configuration done | ||
2183 | * @hw: pointer to the HW structure | ||
2184 | * | ||
2185 | * Generic function to wait 10 milliseconds for configuration to complete | ||
2186 | * and return success. | ||
2187 | **/ | ||
2188 | s32 e1000e_get_cfg_done(struct e1000_hw *hw) | ||
2189 | { | ||
2190 | mdelay(10); | ||
2191 | return 0; | ||
2192 | } | ||
2193 | |||
2194 | /** | ||
2195 | * e1000e_phy_init_script_igp3 - Inits the IGP3 PHY | ||
2196 | * @hw: pointer to the HW structure | ||
2197 | * | ||
2198 | * Initializes an Intel Gigabit PHY3 when an EEPROM is not present. | ||
2199 | **/ | ||
2200 | s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) | ||
2201 | { | ||
2202 | e_dbg("Running IGP 3 PHY init script\n"); | ||
2203 | |||
2204 | /* PHY init IGP 3 */ | ||
2205 | /* Enable rise/fall, 10-mode work in class-A */ | ||
2206 | e1e_wphy(hw, 0x2F5B, 0x9018); | ||
2207 | /* Remove all caps from Replica path filter */ | ||
2208 | e1e_wphy(hw, 0x2F52, 0x0000); | ||
2209 | /* Bias trimming for ADC, AFE and Driver (Default) */ | ||
2210 | e1e_wphy(hw, 0x2FB1, 0x8B24); | ||
2211 | /* Increase Hybrid poly bias */ | ||
2212 | e1e_wphy(hw, 0x2FB2, 0xF8F0); | ||
2213 | /* Add 4% to Tx amplitude in Gig mode */ | ||
2214 | e1e_wphy(hw, 0x2010, 0x10B0); | ||
2215 | /* Disable trimming (TTT) */ | ||
2216 | e1e_wphy(hw, 0x2011, 0x0000); | ||
2217 | /* Poly DC correction to 94.6% + 2% for all channels */ | ||
2218 | e1e_wphy(hw, 0x20DD, 0x249A); | ||
2219 | /* ABS DC correction to 95.9% */ | ||
2220 | e1e_wphy(hw, 0x20DE, 0x00D3); | ||
2221 | /* BG temp curve trim */ | ||
2222 | e1e_wphy(hw, 0x28B4, 0x04CE); | ||
2223 | /* Increasing ADC OPAMP stage 1 currents to max */ | ||
2224 | e1e_wphy(hw, 0x2F70, 0x29E4); | ||
2225 | /* Force 1000 (required for enabling PHY regs configuration) */ | ||
2226 | e1e_wphy(hw, 0x0000, 0x0140); | ||
2227 | /* Set upd_freq to 6 */ | ||
2228 | e1e_wphy(hw, 0x1F30, 0x1606); | ||
2229 | /* Disable NPDFE */ | ||
2230 | e1e_wphy(hw, 0x1F31, 0xB814); | ||
2231 | /* Disable adaptive fixed FFE (Default) */ | ||
2232 | e1e_wphy(hw, 0x1F35, 0x002A); | ||
2233 | /* Enable FFE hysteresis */ | ||
2234 | e1e_wphy(hw, 0x1F3E, 0x0067); | ||
2235 | /* Fixed FFE for short cable lengths */ | ||
2236 | e1e_wphy(hw, 0x1F54, 0x0065); | ||
2237 | /* Fixed FFE for medium cable lengths */ | ||
2238 | e1e_wphy(hw, 0x1F55, 0x002A); | ||
2239 | /* Fixed FFE for long cable lengths */ | ||
2240 | e1e_wphy(hw, 0x1F56, 0x002A); | ||
2241 | /* Enable Adaptive Clip Threshold */ | ||
2242 | e1e_wphy(hw, 0x1F72, 0x3FB0); | ||
2243 | /* AHT reset limit to 1 */ | ||
2244 | e1e_wphy(hw, 0x1F76, 0xC0FF); | ||
2245 | /* Set AHT master delay to 127 msec */ | ||
2246 | e1e_wphy(hw, 0x1F77, 0x1DEC); | ||
2247 | /* Set scan bits for AHT */ | ||
2248 | e1e_wphy(hw, 0x1F78, 0xF9EF); | ||
2249 | /* Set AHT Preset bits */ | ||
2250 | e1e_wphy(hw, 0x1F79, 0x0210); | ||
2251 | /* Change integ_factor of channel A to 3 */ | ||
2252 | e1e_wphy(hw, 0x1895, 0x0003); | ||
2253 | /* Change prop_factor of channels BCD to 8 */ | ||
2254 | e1e_wphy(hw, 0x1796, 0x0008); | ||
2255 | /* Change cg_icount + enable integbp for channels BCD */ | ||
2256 | e1e_wphy(hw, 0x1798, 0xD008); | ||
2257 | /* | ||
2258 | * Change cg_icount + enable integbp + change prop_factor_master | ||
2259 | * to 8 for channel A | ||
2260 | */ | ||
2261 | e1e_wphy(hw, 0x1898, 0xD918); | ||
2262 | /* Disable AHT in Slave mode on channel A */ | ||
2263 | e1e_wphy(hw, 0x187A, 0x0800); | ||
2264 | /* | ||
2265 | * Enable LPLU and disable AN to 1000 in non-D0a states, | ||
2266 | * Enable SPD+B2B | ||
2267 | */ | ||
2268 | e1e_wphy(hw, 0x0019, 0x008D); | ||
2269 | /* Enable restart AN on an1000_dis change */ | ||
2270 | e1e_wphy(hw, 0x001B, 0x2080); | ||
2271 | /* Enable wh_fifo read clock in 10/100 modes */ | ||
2272 | e1e_wphy(hw, 0x0014, 0x0045); | ||
2273 | /* Restart AN, Speed selection is 1000 */ | ||
2274 | e1e_wphy(hw, 0x0000, 0x1340); | ||
2275 | |||
2276 | return 0; | ||
2277 | } | ||
2278 | |||
2279 | /* Internal function pointers */ | ||
2280 | |||
2281 | /** | ||
2282 | * e1000_get_phy_cfg_done - Generic PHY configuration done | ||
2283 | * @hw: pointer to the HW structure | ||
2284 | * | ||
2285 | * Return success if silicon family did not implement a family specific | ||
2286 | * get_cfg_done function. | ||
2287 | **/ | ||
2288 | static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) | ||
2289 | { | ||
2290 | if (hw->phy.ops.get_cfg_done) | ||
2291 | return hw->phy.ops.get_cfg_done(hw); | ||
2292 | |||
2293 | return 0; | ||
2294 | } | ||
2295 | |||
2296 | /** | ||
2297 | * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex | ||
2298 | * @hw: pointer to the HW structure | ||
2299 | * | ||
2300 | * When the silicon family has not implemented a forced speed/duplex | ||
2301 | * function for the PHY, simply return 0. | ||
2302 | **/ | ||
2303 | static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) | ||
2304 | { | ||
2305 | if (hw->phy.ops.force_speed_duplex) | ||
2306 | return hw->phy.ops.force_speed_duplex(hw); | ||
2307 | |||
2308 | return 0; | ||
2309 | } | ||
2310 | |||
2311 | /** | ||
2312 | * e1000e_get_phy_type_from_id - Get PHY type from id | ||
2313 | * @phy_id: phy_id read from the phy | ||
2314 | * | ||
2315 | * Returns the phy type from the id. | ||
2316 | **/ | ||
2317 | enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) | ||
2318 | { | ||
2319 | enum e1000_phy_type phy_type = e1000_phy_unknown; | ||
2320 | |||
2321 | switch (phy_id) { | ||
2322 | case M88E1000_I_PHY_ID: | ||
2323 | case M88E1000_E_PHY_ID: | ||
2324 | case M88E1111_I_PHY_ID: | ||
2325 | case M88E1011_I_PHY_ID: | ||
2326 | phy_type = e1000_phy_m88; | ||
2327 | break; | ||
2328 | case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ | ||
2329 | phy_type = e1000_phy_igp_2; | ||
2330 | break; | ||
2331 | case GG82563_E_PHY_ID: | ||
2332 | phy_type = e1000_phy_gg82563; | ||
2333 | break; | ||
2334 | case IGP03E1000_E_PHY_ID: | ||
2335 | phy_type = e1000_phy_igp_3; | ||
2336 | break; | ||
2337 | case IFE_E_PHY_ID: | ||
2338 | case IFE_PLUS_E_PHY_ID: | ||
2339 | case IFE_C_E_PHY_ID: | ||
2340 | phy_type = e1000_phy_ife; | ||
2341 | break; | ||
2342 | case BME1000_E_PHY_ID: | ||
2343 | case BME1000_E_PHY_ID_R2: | ||
2344 | phy_type = e1000_phy_bm; | ||
2345 | break; | ||
2346 | case I82578_E_PHY_ID: | ||
2347 | phy_type = e1000_phy_82578; | ||
2348 | break; | ||
2349 | case I82577_E_PHY_ID: | ||
2350 | phy_type = e1000_phy_82577; | ||
2351 | break; | ||
2352 | case I82579_E_PHY_ID: | ||
2353 | phy_type = e1000_phy_82579; | ||
2354 | break; | ||
2355 | default: | ||
2356 | phy_type = e1000_phy_unknown; | ||
2357 | break; | ||
2358 | } | ||
2359 | return phy_type; | ||
2360 | } | ||
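
A minimal usage sketch, assuming the driver's e1000.h context and a caller that has already run e1000e_get_phy_id(); the helper name below is hypothetical:

	/* Illustrative sketch only -- not part of the driver source. */
	static s32 example_classify_phy(struct e1000_hw *hw)
	{
		/* e1000e_get_phy_id() is assumed to have filled hw->phy.id */
		hw->phy.type = e1000e_get_phy_type_from_id(hw->phy.id);
		if (hw->phy.type == e1000_phy_unknown)
			return -E1000_ERR_PHY_TYPE;

		return 0;
	}
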
2361 | |||
2362 | /** | ||
2363 | * e1000e_determine_phy_address - Determines PHY address. | ||
2364 | * @hw: pointer to the HW structure | ||
2365 | * | ||
2366 | * This uses a trial and error method to loop through possible PHY | ||
2367 | * addresses. It tests each by reading the PHY ID registers and | ||
2368 | * checking for a match. | ||
2369 | **/ | ||
2370 | s32 e1000e_determine_phy_address(struct e1000_hw *hw) | ||
2371 | { | ||
2372 | s32 ret_val = -E1000_ERR_PHY_TYPE; | ||
2373 | u32 phy_addr = 0; | ||
2374 | u32 i; | ||
2375 | enum e1000_phy_type phy_type = e1000_phy_unknown; | ||
2376 | |||
2377 | hw->phy.id = phy_type; | ||
2378 | |||
2379 | for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { | ||
2380 | hw->phy.addr = phy_addr; | ||
2381 | i = 0; | ||
2382 | |||
2383 | do { | ||
2384 | e1000e_get_phy_id(hw); | ||
2385 | phy_type = e1000e_get_phy_type_from_id(hw->phy.id); | ||
2386 | |||
2387 | /* | ||
2388 | * If phy_type is valid, break - we found our | ||
2389 | * PHY address | ||
2390 | */ | ||
2391 | if (phy_type != e1000_phy_unknown) { | ||
2392 | ret_val = 0; | ||
2393 | goto out; | ||
2394 | } | ||
2395 | usleep_range(1000, 2000); | ||
2396 | i++; | ||
2397 | } while (i < 10); | ||
2398 | } | ||
2399 | |||
2400 | out: | ||
2401 | return ret_val; | ||
2402 | } | ||
2403 | |||
2404 | /** | ||
2405 | * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address | ||
2406 | * @page: page to access | ||
2407 | * @reg: register being accessed (used to select the PHY address) | ||
2408 | * Returns the phy address for the page requested. | ||
2409 | **/ | ||
2410 | static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) | ||
2411 | { | ||
2412 | u32 phy_addr = 2; | ||
2413 | |||
2414 | if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) | ||
2415 | phy_addr = 1; | ||
2416 | |||
2417 | return phy_addr; | ||
2418 | } | ||
2419 | |||
2420 | /** | ||
2421 | * e1000e_write_phy_reg_bm - Write BM PHY register | ||
2422 | * @hw: pointer to the HW structure | ||
2423 | * @offset: register offset to write to | ||
2424 | * @data: data to write at register offset | ||
2425 | * | ||
2426 | * Acquires semaphore, if necessary, then writes the data to PHY register | ||
2427 | * at the offset. Release any acquired semaphores before exiting. | ||
2428 | **/ | ||
2429 | s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) | ||
2430 | { | ||
2431 | s32 ret_val; | ||
2432 | u32 page = offset >> IGP_PAGE_SHIFT; | ||
2433 | |||
2434 | ret_val = hw->phy.ops.acquire(hw); | ||
2435 | if (ret_val) | ||
2436 | return ret_val; | ||
2437 | |||
2438 | /* Page 800 works differently than the rest so it has its own func */ | ||
2439 | if (page == BM_WUC_PAGE) { | ||
2440 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, | ||
2441 | false, false); | ||
2442 | goto out; | ||
2443 | } | ||
2444 | |||
2445 | hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); | ||
2446 | |||
2447 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | ||
2448 | u32 page_shift, page_select; | ||
2449 | |||
2450 | /* | ||
2451 | * Page select is register 31 for phy address 1 and 22 for | ||
2452 | * phy address 2 and 3. Page select is shifted only for | ||
2453 | * phy address 1. | ||
2454 | */ | ||
2455 | if (hw->phy.addr == 1) { | ||
2456 | page_shift = IGP_PAGE_SHIFT; | ||
2457 | page_select = IGP01E1000_PHY_PAGE_SELECT; | ||
2458 | } else { | ||
2459 | page_shift = 0; | ||
2460 | page_select = BM_PHY_PAGE_SELECT; | ||
2461 | } | ||
2462 | |||
2463 | /* Page is shifted left, PHY expects (page x 32) */ | ||
2464 | ret_val = e1000e_write_phy_reg_mdic(hw, page_select, | ||
2465 | (page << page_shift)); | ||
2466 | if (ret_val) | ||
2467 | goto out; | ||
2468 | } | ||
2469 | |||
2470 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | ||
2471 | data); | ||
2472 | |||
2473 | out: | ||
2474 | hw->phy.ops.release(hw); | ||
2475 | return ret_val; | ||
2476 | } | ||
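
The BM accessors take a single offset that packs the page into the bits above IGP_PAGE_SHIFT and the register number into the low bits masked by MAX_PHY_REG_ADDRESS, which is why the page select value is written back as (page << page_shift). A minimal sketch of building such an offset, assuming the driver's e1000.h context; the helper name is hypothetical:

	/* Illustrative sketch only -- not part of the driver source. */
	static s32 example_write_bm_paged_reg(struct e1000_hw *hw, u16 page,
					      u16 reg, u16 data)
	{
		/* pack page and register number into a single BM offset */
		u32 offset = ((u32)page << IGP_PAGE_SHIFT) |
			     (reg & MAX_PHY_REG_ADDRESS);

		return e1000e_write_phy_reg_bm(hw, offset, data);
	}
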
2477 | |||
2478 | /** | ||
2479 | * e1000e_read_phy_reg_bm - Read BM PHY register | ||
2480 | * @hw: pointer to the HW structure | ||
2481 | * @offset: register offset to be read | ||
2482 | * @data: pointer to the read data | ||
2483 | * | ||
2484 | * Acquires semaphore, if necessary, then reads the PHY register at offset | ||
2485 | * and stores the retrieved information in data. Release any acquired | ||
2486 | * semaphores before exiting. | ||
2487 | **/ | ||
2488 | s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) | ||
2489 | { | ||
2490 | s32 ret_val; | ||
2491 | u32 page = offset >> IGP_PAGE_SHIFT; | ||
2492 | |||
2493 | ret_val = hw->phy.ops.acquire(hw); | ||
2494 | if (ret_val) | ||
2495 | return ret_val; | ||
2496 | |||
2497 | /* Page 800 works differently than the rest so it has its own func */ | ||
2498 | if (page == BM_WUC_PAGE) { | ||
2499 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, | ||
2500 | true, false); | ||
2501 | goto out; | ||
2502 | } | ||
2503 | |||
2504 | hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); | ||
2505 | |||
2506 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | ||
2507 | u32 page_shift, page_select; | ||
2508 | |||
2509 | /* | ||
2510 | * Page select is register 31 for phy address 1 and 22 for | ||
2511 | * phy address 2 and 3. Page select is shifted only for | ||
2512 | * phy address 1. | ||
2513 | */ | ||
2514 | if (hw->phy.addr == 1) { | ||
2515 | page_shift = IGP_PAGE_SHIFT; | ||
2516 | page_select = IGP01E1000_PHY_PAGE_SELECT; | ||
2517 | } else { | ||
2518 | page_shift = 0; | ||
2519 | page_select = BM_PHY_PAGE_SELECT; | ||
2520 | } | ||
2521 | |||
2522 | /* Page is shifted left, PHY expects (page x 32) */ | ||
2523 | ret_val = e1000e_write_phy_reg_mdic(hw, page_select, | ||
2524 | (page << page_shift)); | ||
2525 | if (ret_val) | ||
2526 | goto out; | ||
2527 | } | ||
2528 | |||
2529 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | ||
2530 | data); | ||
2531 | out: | ||
2532 | hw->phy.ops.release(hw); | ||
2533 | return ret_val; | ||
2534 | } | ||
2535 | |||
2536 | /** | ||
2537 | * e1000e_read_phy_reg_bm2 - Read BM PHY register | ||
2538 | * @hw: pointer to the HW structure | ||
2539 | * @offset: register offset to be read | ||
2540 | * @data: pointer to the read data | ||
2541 | * | ||
2542 | * Acquires semaphore, if necessary, then reads the PHY register at offset | ||
2543 | * and stores the retrieved information in data. Release any acquired | ||
2544 | * semaphores before exiting. | ||
2545 | **/ | ||
2546 | s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) | ||
2547 | { | ||
2548 | s32 ret_val; | ||
2549 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); | ||
2550 | |||
2551 | ret_val = hw->phy.ops.acquire(hw); | ||
2552 | if (ret_val) | ||
2553 | return ret_val; | ||
2554 | |||
2555 | /* Page 800 works differently than the rest so it has its own func */ | ||
2556 | if (page == BM_WUC_PAGE) { | ||
2557 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, | ||
2558 | true, false); | ||
2559 | goto out; | ||
2560 | } | ||
2561 | |||
2562 | hw->phy.addr = 1; | ||
2563 | |||
2564 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | ||
2565 | |||
2566 | /* Page is shifted left, PHY expects (page x 32) */ | ||
2567 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, | ||
2568 | page); | ||
2569 | |||
2570 | if (ret_val) | ||
2571 | goto out; | ||
2572 | } | ||
2573 | |||
2574 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | ||
2575 | data); | ||
2576 | out: | ||
2577 | hw->phy.ops.release(hw); | ||
2578 | return ret_val; | ||
2579 | } | ||
2580 | |||
2581 | /** | ||
2582 | * e1000e_write_phy_reg_bm2 - Write BM PHY register | ||
2583 | * @hw: pointer to the HW structure | ||
2584 | * @offset: register offset to write to | ||
2585 | * @data: data to write at register offset | ||
2586 | * | ||
2587 | * Acquires semaphore, if necessary, then writes the data to PHY register | ||
2588 | * at the offset. Release any acquired semaphores before exiting. | ||
2589 | **/ | ||
2590 | s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) | ||
2591 | { | ||
2592 | s32 ret_val; | ||
2593 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); | ||
2594 | |||
2595 | ret_val = hw->phy.ops.acquire(hw); | ||
2596 | if (ret_val) | ||
2597 | return ret_val; | ||
2598 | |||
2599 | /* Page 800 works differently than the rest so it has its own func */ | ||
2600 | if (page == BM_WUC_PAGE) { | ||
2601 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, | ||
2602 | false, false); | ||
2603 | goto out; | ||
2604 | } | ||
2605 | |||
2606 | hw->phy.addr = 1; | ||
2607 | |||
2608 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | ||
2609 | /* Page is shifted left, PHY expects (page x 32) */ | ||
2610 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, | ||
2611 | page); | ||
2612 | |||
2613 | if (ret_val) | ||
2614 | goto out; | ||
2615 | } | ||
2616 | |||
2617 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | ||
2618 | data); | ||
2619 | |||
2620 | out: | ||
2621 | hw->phy.ops.release(hw); | ||
2622 | return ret_val; | ||
2623 | } | ||
2624 | |||
2625 | /** | ||
2626 | * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers | ||
2627 | * @hw: pointer to the HW structure | ||
2628 | * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG | ||
2629 | * | ||
2630 | * Assumes semaphore already acquired and phy_reg points to a valid memory | ||
2631 | * address to store contents of the BM_WUC_ENABLE_REG register. | ||
2632 | **/ | ||
2633 | s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) | ||
2634 | { | ||
2635 | s32 ret_val; | ||
2636 | u16 temp; | ||
2637 | |||
2638 | /* All page select, port ctrl and wakeup registers use phy address 1 */ | ||
2639 | hw->phy.addr = 1; | ||
2640 | |||
2641 | /* Select Port Control Registers page */ | ||
2642 | ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); | ||
2643 | if (ret_val) { | ||
2644 | e_dbg("Could not set Port Control page\n"); | ||
2645 | goto out; | ||
2646 | } | ||
2647 | |||
2648 | ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); | ||
2649 | if (ret_val) { | ||
2650 | e_dbg("Could not read PHY register %d.%d\n", | ||
2651 | BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); | ||
2652 | goto out; | ||
2653 | } | ||
2654 | |||
2655 | /* | ||
2656 | * Enable both PHY wakeup mode and Wakeup register page writes. | ||
2657 | * Prevent a power state change by disabling ME and Host PHY wakeup. | ||
2658 | */ | ||
2659 | temp = *phy_reg; | ||
2660 | temp |= BM_WUC_ENABLE_BIT; | ||
2661 | temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); | ||
2662 | |||
2663 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp); | ||
2664 | if (ret_val) { | ||
2665 | e_dbg("Could not write PHY register %d.%d\n", | ||
2666 | BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); | ||
2667 | goto out; | ||
2668 | } | ||
2669 | |||
2670 | /* Select Host Wakeup Registers page */ | ||
2671 | ret_val = e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); | ||
2672 | |||
2673 | /* caller now able to write registers on the Wakeup registers page */ | ||
2674 | out: | ||
2675 | return ret_val; | ||
2676 | } | ||
2677 | |||
2678 | /** | ||
2679 | * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs | ||
2680 | * @hw: pointer to the HW structure | ||
2681 | * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG | ||
2682 | * | ||
2683 | * Restore BM_WUC_ENABLE_REG to its original value. | ||
2684 | * | ||
2685 | * Assumes semaphore already acquired and *phy_reg is the contents of the | ||
2686 | * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by | ||
2687 | * caller. | ||
2688 | **/ | ||
2689 | s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) | ||
2690 | { | ||
2691 | s32 ret_val = 0; | ||
2692 | |||
2693 | /* Select Port Control Registers page */ | ||
2694 | ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); | ||
2695 | if (ret_val) { | ||
2696 | e_dbg("Could not set Port Control page\n"); | ||
2697 | goto out; | ||
2698 | } | ||
2699 | |||
2700 | /* Restore 769.17 to its original value */ | ||
2701 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg); | ||
2702 | if (ret_val) | ||
2703 | e_dbg("Could not restore PHY register %d.%d\n", | ||
2704 | BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); | ||
2705 | out: | ||
2706 | return ret_val; | ||
2707 | } | ||
2708 | |||
2709 | /** | ||
2710 | * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register | ||
2711 | * @hw: pointer to the HW structure | ||
2712 | * @offset: register offset to be read or written | ||
2713 | * @data: pointer to the data to read or write | ||
2714 | * @read: determines if operation is read or write | ||
2715 | * @page_set: BM_WUC_PAGE already set and access enabled | ||
2716 | * | ||
2717 | * Read the PHY register at offset and store the retrieved information in | ||
2718 | * data, or write data to PHY register at offset. Note the procedure to | ||
2719 | * access the PHY wakeup registers is different than reading the other PHY | ||
2720 | * registers. It works as such: | ||
2721 | * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1 | ||
2722 | * 2) Set page to 800 for host (801 if we were the manageability engine) | ||
2723 | * 3) Write the address using the address opcode (0x11) | ||
2724 | * 4) Read or write the data using the data opcode (0x12) | ||
2725 | * 5) Restore 769.17.2 to its original value | ||
2726 | * | ||
2727 | * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and | ||
2728 | * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm(). | ||
2729 | * | ||
2730 | * Assumes semaphore is already acquired. When page_set==true, assumes | ||
2731 | * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack | ||
2732 | * is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_access_bm()). | ||
2733 | **/ | ||
2734 | static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | ||
2735 | u16 *data, bool read, bool page_set) | ||
2736 | { | ||
2737 | s32 ret_val; | ||
2738 | u16 reg = BM_PHY_REG_NUM(offset); | ||
2739 | u16 page = BM_PHY_REG_PAGE(offset); | ||
2740 | u16 phy_reg = 0; | ||
2741 | |||
2742 | /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */ | ||
2743 | if ((hw->mac.type == e1000_pchlan) && | ||
2744 | (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) | ||
2745 | e_dbg("Attempting to access page %d while gig enabled.\n", | ||
2746 | page); | ||
2747 | |||
2748 | if (!page_set) { | ||
2749 | /* Enable access to PHY wakeup registers */ | ||
2750 | ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); | ||
2751 | if (ret_val) { | ||
2752 | e_dbg("Could not enable PHY wakeup reg access\n"); | ||
2753 | goto out; | ||
2754 | } | ||
2755 | } | ||
2756 | |||
2757 | e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg); | ||
2758 | |||
2759 | /* Write the Wakeup register page offset value using opcode 0x11 */ | ||
2760 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); | ||
2761 | if (ret_val) { | ||
2762 | e_dbg("Could not write address opcode to page %d\n", page); | ||
2763 | goto out; | ||
2764 | } | ||
2765 | |||
2766 | if (read) { | ||
2767 | /* Read the Wakeup register page value using opcode 0x12 */ | ||
2768 | ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, | ||
2769 | data); | ||
2770 | } else { | ||
2771 | /* Write the Wakeup register page value using opcode 0x12 */ | ||
2772 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, | ||
2773 | *data); | ||
2774 | } | ||
2775 | |||
2776 | if (ret_val) { | ||
2777 | e_dbg("Could not access PHY reg %d.%d\n", page, reg); | ||
2778 | goto out; | ||
2779 | } | ||
2780 | |||
2781 | if (!page_set) | ||
2782 | ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); | ||
2783 | |||
2784 | out: | ||
2785 | return ret_val; | ||
2786 | } | ||
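
Per the procedure documented above, a caller can bracket several wakeup-page accesses with one enable/disable pair and pass page_set as true in between. A minimal sketch under that assumption, in the driver's e1000.h context; the helper name is hypothetical:

	/* Illustrative sketch only -- not part of the driver source. */
	static s32 example_read_two_wuc_regs(struct e1000_hw *hw, u32 off1,
					     u32 off2, u16 *val1, u16 *val2)
	{
		u16 wuc_enable;
		s32 ret_val;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
		if (ret_val)
			goto release;

		/* page already set and access enabled, so page_set == true */
		ret_val = e1000_access_phy_wakeup_reg_bm(hw, off1, val1,
							 true, true);
		if (!ret_val)
			ret_val = e1000_access_phy_wakeup_reg_bm(hw, off2, val2,
								 true, true);

		/* always restore the original WUC enable bits */
		e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
	release:
		hw->phy.ops.release(hw);
		return ret_val;
	}
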
2787 | |||
2788 | /** | ||
2789 | * e1000_power_up_phy_copper - Restore copper link in case of PHY power down | ||
2790 | * @hw: pointer to the HW structure | ||
2791 | * | ||
2792 | * In the case of a PHY power down to save power, or to turn off link during a | ||
2793 | * driver unload, or when wake on lan is not enabled, restore the link to its | ||
2794 | * previous settings. | ||
2795 | **/ | ||
2796 | void e1000_power_up_phy_copper(struct e1000_hw *hw) | ||
2797 | { | ||
2798 | u16 mii_reg = 0; | ||
2799 | |||
2800 | /* The PHY will retain its settings across a power down/up cycle */ | ||
2801 | e1e_rphy(hw, PHY_CONTROL, &mii_reg); | ||
2802 | mii_reg &= ~MII_CR_POWER_DOWN; | ||
2803 | e1e_wphy(hw, PHY_CONTROL, mii_reg); | ||
2804 | } | ||
2805 | |||
2806 | /** | ||
2807 | * e1000_power_down_phy_copper - Power down copper PHY | ||
2808 | * @hw: pointer to the HW structure | ||
2809 | * | ||
2810 | * Power down the PHY to save power, to turn off link during a driver unload, | ||
2811 | * or when wake on lan is not enabled. The PHY retains its settings across | ||
2812 | * the power down/up cycle. | ||
2813 | **/ | ||
2814 | void e1000_power_down_phy_copper(struct e1000_hw *hw) | ||
2815 | { | ||
2816 | u16 mii_reg = 0; | ||
2817 | |||
2818 | /* The PHY will retain its settings across a power down/up cycle */ | ||
2819 | e1e_rphy(hw, PHY_CONTROL, &mii_reg); | ||
2820 | mii_reg |= MII_CR_POWER_DOWN; | ||
2821 | e1e_wphy(hw, PHY_CONTROL, mii_reg); | ||
2822 | usleep_range(1000, 2000); | ||
2823 | } | ||
2824 | |||
2825 | /** | ||
2826 | * e1000e_commit_phy - Soft PHY reset | ||
2827 | * @hw: pointer to the HW structure | ||
2828 | * | ||
2829 | * Performs a soft PHY reset on those that apply. This is a function pointer | ||
2830 | * entry point called by drivers. | ||
2831 | **/ | ||
2832 | s32 e1000e_commit_phy(struct e1000_hw *hw) | ||
2833 | { | ||
2834 | if (hw->phy.ops.commit) | ||
2835 | return hw->phy.ops.commit(hw); | ||
2836 | |||
2837 | return 0; | ||
2838 | } | ||
2839 | |||
2840 | /** | ||
2841 | * e1000_set_d0_lplu_state - Sets low power link up state for D0 | ||
2842 | * @hw: pointer to the HW structure | ||
2843 | * @active: boolean used to enable/disable lplu | ||
2844 | * | ||
2845 | * Success returns 0, Failure returns 1 | ||
2846 | * | ||
2847 | * The low power link up (lplu) state is set to the power management level D0 | ||
2848 | * and SmartSpeed is disabled when active is true, else clear lplu for D0 | ||
2849 | * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU | ||
2850 | * is used during Dx states where the power conservation is most important. | ||
2851 | * During driver activity, SmartSpeed should be enabled so performance is | ||
2852 | * maintained. This is a function pointer entry point called by drivers. | ||
2853 | **/ | ||
2854 | static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) | ||
2855 | { | ||
2856 | if (hw->phy.ops.set_d0_lplu_state) | ||
2857 | return hw->phy.ops.set_d0_lplu_state(hw, active); | ||
2858 | |||
2859 | return 0; | ||
2860 | } | ||
2861 | |||
2862 | /** | ||
2863 | * __e1000_read_phy_reg_hv - Read HV PHY register | ||
2864 | * @hw: pointer to the HW structure | ||
2865 | * @offset: register offset to be read | ||
2866 | * @data: pointer to the read data | ||
2867 | * @locked: semaphore has already been acquired or not | ||
2868 | * @page_set: BM_WUC_PAGE already set and access enabled | ||
2869 | * Acquires semaphore, if necessary, then reads the PHY register at offset | ||
2870 | * and stores the retrieved information in data. Release any acquired | ||
2871 | * semaphore before exiting. | ||
2872 | **/ | ||
2873 | static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, | ||
2874 | bool locked, bool page_set) | ||
2875 | { | ||
2876 | s32 ret_val; | ||
2877 | u16 page = BM_PHY_REG_PAGE(offset); | ||
2878 | u16 reg = BM_PHY_REG_NUM(offset); | ||
2879 | u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); | ||
2880 | |||
2881 | if (!locked) { | ||
2882 | ret_val = hw->phy.ops.acquire(hw); | ||
2883 | if (ret_val) | ||
2884 | return ret_val; | ||
2885 | } | ||
2886 | |||
2887 | /* Page 800 works differently than the rest so it has its own func */ | ||
2888 | if (page == BM_WUC_PAGE) { | ||
2889 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, | ||
2890 | true, page_set); | ||
2891 | goto out; | ||
2892 | } | ||
2893 | |||
2894 | if (page > 0 && page < HV_INTC_FC_PAGE_START) { | ||
2895 | ret_val = e1000_access_phy_debug_regs_hv(hw, offset, | ||
2896 | data, true); | ||
2897 | goto out; | ||
2898 | } | ||
2899 | |||
2900 | if (!page_set) { | ||
2901 | if (page == HV_INTC_FC_PAGE_START) | ||
2902 | page = 0; | ||
2903 | |||
2904 | if (reg > MAX_PHY_MULTI_PAGE_REG) { | ||
2905 | /* Page is shifted left, PHY expects (page x 32) */ | ||
2906 | ret_val = e1000_set_page_igp(hw, | ||
2907 | (page << IGP_PAGE_SHIFT)); | ||
2908 | |||
2909 | hw->phy.addr = phy_addr; | ||
2910 | |||
2911 | if (ret_val) | ||
2912 | goto out; | ||
2913 | } | ||
2914 | } | ||
2915 | |||
2916 | e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page, | ||
2917 | page << IGP_PAGE_SHIFT, reg); | ||
2918 | |||
2919 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, | ||
2920 | data); | ||
2921 | out: | ||
2922 | if (!locked) | ||
2923 | hw->phy.ops.release(hw); | ||
2924 | |||
2925 | return ret_val; | ||
2926 | } | ||
2927 | |||
2928 | /** | ||
2929 | * e1000_read_phy_reg_hv - Read HV PHY register | ||
2930 | * @hw: pointer to the HW structure | ||
2931 | * @offset: register offset to be read | ||
2932 | * @data: pointer to the read data | ||
2933 | * | ||
2934 | * Acquires semaphore then reads the PHY register at offset and stores | ||
2935 | * the retrieved information in data. Release the acquired semaphore | ||
2936 | * before exiting. | ||
2937 | **/ | ||
2938 | s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) | ||
2939 | { | ||
2940 | return __e1000_read_phy_reg_hv(hw, offset, data, false, false); | ||
2941 | } | ||
2942 | |||
2943 | /** | ||
2944 | * e1000_read_phy_reg_hv_locked - Read HV PHY register | ||
2945 | * @hw: pointer to the HW structure | ||
2946 | * @offset: register offset to be read | ||
2947 | * @data: pointer to the read data | ||
2948 | * | ||
2949 | * Reads the PHY register at offset and stores the retrieved information | ||
2950 | * in data. Assumes semaphore already acquired. | ||
2951 | **/ | ||
2952 | s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data) | ||
2953 | { | ||
2954 | return __e1000_read_phy_reg_hv(hw, offset, data, true, false); | ||
2955 | } | ||
2956 | |||
2957 | /** | ||
2958 | * e1000_read_phy_reg_page_hv - Read HV PHY register | ||
2959 | * @hw: pointer to the HW structure | ||
2960 | * @offset: register offset to be read | ||
2961 | * @data: pointer to the read data | ||
2962 | * | ||
2963 | * Reads the PHY register at offset and stores the retrieved information | ||
2964 | * in data. Assumes semaphore already acquired and page already set. | ||
2965 | **/ | ||
2966 | s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data) | ||
2967 | { | ||
2968 | return __e1000_read_phy_reg_hv(hw, offset, data, true, true); | ||
2969 | } | ||
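
The three HV read wrappers differ only in what the caller has already done: e1000_read_phy_reg_hv() handles the semaphore itself, the _locked variant assumes it is held, and the _page variant additionally assumes the page is selected. A minimal sketch of batching two reads under one acquisition, in the driver's e1000.h context; the helper name is hypothetical:

	/* Illustrative sketch only -- not part of the driver source. */
	static s32 example_read_two_hv_regs(struct e1000_hw *hw, u32 off1,
					    u32 off2, u16 *val1, u16 *val2)
	{
		s32 ret_val;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		/* semaphore held, so use the _locked variant for both reads */
		ret_val = e1000_read_phy_reg_hv_locked(hw, off1, val1);
		if (!ret_val)
			ret_val = e1000_read_phy_reg_hv_locked(hw, off2, val2);

		hw->phy.ops.release(hw);
		return ret_val;
	}
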
2970 | |||
2971 | /** | ||
2972 | * __e1000_write_phy_reg_hv - Write HV PHY register | ||
2973 | * @hw: pointer to the HW structure | ||
2974 | * @offset: register offset to write to | ||
2975 | * @data: data to write at register offset | ||
2976 | * @locked: semaphore has already been acquired or not | ||
2977 | * @page_set: BM_WUC_PAGE already set and access enabled | ||
2978 | * Acquires semaphore, if necessary, then writes the data to PHY register | ||
2979 | * at the offset. Release any acquired semaphores before exiting. | ||
2980 | **/ | ||
2981 | static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, | ||
2982 | bool locked, bool page_set) | ||
2983 | { | ||
2984 | s32 ret_val; | ||
2985 | u16 page = BM_PHY_REG_PAGE(offset); | ||
2986 | u16 reg = BM_PHY_REG_NUM(offset); | ||
2987 | u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); | ||
2988 | |||
2989 | if (!locked) { | ||
2990 | ret_val = hw->phy.ops.acquire(hw); | ||
2991 | if (ret_val) | ||
2992 | return ret_val; | ||
2993 | } | ||
2994 | |||
2995 | /* Page 800 works differently than the rest so it has its own func */ | ||
2996 | if (page == BM_WUC_PAGE) { | ||
2997 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, | ||
2998 | false, page_set); | ||
2999 | goto out; | ||
3000 | } | ||
3001 | |||
3002 | if (page > 0 && page < HV_INTC_FC_PAGE_START) { | ||
3003 | ret_val = e1000_access_phy_debug_regs_hv(hw, offset, | ||
3004 | &data, false); | ||
3005 | goto out; | ||
3006 | } | ||
3007 | |||
3008 | if (!page_set) { | ||
3009 | if (page == HV_INTC_FC_PAGE_START) | ||
3010 | page = 0; | ||
3011 | |||
3012 | /* | ||
3013 | * Workaround MDIO accesses being disabled after entering IEEE | ||
3014 | * Power Down (when bit 11 of the PHY Control register is set) | ||
3015 | */ | ||
3016 | if ((hw->phy.type == e1000_phy_82578) && | ||
3017 | (hw->phy.revision >= 1) && | ||
3018 | (hw->phy.addr == 2) && | ||
3019 | ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) { | ||
3020 | u16 data2 = 0x7EFF; | ||
3021 | ret_val = e1000_access_phy_debug_regs_hv(hw, | ||
3022 | (1 << 6) | 0x3, | ||
3023 | &data2, false); | ||
3024 | if (ret_val) | ||
3025 | goto out; | ||
3026 | } | ||
3027 | |||
3028 | if (reg > MAX_PHY_MULTI_PAGE_REG) { | ||
3029 | /* Page is shifted left, PHY expects (page x 32) */ | ||
3030 | ret_val = e1000_set_page_igp(hw, | ||
3031 | (page << IGP_PAGE_SHIFT)); | ||
3032 | |||
3033 | hw->phy.addr = phy_addr; | ||
3034 | |||
3035 | if (ret_val) | ||
3036 | goto out; | ||
3037 | } | ||
3038 | } | ||
3039 | |||
3040 | e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page, | ||
3041 | page << IGP_PAGE_SHIFT, reg); | ||
3042 | |||
3043 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, | ||
3044 | data); | ||
3045 | |||
3046 | out: | ||
3047 | if (!locked) | ||
3048 | hw->phy.ops.release(hw); | ||
3049 | |||
3050 | return ret_val; | ||
3051 | } | ||
3052 | |||
3053 | /** | ||
3054 | * e1000_write_phy_reg_hv - Write HV PHY register | ||
3055 | * @hw: pointer to the HW structure | ||
3056 | * @offset: register offset to write to | ||
3057 | * @data: data to write at register offset | ||
3058 | * | ||
3059 | * Acquires semaphore then writes the data to PHY register at the offset. | ||
3060 | * Release the acquired semaphores before exiting. | ||
3061 | **/ | ||
3062 | s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) | ||
3063 | { | ||
3064 | return __e1000_write_phy_reg_hv(hw, offset, data, false, false); | ||
3065 | } | ||
3066 | |||
3067 | /** | ||
3068 | * e1000_write_phy_reg_hv_locked - Write HV PHY register | ||
3069 | * @hw: pointer to the HW structure | ||
3070 | * @offset: register offset to write to | ||
3071 | * @data: data to write at register offset | ||
3072 | * | ||
3073 | * Writes the data to PHY register at the offset. Assumes semaphore | ||
3074 | * already acquired. | ||
3075 | **/ | ||
3076 | s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data) | ||
3077 | { | ||
3078 | return __e1000_write_phy_reg_hv(hw, offset, data, true, false); | ||
3079 | } | ||
3080 | |||
3081 | /** | ||
3082 | * e1000_write_phy_reg_page_hv - Write HV PHY register | ||
3083 | * @hw: pointer to the HW structure | ||
3084 | * @offset: register offset to write to | ||
3085 | * @data: data to write at register offset | ||
3086 | * | ||
3087 | * Writes the data to PHY register at the offset. Assumes semaphore | ||
3088 | * already acquired and page already set. | ||
3089 | **/ | ||
3090 | s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data) | ||
3091 | { | ||
3092 | return __e1000_write_phy_reg_hv(hw, offset, data, true, true); | ||
3093 | } | ||
3094 | |||
3095 | /** | ||
3096 | * e1000_get_phy_addr_for_hv_page - Get PHY address based on page | ||
3097 | * @page: page to be accessed | ||
3098 | **/ | ||
3099 | static u32 e1000_get_phy_addr_for_hv_page(u32 page) | ||
3100 | { | ||
3101 | u32 phy_addr = 2; | ||
3102 | |||
3103 | if (page >= HV_INTC_FC_PAGE_START) | ||
3104 | phy_addr = 1; | ||
3105 | |||
3106 | return phy_addr; | ||
3107 | } | ||
3108 | |||
3109 | /** | ||
3110 | * e1000_access_phy_debug_regs_hv - Read/write HV PHY vendor-specific high registers | ||
3111 | * @hw: pointer to the HW structure | ||
3112 | * @offset: register offset to be read or written | ||
3113 | * @data: pointer to the data to be read or written | ||
3114 | * @read: determines if operation is read or write | ||
3115 | * | ||
3116 | * Reads or writes the PHY register at offset, storing any retrieved | ||
3117 | * information in data. Assumes the semaphore is already acquired. Note the | ||
3118 | * procedure to access these regs uses the address port and data port to | ||
3119 | * read/write. These accesses are done with PHY address 2 and without using pages. | ||
3120 | **/ | ||
3121 | static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | ||
3122 | u16 *data, bool read) | ||
3123 | { | ||
3124 | s32 ret_val; | ||
3125 | u32 addr_reg = 0; | ||
3126 | u32 data_reg = 0; | ||
3127 | |||
3128 | /* This takes care of the difference with desktop vs mobile phy */ | ||
3129 | addr_reg = (hw->phy.type == e1000_phy_82578) ? | ||
3130 | I82578_ADDR_REG : I82577_ADDR_REG; | ||
3131 | data_reg = addr_reg + 1; | ||
3132 | |||
3133 | /* All operations in this function are phy address 2 */ | ||
3134 | hw->phy.addr = 2; | ||
3135 | |||
3136 | /* masking with 0x3F to remove the page from offset */ | ||
3137 | ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); | ||
3138 | if (ret_val) { | ||
3139 | e_dbg("Could not write the Address Offset port register\n"); | ||
3140 | goto out; | ||
3141 | } | ||
3142 | |||
3143 | /* Read or write the data value next */ | ||
3144 | if (read) | ||
3145 | ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data); | ||
3146 | else | ||
3147 | ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); | ||
3148 | |||
3149 | if (ret_val) { | ||
3150 | e_dbg("Could not access the Data port register\n"); | ||
3151 | goto out; | ||
3152 | } | ||
3153 | |||
3154 | out: | ||
3155 | return ret_val; | ||
3156 | } | ||
3157 | |||
3158 | /** | ||
3159 | * e1000_link_stall_workaround_hv - Si workaround | ||
3160 | * @hw: pointer to the HW structure | ||
3161 | * | ||
3162 | * This function works around a Si bug where the link partner can get | ||
3163 | * a link up indication before the PHY does. If small packets are sent | ||
3164 | * by the link partner they can be placed in the packet buffer without | ||
3165 | * being properly accounted for by the PHY and will stall, preventing | ||
3166 | * further packets from being received. The workaround is to clear the | ||
3167 | * packet buffer after the PHY detects link up. | ||
3168 | **/ | ||
3169 | s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) | ||
3170 | { | ||
3171 | s32 ret_val = 0; | ||
3172 | u16 data; | ||
3173 | |||
3174 | if (hw->phy.type != e1000_phy_82578) | ||
3175 | goto out; | ||
3176 | |||
3177 | /* Do not apply workaround if in PHY loopback (bit 14 set) */ | ||
3178 | e1e_rphy(hw, PHY_CONTROL, &data); | ||
3179 | if (data & PHY_CONTROL_LB) | ||
3180 | goto out; | ||
3181 | |||
3182 | /* check if link is up and at 1Gbps */ | ||
3183 | ret_val = e1e_rphy(hw, BM_CS_STATUS, &data); | ||
3184 | if (ret_val) | ||
3185 | goto out; | ||
3186 | |||
3187 | data &= BM_CS_STATUS_LINK_UP | | ||
3188 | BM_CS_STATUS_RESOLVED | | ||
3189 | BM_CS_STATUS_SPEED_MASK; | ||
3190 | |||
3191 | if (data != (BM_CS_STATUS_LINK_UP | | ||
3192 | BM_CS_STATUS_RESOLVED | | ||
3193 | BM_CS_STATUS_SPEED_1000)) | ||
3194 | goto out; | ||
3195 | |||
3196 | mdelay(200); | ||
3197 | |||
3198 | /* flush the packets in the fifo buffer */ | ||
3199 | ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC | | ||
3200 | HV_MUX_DATA_CTRL_FORCE_SPEED); | ||
3201 | if (ret_val) | ||
3202 | goto out; | ||
3203 | |||
3204 | ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC); | ||
3205 | |||
3206 | out: | ||
3207 | return ret_val; | ||
3208 | } | ||
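
A minimal sketch of where a link-check path might invoke this workaround once link-up is observed; the helper name is hypothetical and the actual driver call site may differ:

	/* Illustrative sketch only -- not part of the driver source. */
	static s32 example_after_link_up(struct e1000_hw *hw)
	{
		s32 ret_val = 0;

		/* the workaround itself bails out for non-82578 PHYs */
		if (hw->phy.type == e1000_phy_82578)
			ret_val = e1000_link_stall_workaround_hv(hw);

		return ret_val;
	}
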
3209 | |||
3210 | /** | ||
3211 | * e1000_check_polarity_82577 - Checks the polarity. | ||
3212 | * @hw: pointer to the HW structure | ||
3213 | * | ||
3214 | * Success returns 0, Failure returns -E1000_ERR_PHY (-2) | ||
3215 | * | ||
3216 | * Polarity is determined based on the PHY specific status register. | ||
3217 | **/ | ||
3218 | s32 e1000_check_polarity_82577(struct e1000_hw *hw) | ||
3219 | { | ||
3220 | struct e1000_phy_info *phy = &hw->phy; | ||
3221 | s32 ret_val; | ||
3222 | u16 data; | ||
3223 | |||
3224 | ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); | ||
3225 | |||
3226 | if (!ret_val) | ||
3227 | phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) | ||
3228 | ? e1000_rev_polarity_reversed | ||
3229 | : e1000_rev_polarity_normal; | ||
3230 | |||
3231 | return ret_val; | ||
3232 | } | ||
3233 | |||
3234 | /** | ||
3235 | * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY | ||
3236 | * @hw: pointer to the HW structure | ||
3237 | * | ||
3238 | * Calls the PHY setup function to force speed and duplex. | ||
3239 | **/ | ||
3240 | s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) | ||
3241 | { | ||
3242 | struct e1000_phy_info *phy = &hw->phy; | ||
3243 | s32 ret_val; | ||
3244 | u16 phy_data; | ||
3245 | bool link; | ||
3246 | |||
3247 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); | ||
3248 | if (ret_val) | ||
3249 | goto out; | ||
3250 | |||
3251 | e1000e_phy_force_speed_duplex_setup(hw, &phy_data); | ||
3252 | |||
3253 | ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); | ||
3254 | if (ret_val) | ||
3255 | goto out; | ||
3256 | |||
3257 | udelay(1); | ||
3258 | |||
3259 | if (phy->autoneg_wait_to_complete) { | ||
3260 | e_dbg("Waiting for forced speed/duplex link on 82577 phy\n"); | ||
3261 | |||
3262 | ret_val = e1000e_phy_has_link_generic(hw, | ||
3263 | PHY_FORCE_LIMIT, | ||
3264 | 100000, | ||
3265 | &link); | ||
3266 | if (ret_val) | ||
3267 | goto out; | ||
3268 | |||
3269 | if (!link) | ||
3270 | e_dbg("Link taking longer than expected.\n"); | ||
3271 | |||
3272 | /* Try once more */ | ||
3273 | ret_val = e1000e_phy_has_link_generic(hw, | ||
3274 | PHY_FORCE_LIMIT, | ||
3275 | 100000, | ||
3276 | &link); | ||
3277 | if (ret_val) | ||
3278 | goto out; | ||
3279 | } | ||
3280 | |||
3281 | out: | ||
3282 | return ret_val; | ||
3283 | } | ||
3284 | |||
3285 | /** | ||
3286 | * e1000_get_phy_info_82577 - Retrieve I82577 PHY information | ||
3287 | * @hw: pointer to the HW structure | ||
3288 | * | ||
3289 | * Read PHY status to determine if link is up. If link is up, then | ||
3290 | * set/determine 10base-T extended distance and polarity correction. Read | ||
3291 | * PHY port status to determine MDI/MDIx and speed. Based on the speed, | ||
3292 | * determine the cable length, local and remote receiver. | ||
3293 | **/ | ||
3294 | s32 e1000_get_phy_info_82577(struct e1000_hw *hw) | ||
3295 | { | ||
3296 | struct e1000_phy_info *phy = &hw->phy; | ||
3297 | s32 ret_val; | ||
3298 | u16 data; | ||
3299 | bool link; | ||
3300 | |||
3301 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | ||
3302 | if (ret_val) | ||
3303 | goto out; | ||
3304 | |||
3305 | if (!link) { | ||
3306 | e_dbg("Phy info is only valid if link is up\n"); | ||
3307 | ret_val = -E1000_ERR_CONFIG; | ||
3308 | goto out; | ||
3309 | } | ||
3310 | |||
3311 | phy->polarity_correction = true; | ||
3312 | |||
3313 | ret_val = e1000_check_polarity_82577(hw); | ||
3314 | if (ret_val) | ||
3315 | goto out; | ||
3316 | |||
3317 | ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); | ||
3318 | if (ret_val) | ||
3319 | goto out; | ||
3320 | |||
3321 | phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false; | ||
3322 | |||
3323 | if ((data & I82577_PHY_STATUS2_SPEED_MASK) == | ||
3324 | I82577_PHY_STATUS2_SPEED_1000MBPS) { | ||
3325 | ret_val = hw->phy.ops.get_cable_length(hw); | ||
3326 | if (ret_val) | ||
3327 | goto out; | ||
3328 | |||
3329 | ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); | ||
3330 | if (ret_val) | ||
3331 | goto out; | ||
3332 | |||
3333 | phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) | ||
3334 | ? e1000_1000t_rx_status_ok | ||
3335 | : e1000_1000t_rx_status_not_ok; | ||
3336 | |||
3337 | phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) | ||
3338 | ? e1000_1000t_rx_status_ok | ||
3339 | : e1000_1000t_rx_status_not_ok; | ||
3340 | } else { | ||
3341 | phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; | ||
3342 | phy->local_rx = e1000_1000t_rx_status_undefined; | ||
3343 | phy->remote_rx = e1000_1000t_rx_status_undefined; | ||
3344 | } | ||
3345 | |||
3346 | out: | ||
3347 | return ret_val; | ||
3348 | } | ||
3349 | |||
3350 | /** | ||
3351 | * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY | ||
3352 | * @hw: pointer to the HW structure | ||
3353 | * | ||
3354 | * Reads the diagnostic status register and verifies result is valid before | ||
3355 | * placing it in the phy_cable_length field. | ||
3356 | **/ | ||
3357 | s32 e1000_get_cable_length_82577(struct e1000_hw *hw) | ||
3358 | { | ||
3359 | struct e1000_phy_info *phy = &hw->phy; | ||
3360 | s32 ret_val; | ||
3361 | u16 phy_data, length; | ||
3362 | |||
3363 | ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data); | ||
3364 | if (ret_val) | ||
3365 | goto out; | ||
3366 | |||
3367 | length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >> | ||
3368 | I82577_DSTATUS_CABLE_LENGTH_SHIFT; | ||
3369 | |||
3370 | if (length == E1000_CABLE_LENGTH_UNDEFINED) | ||
3371 | ret_val = -E1000_ERR_PHY; | ||
3372 | |||
3373 | phy->cable_length = length; | ||
3374 | |||
3375 | out: | ||
3376 | return ret_val; | ||
3377 | } | ||