Diffstat (limited to 'drivers/net/e1000e/ethtool.c')
-rw-r--r-- | drivers/net/e1000e/ethtool.c | 1774
1 file changed, 1774 insertions, 0 deletions
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
new file mode 100644
index 000000000000..0e80406bfbd7
--- /dev/null
+++ b/drivers/net/e1000e/ethtool.c
@@ -0,0 +1,1774 @@
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/1000 Linux driver | ||
4 | Copyright(c) 1999 - 2007 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | /* ethtool support for e1000 */ | ||
30 | |||
31 | #include <linux/netdevice.h> | ||
32 | #include <linux/ethtool.h> | ||
33 | #include <linux/pci.h> | ||
34 | #include <linux/delay.h> | ||
35 | |||
36 | #include "e1000.h" | ||
37 | |||
38 | struct e1000_stats { | ||
39 | char stat_string[ETH_GSTRING_LEN]; | ||
40 | int sizeof_stat; | ||
41 | int stat_offset; | ||
42 | }; | ||
43 | |||
44 | #define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \ | ||
45 | offsetof(struct e1000_adapter, m) | ||
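/*
 * Each entry in the table below records the size and byte offset of a
 * counter inside struct e1000_adapter, so a generic stats handler can copy
 * the value without knowing the field name.  A minimal sketch of such a
 * consumer (illustrative only, not part of this patch):
 *
 *	char *p = (char *)adapter + e1000_gstrings_stats[i].stat_offset;
 *	data[i] = (e1000_gstrings_stats[i].sizeof_stat == sizeof(u64)) ?
 *		  *(u64 *)p : *(u32 *)p;
 */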
46 | static const struct e1000_stats e1000_gstrings_stats[] = { | ||
47 | { "rx_packets", E1000_STAT(stats.gprc) }, | ||
48 | { "tx_packets", E1000_STAT(stats.gptc) }, | ||
49 | { "rx_bytes", E1000_STAT(stats.gorcl) }, | ||
50 | { "tx_bytes", E1000_STAT(stats.gotcl) }, | ||
51 | { "rx_broadcast", E1000_STAT(stats.bprc) }, | ||
52 | { "tx_broadcast", E1000_STAT(stats.bptc) }, | ||
53 | { "rx_multicast", E1000_STAT(stats.mprc) }, | ||
54 | { "tx_multicast", E1000_STAT(stats.mptc) }, | ||
55 | { "rx_errors", E1000_STAT(net_stats.rx_errors) }, | ||
56 | { "tx_errors", E1000_STAT(net_stats.tx_errors) }, | ||
57 | { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, | ||
58 | { "multicast", E1000_STAT(stats.mprc) }, | ||
59 | { "collisions", E1000_STAT(stats.colc) }, | ||
60 | { "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) }, | ||
61 | { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, | ||
62 | { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, | ||
63 | { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, | ||
64 | { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, | ||
65 | { "rx_missed_errors", E1000_STAT(stats.mpc) }, | ||
66 | { "tx_aborted_errors", E1000_STAT(stats.ecol) }, | ||
67 | { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, | ||
68 | { "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) }, | ||
69 | { "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) }, | ||
70 | { "tx_window_errors", E1000_STAT(stats.latecol) }, | ||
71 | { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, | ||
72 | { "tx_deferred_ok", E1000_STAT(stats.dc) }, | ||
73 | { "tx_single_coll_ok", E1000_STAT(stats.scc) }, | ||
74 | { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, | ||
75 | { "tx_timeout_count", E1000_STAT(tx_timeout_count) }, | ||
76 | { "tx_restart_queue", E1000_STAT(restart_queue) }, | ||
77 | { "rx_long_length_errors", E1000_STAT(stats.roc) }, | ||
78 | { "rx_short_length_errors", E1000_STAT(stats.ruc) }, | ||
79 | { "rx_align_errors", E1000_STAT(stats.algnerrc) }, | ||
80 | { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) }, | ||
81 | { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) }, | ||
82 | { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) }, | ||
83 | { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) }, | ||
84 | { "tx_flow_control_xon", E1000_STAT(stats.xontxc) }, | ||
85 | { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, | ||
86 | { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, | ||
87 | { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, | ||
88 | { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, | ||
89 | { "rx_header_split", E1000_STAT(rx_hdr_split) }, | ||
90 | { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, | ||
91 | { "tx_smbus", E1000_STAT(stats.mgptc) }, | ||
92 | { "rx_smbus", E1000_STAT(stats.mgprc) }, | ||
93 | { "dropped_smbus", E1000_STAT(stats.mgpdc) }, | ||
94 | { "rx_dma_failed", E1000_STAT(rx_dma_failed) }, | ||
95 | { "tx_dma_failed", E1000_STAT(tx_dma_failed) }, | ||
96 | }; | ||
97 | |||
98 | #define E1000_GLOBAL_STATS_LEN \ | ||
99 | sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) | ||
100 | #define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN) | ||
101 | static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { | ||
102 | "Register test (offline)", "Eeprom test (offline)", | ||
103 | "Interrupt test (offline)", "Loopback test (offline)", | ||
104 | "Link test (on/offline)" | ||
105 | }; | ||
106 | #define E1000_TEST_LEN sizeof(e1000_gstrings_test) / ETH_GSTRING_LEN | ||
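/*
 * Note: the five test strings above are reported in the same order that
 * e1000_diag_test() fills its result array: data[0] = register test,
 * data[1] = EEPROM, data[2] = interrupt, data[3] = loopback, data[4] = link.
 */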
107 | |||
108 | static int e1000_get_settings(struct net_device *netdev, | ||
109 | struct ethtool_cmd *ecmd) | ||
110 | { | ||
111 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
112 | struct e1000_hw *hw = &adapter->hw; | ||
113 | |||
114 | if (hw->media_type == e1000_media_type_copper) { | ||
115 | |||
116 | ecmd->supported = (SUPPORTED_10baseT_Half | | ||
117 | SUPPORTED_10baseT_Full | | ||
118 | SUPPORTED_100baseT_Half | | ||
119 | SUPPORTED_100baseT_Full | | ||
120 | SUPPORTED_1000baseT_Full | | ||
121 | SUPPORTED_Autoneg | | ||
122 | SUPPORTED_TP); | ||
123 | if (hw->phy.type == e1000_phy_ife) | ||
124 | ecmd->supported &= ~SUPPORTED_1000baseT_Full; | ||
125 | ecmd->advertising = ADVERTISED_TP; | ||
126 | |||
127 | if (hw->mac.autoneg == 1) { | ||
128 | ecmd->advertising |= ADVERTISED_Autoneg; | ||
129 | /* the e1000 autoneg seems to match ethtool nicely */ | ||
130 | ecmd->advertising |= hw->phy.autoneg_advertised; | ||
131 | } | ||
132 | |||
133 | ecmd->port = PORT_TP; | ||
134 | ecmd->phy_address = hw->phy.addr; | ||
135 | ecmd->transceiver = XCVR_INTERNAL; | ||
136 | |||
137 | } else { | ||
138 | ecmd->supported = (SUPPORTED_1000baseT_Full | | ||
139 | SUPPORTED_FIBRE | | ||
140 | SUPPORTED_Autoneg); | ||
141 | |||
142 | ecmd->advertising = (ADVERTISED_1000baseT_Full | | ||
143 | ADVERTISED_FIBRE | | ||
144 | ADVERTISED_Autoneg); | ||
145 | |||
146 | ecmd->port = PORT_FIBRE; | ||
147 | ecmd->transceiver = XCVR_EXTERNAL; | ||
148 | } | ||
149 | |||
150 | if (er32(STATUS) & E1000_STATUS_LU) { | ||
151 | |||
152 | adapter->hw.mac.ops.get_link_up_info(hw, &adapter->link_speed, | ||
153 | &adapter->link_duplex); | ||
154 | ecmd->speed = adapter->link_speed; | ||
155 | |||
156 | /* unfortunately FULL_DUPLEX != DUPLEX_FULL | ||
157 | * and HALF_DUPLEX != DUPLEX_HALF */ | ||
158 | |||
159 | if (adapter->link_duplex == FULL_DUPLEX) | ||
160 | ecmd->duplex = DUPLEX_FULL; | ||
161 | else | ||
162 | ecmd->duplex = DUPLEX_HALF; | ||
163 | } else { | ||
164 | ecmd->speed = -1; | ||
165 | ecmd->duplex = -1; | ||
166 | } | ||
167 | |||
168 | ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || | ||
169 | hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; | ||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | ||
174 | { | ||
175 | struct e1000_mac_info *mac = &adapter->hw.mac; | ||
176 | |||
177 | mac->autoneg = 0; | ||
178 | |||
179 | /* Fiber NICs only allow 1000 Mbps Full duplex */ | ||
180 | if ((adapter->hw.media_type == e1000_media_type_fiber) && | ||
181 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { | ||
182 | ndev_err(adapter->netdev, "Unsupported Speed/Duplex " | ||
183 | "configuration\n"); | ||
184 | return -EINVAL; | ||
185 | } | ||
186 | |||
187 | switch (spddplx) { | ||
188 | case SPEED_10 + DUPLEX_HALF: | ||
189 | mac->forced_speed_duplex = ADVERTISE_10_HALF; | ||
190 | break; | ||
191 | case SPEED_10 + DUPLEX_FULL: | ||
192 | mac->forced_speed_duplex = ADVERTISE_10_FULL; | ||
193 | break; | ||
194 | case SPEED_100 + DUPLEX_HALF: | ||
195 | mac->forced_speed_duplex = ADVERTISE_100_HALF; | ||
196 | break; | ||
197 | case SPEED_100 + DUPLEX_FULL: | ||
198 | mac->forced_speed_duplex = ADVERTISE_100_FULL; | ||
199 | break; | ||
200 | case SPEED_1000 + DUPLEX_FULL: | ||
201 | mac->autoneg = 1; | ||
202 | adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; | ||
203 | break; | ||
204 | case SPEED_1000 + DUPLEX_HALF: /* not supported */ | ||
205 | default: | ||
206 | ndev_err(adapter->netdev, "Unsupported Speed/Duplex " | ||
207 | "configuration\n"); | ||
208 | return -EINVAL; | ||
209 | } | ||
210 | return 0; | ||
211 | } | ||
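/*
 * The spddplx argument is the ethtool speed value plus the duplex constant
 * (see e1000_set_settings() below), so forcing 100/full with autoneg off,
 * e.g. "ethtool -s ethX speed 100 duplex full autoneg off" (interface name
 * illustrative), arrives here as SPEED_100 + DUPLEX_FULL.
 */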
212 | |||
213 | static int e1000_set_settings(struct net_device *netdev, | ||
214 | struct ethtool_cmd *ecmd) | ||
215 | { | ||
216 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
217 | struct e1000_hw *hw = &adapter->hw; | ||
218 | |||
219 | /* When SoL/IDER sessions are active, autoneg/speed/duplex | ||
220 | * cannot be changed */ | ||
221 | if (e1000_check_reset_block(hw)) { | ||
222 | ndev_err(netdev, "Cannot change link " | ||
223 | "characteristics when SoL/IDER is active.\n"); | ||
224 | return -EINVAL; | ||
225 | } | ||
226 | |||
227 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | ||
228 | msleep(1); | ||
229 | |||
230 | if (ecmd->autoneg == AUTONEG_ENABLE) { | ||
231 | hw->mac.autoneg = 1; | ||
232 | if (hw->media_type == e1000_media_type_fiber) | ||
233 | hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | | ||
234 | ADVERTISED_FIBRE | | ||
235 | ADVERTISED_Autoneg; | ||
236 | else | ||
237 | hw->phy.autoneg_advertised = ecmd->advertising | | ||
238 | ADVERTISED_TP | | ||
239 | ADVERTISED_Autoneg; | ||
240 | ecmd->advertising = hw->phy.autoneg_advertised; | ||
241 | } else { | ||
242 | if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { | ||
243 | clear_bit(__E1000_RESETTING, &adapter->state); | ||
244 | return -EINVAL; | ||
245 | } | ||
246 | } | ||
247 | |||
248 | /* reset the link */ | ||
249 | |||
250 | if (netif_running(adapter->netdev)) { | ||
251 | e1000e_down(adapter); | ||
252 | e1000e_up(adapter); | ||
253 | } else { | ||
254 | e1000e_reset(adapter); | ||
255 | } | ||
256 | |||
257 | clear_bit(__E1000_RESETTING, &adapter->state); | ||
258 | return 0; | ||
259 | } | ||
260 | |||
261 | static void e1000_get_pauseparam(struct net_device *netdev, | ||
262 | struct ethtool_pauseparam *pause) | ||
263 | { | ||
264 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
265 | struct e1000_hw *hw = &adapter->hw; | ||
266 | |||
267 | pause->autoneg = | ||
268 | (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); | ||
269 | |||
270 | if (hw->mac.fc == e1000_fc_rx_pause) { | ||
271 | pause->rx_pause = 1; | ||
272 | } else if (hw->mac.fc == e1000_fc_tx_pause) { | ||
273 | pause->tx_pause = 1; | ||
274 | } else if (hw->mac.fc == e1000_fc_full) { | ||
275 | pause->rx_pause = 1; | ||
276 | pause->tx_pause = 1; | ||
277 | } | ||
278 | } | ||
279 | |||
280 | static int e1000_set_pauseparam(struct net_device *netdev, | ||
281 | struct ethtool_pauseparam *pause) | ||
282 | { | ||
283 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
284 | struct e1000_hw *hw = &adapter->hw; | ||
285 | int retval = 0; | ||
286 | |||
287 | adapter->fc_autoneg = pause->autoneg; | ||
288 | |||
289 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | ||
290 | msleep(1); | ||
291 | |||
292 | if (pause->rx_pause && pause->tx_pause) | ||
293 | hw->mac.fc = e1000_fc_full; | ||
294 | else if (pause->rx_pause && !pause->tx_pause) | ||
295 | hw->mac.fc = e1000_fc_rx_pause; | ||
296 | else if (!pause->rx_pause && pause->tx_pause) | ||
297 | hw->mac.fc = e1000_fc_tx_pause; | ||
298 | else if (!pause->rx_pause && !pause->tx_pause) | ||
299 | hw->mac.fc = e1000_fc_none; | ||
300 | |||
301 | hw->mac.original_fc = hw->mac.fc; | ||
302 | |||
303 | if (adapter->fc_autoneg == AUTONEG_ENABLE) { | ||
304 | if (netif_running(adapter->netdev)) { | ||
305 | e1000e_down(adapter); | ||
306 | e1000e_up(adapter); | ||
307 | } else { | ||
308 | e1000e_reset(adapter); | ||
309 | } | ||
310 | } else { | ||
311 | retval = ((hw->media_type == e1000_media_type_fiber) ? | ||
312 | hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw)); | ||
313 | } | ||
314 | |||
315 | clear_bit(__E1000_RESETTING, &adapter->state); | ||
316 | return retval; | ||
317 | } | ||
318 | |||
319 | static u32 e1000_get_rx_csum(struct net_device *netdev) | ||
320 | { | ||
321 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
322 | return (adapter->flags & FLAG_RX_CSUM_ENABLED); | ||
323 | } | ||
324 | |||
325 | static int e1000_set_rx_csum(struct net_device *netdev, u32 data) | ||
326 | { | ||
327 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
328 | |||
329 | if (data) | ||
330 | adapter->flags |= FLAG_RX_CSUM_ENABLED; | ||
331 | else | ||
332 | adapter->flags &= ~FLAG_RX_CSUM_ENABLED; | ||
333 | |||
334 | if (netif_running(netdev)) | ||
335 | e1000e_reinit_locked(adapter); | ||
336 | else | ||
337 | e1000e_reset(adapter); | ||
338 | return 0; | ||
339 | } | ||
340 | |||
341 | static u32 e1000_get_tx_csum(struct net_device *netdev) | ||
342 | { | ||
343 | return ((netdev->features & NETIF_F_HW_CSUM) != 0); | ||
344 | } | ||
345 | |||
346 | static int e1000_set_tx_csum(struct net_device *netdev, u32 data) | ||
347 | { | ||
348 | if (data) | ||
349 | netdev->features |= NETIF_F_HW_CSUM; | ||
350 | else | ||
351 | netdev->features &= ~NETIF_F_HW_CSUM; | ||
352 | |||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | static int e1000_set_tso(struct net_device *netdev, u32 data) | ||
357 | { | ||
358 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
359 | |||
360 | if (data) { | ||
361 | netdev->features |= NETIF_F_TSO; | ||
362 | netdev->features |= NETIF_F_TSO6; | ||
363 | } else { | ||
364 | netdev->features &= ~NETIF_F_TSO; | ||
365 | netdev->features &= ~NETIF_F_TSO6; | ||
366 | } | ||
367 | |||
368 | ndev_info(netdev, "TSO is %s\n", | ||
369 | data ? "Enabled" : "Disabled"); | ||
370 | adapter->flags |= FLAG_TSO_FORCE; | ||
371 | return 0; | ||
372 | } | ||
373 | |||
374 | static u32 e1000_get_msglevel(struct net_device *netdev) | ||
375 | { | ||
376 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
377 | return adapter->msg_enable; | ||
378 | } | ||
379 | |||
380 | static void e1000_set_msglevel(struct net_device *netdev, u32 data) | ||
381 | { | ||
382 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
383 | adapter->msg_enable = data; | ||
384 | } | ||
385 | |||
386 | static int e1000_get_regs_len(struct net_device *netdev) | ||
387 | { | ||
388 | #define E1000_REGS_LEN 32 /* overestimate */ | ||
389 | return E1000_REGS_LEN * sizeof(u32); | ||
390 | } | ||
391 | |||
392 | static void e1000_get_regs(struct net_device *netdev, | ||
393 | struct ethtool_regs *regs, void *p) | ||
394 | { | ||
395 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
396 | struct e1000_hw *hw = &adapter->hw; | ||
397 | u32 *regs_buff = p; | ||
398 | u16 phy_data; | ||
399 | u8 revision_id; | ||
400 | |||
401 | memset(p, 0, E1000_REGS_LEN * sizeof(u32)); | ||
402 | |||
403 | pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); | ||
404 | |||
405 | regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device; | ||
406 | |||
407 | regs_buff[0] = er32(CTRL); | ||
408 | regs_buff[1] = er32(STATUS); | ||
409 | |||
410 | regs_buff[2] = er32(RCTL); | ||
411 | regs_buff[3] = er32(RDLEN); | ||
412 | regs_buff[4] = er32(RDH); | ||
413 | regs_buff[5] = er32(RDT); | ||
414 | regs_buff[6] = er32(RDTR); | ||
415 | |||
416 | regs_buff[7] = er32(TCTL); | ||
417 | regs_buff[8] = er32(TDLEN); | ||
418 | regs_buff[9] = er32(TDH); | ||
419 | regs_buff[10] = er32(TDT); | ||
420 | regs_buff[11] = er32(TIDV); | ||
421 | |||
422 | regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ | ||
423 | if (hw->phy.type == e1000_phy_m88) { | ||
424 | e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); | ||
425 | regs_buff[13] = (u32)phy_data; /* cable length */ | ||
426 | regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ | ||
427 | regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ | ||
428 | regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ | ||
429 | e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | ||
430 | regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ | ||
431 | regs_buff[18] = regs_buff[13]; /* cable polarity */ | ||
432 | regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ | ||
433 | regs_buff[20] = regs_buff[17]; /* polarity correction */ | ||
434 | /* phy receive errors */ | ||
435 | regs_buff[22] = adapter->phy_stats.receive_errors; | ||
436 | regs_buff[23] = regs_buff[13]; /* mdix mode */ | ||
437 | } | ||
438 | regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ | ||
439 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); | ||
440 | regs_buff[24] = (u32)phy_data; /* phy local receiver status */ | ||
441 | regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ | ||
442 | } | ||
443 | |||
444 | static int e1000_get_eeprom_len(struct net_device *netdev) | ||
445 | { | ||
446 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
447 | return adapter->hw.nvm.word_size * 2; | ||
448 | } | ||
449 | |||
450 | static int e1000_get_eeprom(struct net_device *netdev, | ||
451 | struct ethtool_eeprom *eeprom, u8 *bytes) | ||
452 | { | ||
453 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
454 | struct e1000_hw *hw = &adapter->hw; | ||
455 | u16 *eeprom_buff; | ||
456 | int first_word; | ||
457 | int last_word; | ||
458 | int ret_val = 0; | ||
459 | u16 i; | ||
460 | |||
461 | if (eeprom->len == 0) | ||
462 | return -EINVAL; | ||
463 | |||
464 | eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16); | ||
465 | |||
466 | first_word = eeprom->offset >> 1; | ||
467 | last_word = (eeprom->offset + eeprom->len - 1) >> 1; | ||
468 | |||
469 | eeprom_buff = kmalloc(sizeof(u16) * | ||
470 | (last_word - first_word + 1), GFP_KERNEL); | ||
471 | if (!eeprom_buff) | ||
472 | return -ENOMEM; | ||
473 | |||
474 | if (hw->nvm.type == e1000_nvm_eeprom_spi) { | ||
475 | ret_val = e1000_read_nvm(hw, first_word, | ||
476 | last_word - first_word + 1, | ||
477 | eeprom_buff); | ||
478 | } else { | ||
479 | for (i = 0; i < last_word - first_word + 1; i++) { | ||
480 | ret_val = e1000_read_nvm(hw, first_word + i, 1, | ||
481 | &eeprom_buff[i]); | ||
482 | if (ret_val) | ||
483 | break; | ||
484 | } | ||
485 | } | ||
486 | |||
487 | /* Device's eeprom is always little-endian, word addressable */ | ||
488 | for (i = 0; i < last_word - first_word + 1; i++) | ||
489 | le16_to_cpus(&eeprom_buff[i]); | ||
490 | |||
491 | memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); | ||
492 | kfree(eeprom_buff); | ||
493 | |||
494 | return ret_val; | ||
495 | } | ||
496 | |||
497 | static int e1000_set_eeprom(struct net_device *netdev, | ||
498 | struct ethtool_eeprom *eeprom, u8 *bytes) | ||
499 | { | ||
500 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
501 | struct e1000_hw *hw = &adapter->hw; | ||
502 | u16 *eeprom_buff; | ||
503 | void *ptr; | ||
504 | int max_len; | ||
505 | int first_word; | ||
506 | int last_word; | ||
507 | int ret_val = 0; | ||
508 | u16 i; | ||
509 | |||
510 | if (eeprom->len == 0) | ||
511 | return -EOPNOTSUPP; | ||
512 | |||
513 | if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16))) | ||
514 | return -EFAULT; | ||
515 | |||
516 | max_len = hw->nvm.word_size * 2; | ||
517 | |||
518 | first_word = eeprom->offset >> 1; | ||
519 | last_word = (eeprom->offset + eeprom->len - 1) >> 1; | ||
520 | eeprom_buff = kmalloc(max_len, GFP_KERNEL); | ||
521 | if (!eeprom_buff) | ||
522 | return -ENOMEM; | ||
523 | |||
524 | ptr = (void *)eeprom_buff; | ||
525 | |||
526 | if (eeprom->offset & 1) { | ||
527 | /* need read/modify/write of first changed EEPROM word */ | ||
528 | /* only the second byte of the word is being modified */ | ||
529 | ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]); | ||
530 | ptr++; | ||
531 | } | ||
532 | if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) | ||
533 | /* need read/modify/write of last changed EEPROM word */ | ||
534 | /* only the first byte of the word is being modified */ | ||
535 | ret_val = e1000_read_nvm(hw, last_word, 1, | ||
536 | &eeprom_buff[last_word - first_word]); | ||
537 | |||
538 | /* Device's eeprom is always little-endian, word addressable */ | ||
539 | for (i = 0; i < last_word - first_word + 1; i++) | ||
540 | le16_to_cpus(&eeprom_buff[i]); | ||
541 | |||
542 | memcpy(ptr, bytes, eeprom->len); | ||
543 | |||
544 | for (i = 0; i < last_word - first_word + 1; i++) | ||
545 | eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); | ||
546 | |||
547 | ret_val = e1000_write_nvm(hw, first_word, | ||
548 | last_word - first_word + 1, eeprom_buff); | ||
549 | |||
550 | /* Update the checksum over the first part of the EEPROM if needed | ||
551 | * and flush shadow RAM for 82573 controllers */ | ||
552 | if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) || | ||
553 | (hw->mac.type == e1000_82573))) | ||
554 | e1000e_update_nvm_checksum(hw); | ||
555 | |||
556 | kfree(eeprom_buff); | ||
557 | return ret_val; | ||
558 | } | ||
559 | |||
560 | static void e1000_get_drvinfo(struct net_device *netdev, | ||
561 | struct ethtool_drvinfo *drvinfo) | ||
562 | { | ||
563 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
564 | char firmware_version[32]; | ||
565 | u16 eeprom_data; | ||
566 | |||
567 | strncpy(drvinfo->driver, e1000e_driver_name, 32); | ||
568 | strncpy(drvinfo->version, e1000e_driver_version, 32); | ||
569 | |||
570 | /* EEPROM image version # is reported as firmware version # for | ||
571 | * PCI-E controllers */ | ||
572 | e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data); | ||
573 | sprintf(firmware_version, "%d.%d-%d", | ||
574 | (eeprom_data & 0xF000) >> 12, | ||
575 | (eeprom_data & 0x0FF0) >> 4, | ||
576 | eeprom_data & 0x000F); | ||
577 | |||
578 | strncpy(drvinfo->fw_version, firmware_version, 32); | ||
579 | strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); | ||
580 | drvinfo->n_stats = E1000_STATS_LEN; | ||
581 | drvinfo->testinfo_len = E1000_TEST_LEN; | ||
582 | drvinfo->regdump_len = e1000_get_regs_len(netdev); | ||
583 | drvinfo->eedump_len = e1000_get_eeprom_len(netdev); | ||
584 | } | ||
585 | |||
586 | static void e1000_get_ringparam(struct net_device *netdev, | ||
587 | struct ethtool_ringparam *ring) | ||
588 | { | ||
589 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
590 | struct e1000_ring *tx_ring = adapter->tx_ring; | ||
591 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
592 | |||
593 | ring->rx_max_pending = E1000_MAX_RXD; | ||
594 | ring->tx_max_pending = E1000_MAX_TXD; | ||
595 | ring->rx_mini_max_pending = 0; | ||
596 | ring->rx_jumbo_max_pending = 0; | ||
597 | ring->rx_pending = rx_ring->count; | ||
598 | ring->tx_pending = tx_ring->count; | ||
599 | ring->rx_mini_pending = 0; | ||
600 | ring->rx_jumbo_pending = 0; | ||
601 | } | ||
602 | |||
603 | static int e1000_set_ringparam(struct net_device *netdev, | ||
604 | struct ethtool_ringparam *ring) | ||
605 | { | ||
606 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
607 | struct e1000_ring *tx_ring, *tx_old; | ||
608 | struct e1000_ring *rx_ring, *rx_old; | ||
609 | int err; | ||
610 | |||
611 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | ||
612 | return -EINVAL; | ||
613 | |||
614 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | ||
615 | msleep(1); | ||
616 | |||
617 | if (netif_running(adapter->netdev)) | ||
618 | e1000e_down(adapter); | ||
619 | |||
620 | tx_old = adapter->tx_ring; | ||
621 | rx_old = adapter->rx_ring; | ||
622 | |||
623 | err = -ENOMEM; | ||
624 | tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); | ||
625 | if (!tx_ring) | ||
626 | goto err_alloc_tx; | ||
627 | |||
628 | rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); | ||
629 | if (!rx_ring) | ||
630 | goto err_alloc_rx; | ||
631 | |||
632 | adapter->tx_ring = tx_ring; | ||
633 | adapter->rx_ring = rx_ring; | ||
634 | |||
635 | rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); | ||
636 | rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD)); | ||
637 | rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE); | ||
638 | |||
639 | tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); | ||
640 | tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD)); | ||
641 | tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE); | ||
642 | |||
643 | if (netif_running(adapter->netdev)) { | ||
644 | /* Try to get new resources before deleting old */ | ||
645 | err = e1000e_setup_rx_resources(adapter); | ||
646 | if (err) | ||
647 | goto err_setup_rx; | ||
648 | err = e1000e_setup_tx_resources(adapter); | ||
649 | if (err) | ||
650 | goto err_setup_tx; | ||
651 | |||
652 | /* save the new, restore the old in order to free it, | ||
653 | * then restore the new back again */ | ||
654 | adapter->rx_ring = rx_old; | ||
655 | adapter->tx_ring = tx_old; | ||
656 | e1000e_free_rx_resources(adapter); | ||
657 | e1000e_free_tx_resources(adapter); | ||
658 | kfree(tx_old); | ||
659 | kfree(rx_old); | ||
660 | adapter->rx_ring = rx_ring; | ||
661 | adapter->tx_ring = tx_ring; | ||
662 | err = e1000e_up(adapter); | ||
663 | if (err) | ||
664 | goto err_setup; | ||
665 | } | ||
666 | |||
667 | clear_bit(__E1000_RESETTING, &adapter->state); | ||
668 | return 0; | ||
669 | err_setup_tx: | ||
670 | e1000e_free_rx_resources(adapter); | ||
671 | err_setup_rx: | ||
672 | adapter->rx_ring = rx_old; | ||
673 | adapter->tx_ring = tx_old; | ||
674 | kfree(rx_ring); | ||
675 | err_alloc_rx: | ||
676 | kfree(tx_ring); | ||
677 | err_alloc_tx: | ||
678 | e1000e_up(adapter); | ||
679 | err_setup: | ||
680 | clear_bit(__E1000_RESETTING, &adapter->state); | ||
681 | return err; | ||
682 | } | ||
683 | |||
684 | #define REG_PATTERN_TEST(R, M, W) REG_PATTERN_TEST_ARRAY(R, 0, M, W) | ||
685 | #define REG_PATTERN_TEST_ARRAY(reg, offset, mask, writeable) \ | ||
686 | { \ | ||
687 | u32 _pat; \ | ||
688 | u32 _value; \ | ||
689 | u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ | ||
690 | for (_pat = 0; _pat < ARRAY_SIZE(_test); _pat++) { \ | ||
691 | E1000_WRITE_REG_ARRAY(hw, reg, offset, \ | ||
692 | (_test[_pat] & writeable)); \ | ||
693 | _value = E1000_READ_REG_ARRAY(hw, reg, offset); \ | ||
694 | if (_value != (_test[_pat] & writeable & mask)) { \ | ||
695 | ndev_err(netdev, "pattern test reg %04X " \ | ||
696 | "failed: got 0x%08X expected 0x%08X\n", \ | ||
697 | reg + offset, \ | ||
698 | _value, (_test[_pat] & writeable & mask)); \ | ||
699 | *data = reg; \ | ||
700 | return 1; \ | ||
701 | } \ | ||
702 | } \ | ||
703 | } | ||
704 | |||
705 | #define REG_SET_AND_CHECK(R, M, W) \ | ||
706 | { \ | ||
707 | u32 _value; \ | ||
708 | __ew32(hw, R, W & M); \ | ||
709 | _value = __er32(hw, R); \ | ||
710 | if ((W & M) != (_value & M)) { \ | ||
711 | ndev_err(netdev, "set/check reg %04X test failed: " \ | ||
712 | "got 0x%08X expected 0x%08X\n", R, (_value & M), \ | ||
713 | (W & M)); \ | ||
714 | *data = R; \ | ||
715 | return 1; \ | ||
716 | } \ | ||
717 | } | ||
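/*
 * Both helper macros above bail out of e1000_reg_test() on the first
 * mismatch: they log the failure, store the offending register offset in
 * *data and return 1, which e1000_diag_test() reports as a failed
 * "Register test (offline)".
 */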
718 | |||
719 | static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | ||
720 | { | ||
721 | struct e1000_hw *hw = &adapter->hw; | ||
722 | struct e1000_mac_info *mac = &adapter->hw.mac; | ||
723 | struct net_device *netdev = adapter->netdev; | ||
724 | u32 value; | ||
725 | u32 before; | ||
726 | u32 after; | ||
727 | u32 i; | ||
728 | u32 toggle; | ||
729 | |||
730 | /* The status register is Read Only, so a write should fail. | ||
731 | * Some bits that get toggled are ignored. | ||
732 | */ | ||
733 | switch (mac->type) { | ||
734 | /* there are several bits on newer hardware that are r/w */ | ||
735 | case e1000_82571: | ||
736 | case e1000_82572: | ||
737 | case e1000_80003es2lan: | ||
738 | toggle = 0x7FFFF3FF; | ||
739 | break; | ||
740 | case e1000_82573: | ||
741 | case e1000_ich8lan: | ||
742 | case e1000_ich9lan: | ||
743 | toggle = 0x7FFFF033; | ||
744 | break; | ||
745 | default: | ||
746 | toggle = 0xFFFFF833; | ||
747 | break; | ||
748 | } | ||
749 | |||
750 | before = er32(STATUS); | ||
751 | value = (er32(STATUS) & toggle); | ||
752 | ew32(STATUS, toggle); | ||
753 | after = er32(STATUS) & toggle; | ||
754 | if (value != after) { | ||
755 | ndev_err(netdev, "failed STATUS register test got: " | ||
756 | "0x%08X expected: 0x%08X\n", after, value); | ||
757 | *data = 1; | ||
758 | return 1; | ||
759 | } | ||
760 | /* restore previous status */ | ||
761 | ew32(STATUS, before); | ||
762 | |||
763 | if ((mac->type != e1000_ich8lan) && | ||
764 | (mac->type != e1000_ich9lan)) { | ||
765 | REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); | ||
766 | REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); | ||
767 | REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); | ||
768 | REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF); | ||
769 | } | ||
770 | |||
771 | REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); | ||
772 | REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); | ||
773 | REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF); | ||
774 | REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF); | ||
775 | REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF); | ||
776 | REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); | ||
777 | REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); | ||
778 | REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); | ||
779 | REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); | ||
780 | REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF); | ||
781 | |||
782 | REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); | ||
783 | |||
784 | before = (((mac->type == e1000_ich8lan) || | ||
785 | (mac->type == e1000_ich9lan)) ? 0x06C3B33E : 0x06DFB3FE); | ||
786 | REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); | ||
787 | REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); | ||
788 | |||
789 | REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x01FFFFFF); | ||
790 | REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFF000, 0xFFFFFFFF); | ||
791 | REG_PATTERN_TEST(E1000_TXCW, 0x0000FFFF, 0x0000FFFF); | ||
792 | REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFF000, 0xFFFFFFFF); | ||
793 | |||
794 | for (i = 0; i < mac->mta_reg_count; i++) | ||
795 | REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); | ||
796 | |||
797 | *data = 0; | ||
798 | return 0; | ||
799 | } | ||
800 | |||
801 | static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) | ||
802 | { | ||
803 | u16 temp; | ||
804 | u16 checksum = 0; | ||
805 | u16 i; | ||
806 | |||
807 | *data = 0; | ||
808 | /* Read and add up the contents of the EEPROM */ | ||
809 | for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { | ||
810 | if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) { | ||
811 | *data = 1; | ||
812 | break; | ||
813 | } | ||
814 | checksum += temp; | ||
815 | } | ||
816 | |||
817 | /* If Checksum is not Correct return error else test passed */ | ||
818 | if ((checksum != (u16) NVM_SUM) && !(*data)) | ||
819 | *data = 2; | ||
820 | |||
821 | return *data; | ||
822 | } | ||
823 | |||
824 | static irqreturn_t e1000_test_intr(int irq, void *data) | ||
825 | { | ||
826 | struct net_device *netdev = (struct net_device *) data; | ||
827 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
828 | struct e1000_hw *hw = &adapter->hw; | ||
829 | |||
830 | adapter->test_icr |= er32(ICR); | ||
831 | |||
832 | return IRQ_HANDLED; | ||
833 | } | ||
834 | |||
835 | static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | ||
836 | { | ||
837 | struct net_device *netdev = adapter->netdev; | ||
838 | struct e1000_hw *hw = &adapter->hw; | ||
839 | u32 mask; | ||
840 | u32 shared_int = 1; | ||
841 | u32 irq = adapter->pdev->irq; | ||
842 | int i; | ||
843 | |||
844 | *data = 0; | ||
845 | |||
846 | /* NOTE: we don't test MSI interrupts here, yet */ | ||
847 | /* Hook up test interrupt handler just for this test */ | ||
848 | if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, | ||
849 | netdev)) { | ||
850 | shared_int = 0; | ||
851 | } else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, | ||
852 | netdev->name, netdev)) { | ||
853 | *data = 1; | ||
854 | return -1; | ||
855 | } | ||
856 | ndev_info(netdev, "testing %s interrupt\n", | ||
857 | (shared_int ? "shared" : "unshared")); | ||
858 | |||
859 | /* Disable all the interrupts */ | ||
860 | ew32(IMC, 0xFFFFFFFF); | ||
861 | msleep(10); | ||
862 | |||
863 | /* Test each interrupt */ | ||
864 | for (i = 0; i < 10; i++) { | ||
865 | |||
866 | if (((adapter->hw.mac.type == e1000_ich8lan) || | ||
867 | (adapter->hw.mac.type == e1000_ich9lan)) && i == 8) | ||
868 | continue; | ||
869 | |||
870 | /* Interrupt to test */ | ||
871 | mask = 1 << i; | ||
872 | |||
873 | if (!shared_int) { | ||
874 | /* Disable the interrupt to be reported in | ||
875 | * the cause register and then force the same | ||
876 | * interrupt and see if one gets posted. If | ||
877 | * an interrupt was posted to the bus, the | ||
878 | * test failed. | ||
879 | */ | ||
880 | adapter->test_icr = 0; | ||
881 | ew32(IMC, mask); | ||
882 | ew32(ICS, mask); | ||
883 | msleep(10); | ||
884 | |||
885 | if (adapter->test_icr & mask) { | ||
886 | *data = 3; | ||
887 | break; | ||
888 | } | ||
889 | } | ||
890 | |||
891 | /* Enable the interrupt to be reported in | ||
892 | * the cause register and then force the same | ||
893 | * interrupt and see if one gets posted. If | ||
894 | * an interrupt was not posted to the bus, the | ||
895 | * test failed. | ||
896 | */ | ||
897 | adapter->test_icr = 0; | ||
898 | ew32(IMS, mask); | ||
899 | ew32(ICS, mask); | ||
900 | msleep(10); | ||
901 | |||
902 | if (!(adapter->test_icr & mask)) { | ||
903 | *data = 4; | ||
904 | break; | ||
905 | } | ||
906 | |||
907 | if (!shared_int) { | ||
908 | /* Disable the other interrupts to be reported in | ||
909 | * the cause register and then force the other | ||
910 | * interrupts and see if any get posted. If | ||
911 | * an interrupt was posted to the bus, the | ||
912 | * test failed. | ||
913 | */ | ||
914 | adapter->test_icr = 0; | ||
915 | ew32(IMC, ~mask & 0x00007FFF); | ||
916 | ew32(ICS, ~mask & 0x00007FFF); | ||
917 | msleep(10); | ||
918 | |||
919 | if (adapter->test_icr) { | ||
920 | *data = 5; | ||
921 | break; | ||
922 | } | ||
923 | } | ||
924 | } | ||
925 | |||
926 | /* Disable all the interrupts */ | ||
927 | ew32(IMC, 0xFFFFFFFF); | ||
928 | msleep(10); | ||
929 | |||
930 | /* Unhook test interrupt handler */ | ||
931 | free_irq(irq, netdev); | ||
932 | |||
933 | return *data; | ||
934 | } | ||
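/*
 * Result codes stored in *data above: 1 = the test IRQ handler could not
 * be hooked, 3 = a masked cause still raised an interrupt, 4 = an unmasked
 * cause failed to raise one, 5 = forcing the remaining masked causes still
 * produced an interrupt while only the bit under test should be enabled.
 */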
935 | |||
936 | static void e1000_free_desc_rings(struct e1000_adapter *adapter) | ||
937 | { | ||
938 | struct e1000_ring *tx_ring = &adapter->test_tx_ring; | ||
939 | struct e1000_ring *rx_ring = &adapter->test_rx_ring; | ||
940 | struct pci_dev *pdev = adapter->pdev; | ||
941 | int i; | ||
942 | |||
943 | if (tx_ring->desc && tx_ring->buffer_info) { | ||
944 | for (i = 0; i < tx_ring->count; i++) { | ||
945 | if (tx_ring->buffer_info[i].dma) | ||
946 | pci_unmap_single(pdev, | ||
947 | tx_ring->buffer_info[i].dma, | ||
948 | tx_ring->buffer_info[i].length, | ||
949 | PCI_DMA_TODEVICE); | ||
950 | if (tx_ring->buffer_info[i].skb) | ||
951 | dev_kfree_skb(tx_ring->buffer_info[i].skb); | ||
952 | } | ||
953 | } | ||
954 | |||
955 | if (rx_ring->desc && rx_ring->buffer_info) { | ||
956 | for (i = 0; i < rx_ring->count; i++) { | ||
957 | if (rx_ring->buffer_info[i].dma) | ||
958 | pci_unmap_single(pdev, | ||
959 | rx_ring->buffer_info[i].dma, | ||
960 | 2048, PCI_DMA_FROMDEVICE); | ||
961 | if (rx_ring->buffer_info[i].skb) | ||
962 | dev_kfree_skb(rx_ring->buffer_info[i].skb); | ||
963 | } | ||
964 | } | ||
965 | |||
966 | if (tx_ring->desc) { | ||
967 | dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, | ||
968 | tx_ring->dma); | ||
969 | tx_ring->desc = NULL; | ||
970 | } | ||
971 | if (rx_ring->desc) { | ||
972 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, | ||
973 | rx_ring->dma); | ||
974 | rx_ring->desc = NULL; | ||
975 | } | ||
976 | |||
977 | kfree(tx_ring->buffer_info); | ||
978 | tx_ring->buffer_info = NULL; | ||
979 | kfree(rx_ring->buffer_info); | ||
980 | rx_ring->buffer_info = NULL; | ||
981 | } | ||
982 | |||
983 | static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | ||
984 | { | ||
985 | struct e1000_ring *tx_ring = &adapter->test_tx_ring; | ||
986 | struct e1000_ring *rx_ring = &adapter->test_rx_ring; | ||
987 | struct pci_dev *pdev = adapter->pdev; | ||
988 | struct e1000_hw *hw = &adapter->hw; | ||
989 | u32 rctl; | ||
990 | int size; | ||
991 | int i; | ||
992 | int ret_val; | ||
993 | |||
994 | /* Setup Tx descriptor ring and Tx buffers */ | ||
995 | |||
996 | if (!tx_ring->count) | ||
997 | tx_ring->count = E1000_DEFAULT_TXD; | ||
998 | |||
999 | size = tx_ring->count * sizeof(struct e1000_buffer); | ||
1000 | tx_ring->buffer_info = kmalloc(size, GFP_KERNEL); | ||
1001 | if (!tx_ring->buffer_info) { | ||
1002 | ret_val = 1; | ||
1003 | goto err_nomem; | ||
1004 | } | ||
1005 | memset(tx_ring->buffer_info, 0, size); | ||
1006 | |||
1007 | tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); | ||
1008 | tx_ring->size = ALIGN(tx_ring->size, 4096); | ||
1009 | tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, | ||
1010 | &tx_ring->dma, GFP_KERNEL); | ||
1011 | if (!tx_ring->desc) { | ||
1012 | ret_val = 2; | ||
1013 | goto err_nomem; | ||
1014 | } | ||
1015 | memset(tx_ring->desc, 0, tx_ring->size); | ||
1016 | tx_ring->next_to_use = 0; | ||
1017 | tx_ring->next_to_clean = 0; | ||
1018 | |||
1019 | ew32(TDBAL, | ||
1020 | ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); | ||
1021 | ew32(TDBAH, ((u64) tx_ring->dma >> 32)); | ||
1022 | ew32(TDLEN, | ||
1023 | tx_ring->count * sizeof(struct e1000_tx_desc)); | ||
1024 | ew32(TDH, 0); | ||
1025 | ew32(TDT, 0); | ||
1026 | ew32(TCTL, | ||
1027 | E1000_TCTL_PSP | E1000_TCTL_EN | | ||
1028 | E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | | ||
1029 | E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); | ||
1030 | |||
1031 | for (i = 0; i < tx_ring->count; i++) { | ||
1032 | struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); | ||
1033 | struct sk_buff *skb; | ||
1034 | unsigned int skb_size = 1024; | ||
1035 | |||
1036 | skb = alloc_skb(skb_size, GFP_KERNEL); | ||
1037 | if (!skb) { | ||
1038 | ret_val = 3; | ||
1039 | goto err_nomem; | ||
1040 | } | ||
1041 | skb_put(skb, skb_size); | ||
1042 | tx_ring->buffer_info[i].skb = skb; | ||
1043 | tx_ring->buffer_info[i].length = skb->len; | ||
1044 | tx_ring->buffer_info[i].dma = | ||
1045 | pci_map_single(pdev, skb->data, skb->len, | ||
1046 | PCI_DMA_TODEVICE); | ||
1047 | if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) { | ||
1048 | ret_val = 4; | ||
1049 | goto err_nomem; | ||
1050 | } | ||
1051 | tx_desc->buffer_addr = cpu_to_le64( | ||
1052 | tx_ring->buffer_info[i].dma); | ||
1053 | tx_desc->lower.data = cpu_to_le32(skb->len); | ||
1054 | tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | | ||
1055 | E1000_TXD_CMD_IFCS | | ||
1056 | E1000_TXD_CMD_RPS); | ||
1057 | tx_desc->upper.data = 0; | ||
1058 | } | ||
1059 | |||
1060 | /* Setup Rx descriptor ring and Rx buffers */ | ||
1061 | |||
1062 | if (!rx_ring->count) | ||
1063 | rx_ring->count = E1000_DEFAULT_RXD; | ||
1064 | |||
1065 | size = rx_ring->count * sizeof(struct e1000_buffer); | ||
1066 | rx_ring->buffer_info = kmalloc(size, GFP_KERNEL); | ||
1067 | if (!rx_ring->buffer_info) { | ||
1068 | ret_val = 5; | ||
1069 | goto err_nomem; | ||
1070 | } | ||
1071 | memset(rx_ring->buffer_info, 0, size); | ||
1072 | |||
1073 | rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); | ||
1074 | rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, | ||
1075 | &rx_ring->dma, GFP_KERNEL); | ||
1076 | if (!rx_ring->desc) { | ||
1077 | ret_val = 6; | ||
1078 | goto err_nomem; | ||
1079 | } | ||
1080 | memset(rx_ring->desc, 0, rx_ring->size); | ||
1081 | rx_ring->next_to_use = 0; | ||
1082 | rx_ring->next_to_clean = 0; | ||
1083 | |||
1084 | rctl = er32(RCTL); | ||
1085 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | ||
1086 | ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); | ||
1087 | ew32(RDBAH, ((u64) rx_ring->dma >> 32)); | ||
1088 | ew32(RDLEN, rx_ring->size); | ||
1089 | ew32(RDH, 0); | ||
1090 | ew32(RDT, 0); | ||
1091 | rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | | ||
1092 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | ||
1093 | (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); | ||
1094 | ew32(RCTL, rctl); | ||
1095 | |||
1096 | for (i = 0; i < rx_ring->count; i++) { | ||
1097 | struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); | ||
1098 | struct sk_buff *skb; | ||
1099 | |||
1100 | skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL); | ||
1101 | if (!skb) { | ||
1102 | ret_val = 7; | ||
1103 | goto err_nomem; | ||
1104 | } | ||
1105 | skb_reserve(skb, NET_IP_ALIGN); | ||
1106 | rx_ring->buffer_info[i].skb = skb; | ||
1107 | rx_ring->buffer_info[i].dma = | ||
1108 | pci_map_single(pdev, skb->data, 2048, | ||
1109 | PCI_DMA_FROMDEVICE); | ||
1110 | if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) { | ||
1111 | ret_val = 8; | ||
1112 | goto err_nomem; | ||
1113 | } | ||
1114 | rx_desc->buffer_addr = | ||
1115 | cpu_to_le64(rx_ring->buffer_info[i].dma); | ||
1116 | memset(skb->data, 0x00, skb->len); | ||
1117 | } | ||
1118 | |||
1119 | return 0; | ||
1120 | |||
1121 | err_nomem: | ||
1122 | e1000_free_desc_rings(adapter); | ||
1123 | return ret_val; | ||
1124 | } | ||
1125 | |||
1126 | static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) | ||
1127 | { | ||
1128 | /* Write out to PHY registers 29 and 30 to disable the Receiver. */ | ||
1129 | e1e_wphy(&adapter->hw, 29, 0x001F); | ||
1130 | e1e_wphy(&adapter->hw, 30, 0x8FFC); | ||
1131 | e1e_wphy(&adapter->hw, 29, 0x001A); | ||
1132 | e1e_wphy(&adapter->hw, 30, 0x8FF0); | ||
1133 | } | ||
1134 | |||
1135 | static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | ||
1136 | { | ||
1137 | struct e1000_hw *hw = &adapter->hw; | ||
1138 | u32 ctrl_reg = 0; | ||
1139 | u32 stat_reg = 0; | ||
1140 | |||
1141 | adapter->hw.mac.autoneg = 0; | ||
1142 | |||
1143 | if (adapter->hw.phy.type == e1000_phy_m88) { | ||
1144 | /* Auto-MDI/MDIX Off */ | ||
1145 | e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); | ||
1146 | /* reset to update Auto-MDI/MDIX */ | ||
1147 | e1e_wphy(hw, PHY_CONTROL, 0x9140); | ||
1148 | /* autoneg off */ | ||
1149 | e1e_wphy(hw, PHY_CONTROL, 0x8140); | ||
1150 | } else if (adapter->hw.phy.type == e1000_phy_gg82563) | ||
1151 | e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); | ||
1152 | |||
1153 | ctrl_reg = er32(CTRL); | ||
1154 | |||
1155 | if (adapter->hw.phy.type == e1000_phy_ife) { | ||
1156 | /* force 100, set loopback */ | ||
1157 | e1e_wphy(hw, PHY_CONTROL, 0x6100); | ||
1158 | |||
1159 | /* Now set up the MAC to the same speed/duplex as the PHY. */ | ||
1160 | ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ | ||
1161 | ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ | ||
1162 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ | ||
1163 | E1000_CTRL_SPD_100 |/* Force Speed to 100 */ | ||
1164 | E1000_CTRL_FD); /* Force Duplex to FULL */ | ||
1165 | } else { | ||
1166 | /* force 1000, set loopback */ | ||
1167 | e1e_wphy(hw, PHY_CONTROL, 0x4140); | ||
1168 | |||
1169 | /* Now set up the MAC to the same speed/duplex as the PHY. */ | ||
1170 | ctrl_reg = er32(CTRL); | ||
1171 | ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ | ||
1172 | ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ | ||
1173 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ | ||
1174 | E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ | ||
1175 | E1000_CTRL_FD); /* Force Duplex to FULL */ | ||
1176 | } | ||
1177 | |||
1178 | if (adapter->hw.media_type == e1000_media_type_copper && | ||
1179 | adapter->hw.phy.type == e1000_phy_m88) { | ||
1180 | ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ | ||
1181 | } else { | ||
1182 | /* Set the ILOS bit on the fiber Nic if half duplex link is | ||
1183 | * detected. */ | ||
1184 | stat_reg = er32(STATUS); | ||
1185 | if ((stat_reg & E1000_STATUS_FD) == 0) | ||
1186 | ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); | ||
1187 | } | ||
1188 | |||
1189 | ew32(CTRL, ctrl_reg); | ||
1190 | |||
1191 | /* Disable the receiver on the PHY so when a cable is plugged in, the | ||
1192 | * PHY does not begin to autoneg when a cable is reconnected to the NIC. | ||
1193 | */ | ||
1194 | if (adapter->hw.phy.type == e1000_phy_m88) | ||
1195 | e1000_phy_disable_receiver(adapter); | ||
1196 | |||
1197 | udelay(500); | ||
1198 | |||
1199 | return 0; | ||
1200 | } | ||
1201 | |||
1202 | static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) | ||
1203 | { | ||
1204 | struct e1000_hw *hw = &adapter->hw; | ||
1205 | u32 ctrl = er32(CTRL); | ||
1206 | int link = 0; | ||
1207 | |||
1208 | /* special requirements for 82571/82572 fiber adapters */ | ||
1209 | |||
1210 | /* jump through hoops to make sure link is up because serdes | ||
1211 | * link is hardwired up */ | ||
1212 | ctrl |= E1000_CTRL_SLU; | ||
1213 | ew32(CTRL, ctrl); | ||
1214 | |||
1215 | /* disable autoneg */ | ||
1216 | ctrl = er32(TXCW); | ||
1217 | ctrl &= ~(1 << 31); | ||
1218 | ew32(TXCW, ctrl); | ||
1219 | |||
1220 | link = (er32(STATUS) & E1000_STATUS_LU); | ||
1221 | |||
1222 | if (!link) { | ||
1223 | /* set invert loss of signal */ | ||
1224 | ctrl = er32(CTRL); | ||
1225 | ctrl |= E1000_CTRL_ILOS; | ||
1226 | ew32(CTRL, ctrl); | ||
1227 | } | ||
1228 | |||
1229 | /* special write to serdes control register to enable SerDes analog | ||
1230 | * loopback */ | ||
1231 | #define E1000_SERDES_LB_ON 0x410 | ||
1232 | ew32(SCTL, E1000_SERDES_LB_ON); | ||
1233 | msleep(10); | ||
1234 | |||
1235 | return 0; | ||
1236 | } | ||
1237 | |||
1238 | /* only call this for fiber/serdes connections to es2lan */ | ||
1239 | static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) | ||
1240 | { | ||
1241 | struct e1000_hw *hw = &adapter->hw; | ||
1242 | u32 ctrlext = er32(CTRL_EXT); | ||
1243 | u32 ctrl = er32(CTRL); | ||
1244 | |||
1245 | /* save CTRL_EXT to restore later, reuse an empty variable (unused | ||
1246 | on mac_type 80003es2lan) */ | ||
1247 | adapter->tx_fifo_head = ctrlext; | ||
1248 | |||
1249 | /* clear the serdes mode bits, putting the device into mac loopback */ | ||
1250 | ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; | ||
1251 | ew32(CTRL_EXT, ctrlext); | ||
1252 | |||
1253 | /* force speed to 1000/FD, link up */ | ||
1254 | ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); | ||
1255 | ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | | ||
1256 | E1000_CTRL_SPD_1000 | E1000_CTRL_FD); | ||
1257 | ew32(CTRL, ctrl); | ||
1258 | |||
1259 | /* set mac loopback */ | ||
1260 | ctrl = er32(RCTL); | ||
1261 | ctrl |= E1000_RCTL_LBM_MAC; | ||
1262 | ew32(RCTL, ctrl); | ||
1263 | |||
1264 | /* set testing mode parameters (no need to reset later) */ | ||
1265 | #define KMRNCTRLSTA_OPMODE (0x1F << 16) | ||
1266 | #define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 | ||
1267 | ew32(KMRNCTRLSTA, | ||
1268 | (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); | ||
1269 | |||
1270 | return 0; | ||
1271 | } | ||
1272 | |||
1273 | static int e1000_setup_loopback_test(struct e1000_adapter *adapter) | ||
1274 | { | ||
1275 | struct e1000_hw *hw = &adapter->hw; | ||
1276 | u32 rctl; | ||
1277 | |||
1278 | if (hw->media_type == e1000_media_type_fiber || | ||
1279 | hw->media_type == e1000_media_type_internal_serdes) { | ||
1280 | switch (hw->mac.type) { | ||
1281 | case e1000_80003es2lan: | ||
1282 | return e1000_set_es2lan_mac_loopback(adapter); | ||
1283 | break; | ||
1284 | case e1000_82571: | ||
1285 | case e1000_82572: | ||
1286 | return e1000_set_82571_fiber_loopback(adapter); | ||
1287 | break; | ||
1288 | default: | ||
1289 | rctl = er32(RCTL); | ||
1290 | rctl |= E1000_RCTL_LBM_TCVR; | ||
1291 | ew32(RCTL, rctl); | ||
1292 | return 0; | ||
1293 | } | ||
1294 | } else if (hw->media_type == e1000_media_type_copper) { | ||
1295 | return e1000_integrated_phy_loopback(adapter); | ||
1296 | } | ||
1297 | |||
1298 | return 7; | ||
1299 | } | ||
1300 | |||
1301 | static void e1000_loopback_cleanup(struct e1000_adapter *adapter) | ||
1302 | { | ||
1303 | struct e1000_hw *hw = &adapter->hw; | ||
1304 | u32 rctl; | ||
1305 | u16 phy_reg; | ||
1306 | |||
1307 | rctl = er32(RCTL); | ||
1308 | rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); | ||
1309 | ew32(RCTL, rctl); | ||
1310 | |||
1311 | switch (hw->mac.type) { | ||
1312 | case e1000_80003es2lan: | ||
1313 | if (hw->media_type == e1000_media_type_fiber || | ||
1314 | hw->media_type == e1000_media_type_internal_serdes) { | ||
1315 | /* restore CTRL_EXT, stealing space from tx_fifo_head */ | ||
1316 | ew32(CTRL_EXT, | ||
1317 | adapter->tx_fifo_head); | ||
1318 | adapter->tx_fifo_head = 0; | ||
1319 | } | ||
1320 | /* fall through */ | ||
1321 | case e1000_82571: | ||
1322 | case e1000_82572: | ||
1323 | if (hw->media_type == e1000_media_type_fiber || | ||
1324 | hw->media_type == e1000_media_type_internal_serdes) { | ||
1325 | #define E1000_SERDES_LB_OFF 0x400 | ||
1326 | ew32(SCTL, E1000_SERDES_LB_OFF); | ||
1327 | msleep(10); | ||
1328 | break; | ||
1329 | } | ||
1330 | /* Fall Through */ | ||
1331 | default: | ||
1332 | hw->mac.autoneg = 1; | ||
1333 | if (hw->phy.type == e1000_phy_gg82563) | ||
1334 | e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180); | ||
1335 | e1e_rphy(hw, PHY_CONTROL, &phy_reg); | ||
1336 | if (phy_reg & MII_CR_LOOPBACK) { | ||
1337 | phy_reg &= ~MII_CR_LOOPBACK; | ||
1338 | e1e_wphy(hw, PHY_CONTROL, phy_reg); | ||
1339 | e1000e_commit_phy(hw); | ||
1340 | } | ||
1341 | break; | ||
1342 | } | ||
1343 | } | ||
1344 | |||
1345 | static void e1000_create_lbtest_frame(struct sk_buff *skb, | ||
1346 | unsigned int frame_size) | ||
1347 | { | ||
1348 | memset(skb->data, 0xFF, frame_size); | ||
1349 | frame_size &= ~1; | ||
1350 | memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); | ||
1351 | memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); | ||
1352 | memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); | ||
1353 | } | ||
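/*
 * Loopback frame layout as built above (illustrative summary): the first
 * half of the buffer is 0xFF, the second half 0xAA, with single 0xBE/0xAF
 * marker bytes at offsets frame_size/2 + 10 and frame_size/2 + 12.
 * e1000_check_lbtest_frame() below only verifies the leading 0xFF fill and
 * the two marker bytes.
 */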
1354 | |||
1355 | static int e1000_check_lbtest_frame(struct sk_buff *skb, | ||
1356 | unsigned int frame_size) | ||
1357 | { | ||
1358 | frame_size &= ~1; | ||
1359 | if (*(skb->data + 3) == 0xFF) | ||
1360 | if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && | ||
1361 | (*(skb->data + frame_size / 2 + 12) == 0xAF)) | ||
1362 | return 0; | ||
1363 | return 13; | ||
1364 | } | ||
1365 | |||
1366 | static int e1000_run_loopback_test(struct e1000_adapter *adapter) | ||
1367 | { | ||
1368 | struct e1000_ring *tx_ring = &adapter->test_tx_ring; | ||
1369 | struct e1000_ring *rx_ring = &adapter->test_rx_ring; | ||
1370 | struct pci_dev *pdev = adapter->pdev; | ||
1371 | struct e1000_hw *hw = &adapter->hw; | ||
1372 | int i, j, k, l; | ||
1373 | int lc; | ||
1374 | int good_cnt; | ||
1375 | int ret_val = 0; | ||
1376 | unsigned long time; | ||
1377 | |||
1378 | ew32(RDT, rx_ring->count - 1); | ||
1379 | |||
1380 | /* Calculate the loop count based on the largest descriptor ring | ||
1381 | * The idea is to wrap the largest ring a number of times using 64 | ||
1382 | * send/receive pairs during each loop | ||
1383 | */ | ||
1384 | |||
1385 | if (rx_ring->count <= tx_ring->count) | ||
1386 | lc = ((tx_ring->count / 64) * 2) + 1; | ||
1387 | else | ||
1388 | lc = ((rx_ring->count / 64) * 2) + 1; | ||
1389 | |||
1390 | k = 0; | ||
1391 | l = 0; | ||
1392 | for (j = 0; j <= lc; j++) { /* loop count loop */ | ||
1393 | for (i = 0; i < 64; i++) { /* send the packets */ | ||
1394 | e1000_create_lbtest_frame( | ||
1395 | tx_ring->buffer_info[i].skb, 1024); | ||
1396 | pci_dma_sync_single_for_device(pdev, | ||
1397 | tx_ring->buffer_info[k].dma, | ||
1398 | tx_ring->buffer_info[k].length, | ||
1399 | PCI_DMA_TODEVICE); | ||
1400 | k++; | ||
1401 | if (k == tx_ring->count) | ||
1402 | k = 0; | ||
1403 | } | ||
1404 | ew32(TDT, k); | ||
1405 | msleep(200); | ||
1406 | time = jiffies; /* set the start time for the receive */ | ||
1407 | good_cnt = 0; | ||
1408 | do { /* receive the sent packets */ | ||
1409 | pci_dma_sync_single_for_cpu(pdev, | ||
1410 | rx_ring->buffer_info[l].dma, 2048, | ||
1411 | PCI_DMA_FROMDEVICE); | ||
1412 | |||
1413 | ret_val = e1000_check_lbtest_frame( | ||
1414 | rx_ring->buffer_info[l].skb, 1024); | ||
1415 | if (!ret_val) | ||
1416 | good_cnt++; | ||
1417 | l++; | ||
1418 | if (l == rx_ring->count) | ||
1419 | l = 0; | ||
1420 | /* time + 20 msecs (200 msecs on 2.4) is more than | ||
1421 | * enough time to complete the receives, if it's | ||
1422 | * exceeded, break and error off | ||
1423 | */ | ||
1424 | } while ((good_cnt < 64) && !time_after(jiffies, time + 20)); | ||
1425 | if (good_cnt != 64) { | ||
1426 | ret_val = 13; /* ret_val is the same as mis-compare */ | ||
1427 | break; | ||
1428 | } | ||
1429 | if (jiffies >= (time + 2)) { | ||
1430 | ret_val = 14; /* error code for time out error */ | ||
1431 | break; | ||
1432 | } | ||
1433 | } /* end loop count loop */ | ||
1434 | return ret_val; | ||
1435 | } | ||
1436 | |||
1437 | static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) | ||
1438 | { | ||
1439 | /* PHY loopback cannot be performed if SoL/IDER | ||
1440 | * sessions are active */ | ||
1441 | if (e1000_check_reset_block(&adapter->hw)) { | ||
1442 | ndev_err(adapter->netdev, "Cannot do PHY loopback test " | ||
1443 | "when SoL/IDER is active.\n"); | ||
1444 | *data = 0; | ||
1445 | goto out; | ||
1446 | } | ||
1447 | |||
1448 | *data = e1000_setup_desc_rings(adapter); | ||
1449 | if (*data) | ||
1450 | goto out; | ||
1451 | |||
1452 | *data = e1000_setup_loopback_test(adapter); | ||
1453 | if (*data) | ||
1454 | goto err_loopback; | ||
1455 | |||
1456 | *data = e1000_run_loopback_test(adapter); | ||
1457 | e1000_loopback_cleanup(adapter); | ||
1458 | |||
1459 | err_loopback: | ||
1460 | e1000_free_desc_rings(adapter); | ||
1461 | out: | ||
1462 | return *data; | ||
1463 | } | ||
1464 | |||
1465 | static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) | ||
1466 | { | ||
1467 | struct e1000_hw *hw = &adapter->hw; | ||
1468 | |||
1469 | *data = 0; | ||
1470 | if (hw->media_type == e1000_media_type_internal_serdes) { | ||
1471 | int i = 0; | ||
1472 | hw->mac.serdes_has_link = 0; | ||
1473 | |||
1474 | /* On some blade server designs, link establishment | ||
1475 | * could take as long as 2-3 minutes */ | ||
1476 | do { | ||
1477 | hw->mac.ops.check_for_link(hw); | ||
1478 | if (hw->mac.serdes_has_link) | ||
1479 | return *data; | ||
1480 | msleep(20); | ||
1481 | } while (i++ < 3750); | ||
1482 | |||
1483 | *data = 1; | ||
1484 | } else { | ||
1485 | hw->mac.ops.check_for_link(hw); | ||
1486 | if (hw->mac.autoneg) | ||
1487 | msleep(4000); | ||
1488 | |||
1489 | if (!(er32(STATUS) & | ||
1490 | E1000_STATUS_LU)) | ||
1491 | *data = 1; | ||
1492 | } | ||
1493 | return *data; | ||
1494 | } | ||
1495 | |||
1496 | static int e1000_diag_test_count(struct net_device *netdev) | ||
1497 | { | ||
1498 | return E1000_TEST_LEN; | ||
1499 | } | ||
1500 | |||
1501 | static void e1000_diag_test(struct net_device *netdev, | ||
1502 | struct ethtool_test *eth_test, u64 *data) | ||
1503 | { | ||
1504 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1505 | u16 autoneg_advertised; | ||
1506 | u8 forced_speed_duplex; | ||
1507 | u8 autoneg; | ||
1508 | bool if_running = netif_running(netdev); | ||
1509 | |||
1510 | set_bit(__E1000_TESTING, &adapter->state); | ||
1511 | if (eth_test->flags == ETH_TEST_FL_OFFLINE) { | ||
1512 | /* Offline tests */ | ||
1513 | |||
1514 | /* save speed, duplex, autoneg settings */ | ||
1515 | autoneg_advertised = adapter->hw.phy.autoneg_advertised; | ||
1516 | forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; | ||
1517 | autoneg = adapter->hw.mac.autoneg; | ||
1518 | |||
1519 | ndev_info(netdev, "offline testing starting\n"); | ||
1520 | |||
1521 | /* Link test performed before hardware reset so autoneg doesn't | ||
1522 | * interfere with test result */ | ||
1523 | if (e1000_link_test(adapter, &data[4])) | ||
1524 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
1525 | |||
1526 | if (if_running) | ||
1527 | /* indicate we're in test mode */ | ||
1528 | dev_close(netdev); | ||
1529 | else | ||
1530 | e1000e_reset(adapter); | ||
1531 | |||
1532 | if (e1000_reg_test(adapter, &data[0])) | ||
1533 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
1534 | |||
1535 | e1000e_reset(adapter); | ||
1536 | if (e1000_eeprom_test(adapter, &data[1])) | ||
1537 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
1538 | |||
1539 | e1000e_reset(adapter); | ||
1540 | if (e1000_intr_test(adapter, &data[2])) | ||
1541 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
1542 | |||
1543 | e1000e_reset(adapter); | ||
1544 | /* make sure the phy is powered up */ | ||
1545 | e1000e_power_up_phy(adapter); | ||
1546 | if (e1000_loopback_test(adapter, &data[3])) | ||
1547 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
1548 | |||
1549 | /* restore speed, duplex, autoneg settings */ | ||
1550 | adapter->hw.phy.autoneg_advertised = autoneg_advertised; | ||
1551 | adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; | ||
1552 | adapter->hw.mac.autoneg = autoneg; | ||
1553 | |||
1554 | /* force this routine to wait until autoneg complete/timeout */ | ||
1555 | adapter->hw.phy.wait_for_link = 1; | ||
1556 | e1000e_reset(adapter); | ||
1557 | adapter->hw.phy.wait_for_link = 0; | ||
1558 | |||
1559 | clear_bit(__E1000_TESTING, &adapter->state); | ||
1560 | if (if_running) | ||
1561 | dev_open(netdev); | ||
1562 | } else { | ||
1563 | ndev_info(netdev, "online testing starting\n"); | ||
1564 | /* Online tests */ | ||
1565 | if (e1000_link_test(adapter, &data[4])) | ||
1566 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
1567 | |||
1568 | /* Online tests aren't run; pass by default */ | ||
1569 | data[0] = 0; | ||
1570 | data[1] = 0; | ||
1571 | data[2] = 0; | ||
1572 | data[3] = 0; | ||
1573 | |||
1574 | clear_bit(__E1000_TESTING, &adapter->state); | ||
1575 | } | ||
1576 | msleep_interruptible(4 * 1000); | ||
1577 | } | ||
1578 | |||
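/*
 * Descriptive note (added): report the Wake-on-LAN capabilities and the
 * currently enabled wake-up options, masking out unicast (directed) wake-up
 * on parts flagged as not supporting it.
 */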
1579 | static void e1000_get_wol(struct net_device *netdev, | ||
1580 | struct ethtool_wolinfo *wol) | ||
1581 | { | ||
1582 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1583 | |||
1584 | wol->supported = 0; | ||
1585 | wol->wolopts = 0; | ||
1586 | |||
1587 | if (!(adapter->flags & FLAG_HAS_WOL)) | ||
1588 | return; | ||
1589 | |||
1590 | wol->supported = WAKE_UCAST | WAKE_MCAST | | ||
1591 | WAKE_BCAST | WAKE_MAGIC; | ||
1592 | |||
1593 | /* apply any specific unsupported masks here */ | ||
1594 | if (adapter->flags & FLAG_NO_WAKE_UCAST) { | ||
1595 | wol->supported &= ~WAKE_UCAST; | ||
1596 | |||
1597 | if (adapter->wol & E1000_WUFC_EX) | ||
1598 | ndev_err(netdev, "Interface does not support " | ||
1599 | "directed (unicast) frame wake-up packets\n"); | ||
1600 | } | ||
1601 | |||
1602 | if (adapter->wol & E1000_WUFC_EX) | ||
1603 | wol->wolopts |= WAKE_UCAST; | ||
1604 | if (adapter->wol & E1000_WUFC_MC) | ||
1605 | wol->wolopts |= WAKE_MCAST; | ||
1606 | if (adapter->wol & E1000_WUFC_BC) | ||
1607 | wol->wolopts |= WAKE_BCAST; | ||
1608 | if (adapter->wol & E1000_WUFC_MAG) | ||
1609 | wol->wolopts |= WAKE_MAGIC; | ||
1610 | } | ||
1611 | |||
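/*
 * Descriptive note (added): set the Wake-on-LAN configuration.  PHY, ARP
 * and secure-magic wake-ups are rejected as unsupported; the requested
 * options replace whatever was previously stored in adapter->wol.
 */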
1612 | static int e1000_set_wol(struct net_device *netdev, | ||
1613 | struct ethtool_wolinfo *wol) | ||
1614 | { | ||
1615 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1616 | |||
1617 | if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) | ||
1618 | return -EOPNOTSUPP; | ||
1619 | |||
1620 | if (!(adapter->flags & FLAG_HAS_WOL)) | ||
1621 | return wol->wolopts ? -EOPNOTSUPP : 0; | ||
1622 | |||
1623 | /* these settings will always override what we currently have */ | ||
1624 | adapter->wol = 0; | ||
1625 | |||
1626 | if (wol->wolopts & WAKE_UCAST) | ||
1627 | adapter->wol |= E1000_WUFC_EX; | ||
1628 | if (wol->wolopts & WAKE_MCAST) | ||
1629 | adapter->wol |= E1000_WUFC_MC; | ||
1630 | if (wol->wolopts & WAKE_BCAST) | ||
1631 | adapter->wol |= E1000_WUFC_BC; | ||
1632 | if (wol->wolopts & WAKE_MAGIC) | ||
1633 | adapter->wol |= E1000_WUFC_MAG; | ||
1634 | |||
1635 | return 0; | ||
1636 | } | ||
1637 | |||
1638 | /* toggle LED 4 times per second = 2 "blinks" per second */ | ||
1639 | #define E1000_ID_INTERVAL (HZ/4) | ||
1640 | |||
1641 | /* bit defines for adapter->led_status */ | ||
1642 | #define E1000_LED_ON 0 | ||
1643 | |||
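/*
 * Descriptive note (added): timer callback for software LED blinking;
 * toggles the identify LED and re-arms the timer for the next half-blink.
 */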
1644 | static void e1000_led_blink_callback(unsigned long data) | ||
1645 | { | ||
1646 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | ||
1647 | |||
1648 | if (test_and_change_bit(E1000_LED_ON, &adapter->led_status)) | ||
1649 | adapter->hw.mac.ops.led_off(&adapter->hw); | ||
1650 | else | ||
1651 | adapter->hw.mac.ops.led_on(&adapter->hw); | ||
1652 | |||
1653 | mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL); | ||
1654 | } | ||
1655 | |||
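/*
 * Descriptive note (added): identify the adapter by blinking its LED for
 * 'data' seconds, clamped to the maximum schedulable timeout.  IFE PHYs are
 * blinked in software via blink_timer and the PHY special control LED
 * register; other parts use the hardware blink facility (e1000e_blink_led).
 */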
1656 | static int e1000_phys_id(struct net_device *netdev, u32 data) | ||
1657 | { | ||
1658 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1659 | |||
1660 | if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) | ||
1661 | data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); | ||
1662 | |||
1663 | if (adapter->hw.phy.type == e1000_phy_ife) { | ||
1664 | if (!adapter->blink_timer.function) { | ||
1665 | init_timer(&adapter->blink_timer); | ||
1666 | adapter->blink_timer.function = | ||
1667 | e1000_led_blink_callback; | ||
1668 | adapter->blink_timer.data = (unsigned long) adapter; | ||
1669 | } | ||
1670 | mod_timer(&adapter->blink_timer, jiffies); | ||
1671 | msleep_interruptible(data * 1000); | ||
1672 | del_timer_sync(&adapter->blink_timer); | ||
1673 | e1e_wphy(&adapter->hw, | ||
1674 | IFE_PHY_SPECIAL_CONTROL_LED, 0); | ||
1675 | } else { | ||
1676 | e1000e_blink_led(&adapter->hw); | ||
1677 | msleep_interruptible(data * 1000); | ||
1678 | } | ||
1679 | |||
1680 | adapter->hw.mac.ops.led_off(&adapter->hw); | ||
1681 | clear_bit(E1000_LED_ON, &adapter->led_status); | ||
1682 | adapter->hw.mac.ops.cleanup_led(&adapter->hw); | ||
1683 | |||
1684 | return 0; | ||
1685 | } | ||
1686 | |||
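/*
 * Descriptive note (added): ethtool nway_reset; restart link negotiation by
 * reinitializing the interface when it is running.
 */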
1687 | static int e1000_nway_reset(struct net_device *netdev) | ||
1688 | { | ||
1689 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1690 | if (netif_running(netdev)) | ||
1691 | e1000e_reinit_locked(adapter); | ||
1692 | return 0; | ||
1693 | } | ||
1694 | |||
1695 | static int e1000_get_stats_count(struct net_device *netdev) | ||
1696 | { | ||
1697 | return E1000_STATS_LEN; | ||
1698 | } | ||
1699 | |||
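/*
 * Descriptive note (added): fill the ethtool statistics array; refresh the
 * hardware counters, then copy each entry of e1000_gstrings_stats out of
 * the adapter structure, widening 32-bit counters to u64.
 */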
1700 | static void e1000_get_ethtool_stats(struct net_device *netdev, | ||
1701 | struct ethtool_stats *stats, | ||
1702 | u64 *data) | ||
1703 | { | ||
1704 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
1705 | int i; | ||
1706 | |||
1707 | e1000e_update_stats(adapter); | ||
1708 | for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { | ||
1709 | char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; | ||
1710 | data[i] = (e1000_gstrings_stats[i].sizeof_stat == | ||
1711 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; | ||
1712 | } | ||
1713 | } | ||
1714 | |||
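/*
 * Descriptive note (added): return the string table for the requested
 * string set: self-test names for ETH_SS_TEST, statistics names for
 * ETH_SS_STATS.
 */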
1715 | static void e1000_get_strings(struct net_device *netdev, u32 stringset, | ||
1716 | u8 *data) | ||
1717 | { | ||
1718 | u8 *p = data; | ||
1719 | int i; | ||
1720 | |||
1721 | switch (stringset) { | ||
1722 | case ETH_SS_TEST: | ||
1723 | memcpy(data, *e1000_gstrings_test, | ||
1724 | E1000_TEST_LEN*ETH_GSTRING_LEN); | ||
1725 | break; | ||
1726 | case ETH_SS_STATS: | ||
1727 | for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { | ||
1728 | memcpy(p, e1000_gstrings_stats[i].stat_string, | ||
1729 | ETH_GSTRING_LEN); | ||
1730 | p += ETH_GSTRING_LEN; | ||
1731 | } | ||
1732 | break; | ||
1733 | } | ||
1734 | } | ||
1735 | |||
1736 | static const struct ethtool_ops e1000_ethtool_ops = { | ||
1737 | .get_settings = e1000_get_settings, | ||
1738 | .set_settings = e1000_set_settings, | ||
1739 | .get_drvinfo = e1000_get_drvinfo, | ||
1740 | .get_regs_len = e1000_get_regs_len, | ||
1741 | .get_regs = e1000_get_regs, | ||
1742 | .get_wol = e1000_get_wol, | ||
1743 | .set_wol = e1000_set_wol, | ||
1744 | .get_msglevel = e1000_get_msglevel, | ||
1745 | .set_msglevel = e1000_set_msglevel, | ||
1746 | .nway_reset = e1000_nway_reset, | ||
1747 | .get_link = ethtool_op_get_link, | ||
1748 | .get_eeprom_len = e1000_get_eeprom_len, | ||
1749 | .get_eeprom = e1000_get_eeprom, | ||
1750 | .set_eeprom = e1000_set_eeprom, | ||
1751 | .get_ringparam = e1000_get_ringparam, | ||
1752 | .set_ringparam = e1000_set_ringparam, | ||
1753 | .get_pauseparam = e1000_get_pauseparam, | ||
1754 | .set_pauseparam = e1000_set_pauseparam, | ||
1755 | .get_rx_csum = e1000_get_rx_csum, | ||
1756 | .set_rx_csum = e1000_set_rx_csum, | ||
1757 | .get_tx_csum = e1000_get_tx_csum, | ||
1758 | .set_tx_csum = e1000_set_tx_csum, | ||
1759 | .get_sg = ethtool_op_get_sg, | ||
1760 | .set_sg = ethtool_op_set_sg, | ||
1761 | .get_tso = ethtool_op_get_tso, | ||
1762 | .set_tso = e1000_set_tso, | ||
1763 | .self_test_count = e1000_diag_test_count, | ||
1764 | .self_test = e1000_diag_test, | ||
1765 | .get_strings = e1000_get_strings, | ||
1766 | .phys_id = e1000_phys_id, | ||
1767 | .get_stats_count = e1000_get_stats_count, | ||
1768 | .get_ethtool_stats = e1000_get_ethtool_stats, | ||
1769 | }; | ||
1770 | |||
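/*
 * Descriptive note (added): attach the ethtool operations to the
 * net_device.  These ops are what the userspace ethtool utility typically
 * exercises, e.g. "ethtool -S" maps to get_ethtool_stats, "ethtool -t ...
 * offline" to the self-test above and "ethtool -p" to phys_id.
 */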
1771 | void e1000e_set_ethtool_ops(struct net_device *netdev) | ||
1772 | { | ||
1773 | SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops); | ||
1774 | } | ||