author		Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-05-13 03:17:42 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-08-11 05:33:50 -0400
commit		874aeea5d01cac55c160a4e503e3ddb4db030de7 (patch)
tree		2ec67fc737ebc853d954b914a70098ece1ded19b /drivers/net/ethernet/sfc/selftest.c
parent		e689cf4a042772f727450035b102579b0c01bdc7 (diff)
sfc: Move the Solarflare drivers
Move the Solarflare drivers into drivers/net/ethernet/sfc/ and
make the necessary Kconfig and Makefile changes.
CC: Steve Hodgson <shodgson@solarflare.com>
CC: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
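The Kconfig and Makefile changes mentioned above are not part of this diffstat, which is limited to selftest.c. As a rough sketch of what hooking a driver into the new ethernet vendor hierarchy typically involves, assuming the standard vendor-directory pattern and the existing CONFIG_SFC symbol:

    # drivers/net/ethernet/Kconfig: pull in the relocated sfc Kconfig (sketch)
    source "drivers/net/ethernet/sfc/Kconfig"

    # drivers/net/ethernet/Makefile: descend into the new directory (sketch)
    obj-$(CONFIG_SFC) += sfc/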
Diffstat (limited to 'drivers/net/ethernet/sfc/selftest.c')
-rw-r--r--	drivers/net/ethernet/sfc/selftest.c	761
1 file changed, 761 insertions, 0 deletions
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
new file mode 100644
index 000000000000..822f6c2a6a7c
--- /dev/null
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -0,0 +1,761 @@
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <asm/io.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/*
 * Loopback test packet structure
 *
 * The self-test should stress every RSS vector, and unfortunately
 * Falcon only performs RSS on TCP/UDP packets.
 */
struct efx_loopback_payload {
	struct ethhdr header;
	struct iphdr ip;
	struct udphdr udp;
	__be16 iteration;
	const char msg[64];
} __packed;
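/* The payload is sent and checked verbatim; sizeof(struct efx_loopback_payload)
 * is the exact frame length that efx_loopback_rx_packet() expects below. */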

/* Loopback test source MAC address */
static const unsigned char payload_source[ETH_ALEN] = {
	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};

static const char payload_msg[] =
	"Hello world! This is an Efx loopback test in progress!";

/* Interrupt mode names */
static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
static const char *efx_interrupt_mode_names[] = {
	[EFX_INT_MODE_MSIX]   = "MSI-X",
	[EFX_INT_MODE_MSI]    = "MSI",
	[EFX_INT_MODE_LEGACY] = "legacy",
};
#define INT_MODE(efx) \
	STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
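/* STRING_TABLE_LOOKUP() (defined in net_driver.h) maps the current interrupt
 * mode to one of the names above, which is why the _max/_names pair is kept
 * together here. */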

/**
 * efx_loopback_state - persistent state during a loopback selftest
 * @flush: Drop all packets in efx_loopback_rx_packet
 * @packet_count: Number of packets being used in this test
 * @skbs: An array of skbs transmitted
 * @offload_csum: Checksums are being offloaded
 * @rx_good: RX good packet count
 * @rx_bad: RX bad packet count
 * @payload: Payload used in tests
 */
struct efx_loopback_state {
	bool flush;
	int packet_count;
	struct sk_buff **skbs;
	bool offload_csum;
	atomic_t rx_good;
	atomic_t rx_bad;
	struct efx_loopback_payload payload;
};

/**************************************************************************
 *
 * MII, NVRAM and register tests
 *
 **************************************************************************/

static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc = 0;

	if (efx->phy_op->test_alive) {
		rc = efx->phy_op->test_alive(efx);
		tests->phy_alive = rc ? -1 : 1;
	}

	return rc;
}

static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc = 0;

	if (efx->type->test_nvram) {
		rc = efx->type->test_nvram(efx);
		tests->nvram = rc ? -1 : 1;
	}

	return rc;
}

static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc = 0;

	/* Test register access */
	if (efx->type->test_registers) {
		rc = efx->type->test_registers(efx);
		tests->registers = rc ? -1 : 1;
	}

	return rc;
}

/**************************************************************************
 *
 * Interrupt and event queue testing
 *
 **************************************************************************/

/* Test generation and receipt of interrupts */
static int efx_test_interrupts(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
	tests->interrupt = -1;

	/* Reset interrupt flag */
	efx->last_irq_cpu = -1;
	smp_wmb();

	efx_nic_generate_interrupt(efx);

	/* Wait for arrival of test interrupt. */
	netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
	schedule_timeout_uninterruptible(HZ / 10);
	if (efx->last_irq_cpu >= 0)
		goto success;

	netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
	return -ETIMEDOUT;

success:
	netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
		  INT_MODE(efx),
		  efx->last_irq_cpu);
	tests->interrupt = 1;
	return 0;
}

/* Test generation and receipt of interrupting events */
static int efx_test_eventq_irq(struct efx_channel *channel,
			       struct efx_self_tests *tests)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr, count;

	tests->eventq_dma[channel->channel] = -1;
	tests->eventq_int[channel->channel] = -1;
	tests->eventq_poll[channel->channel] = -1;

	read_ptr = channel->eventq_read_ptr;
	channel->efx->last_irq_cpu = -1;
	smp_wmb();

	efx_nic_generate_test_event(channel);

	/* Wait for arrival of interrupt */
	count = 0;
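	/* Poll the event queue read pointer up to twice, roughly 10 ms apart */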
	do {
		schedule_timeout_uninterruptible(HZ / 100);

		if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
			goto eventq_ok;
	} while (++count < 2);

	netif_err(efx, drv, efx->net_dev,
		  "channel %d timed out waiting for event queue\n",
		  channel->channel);

	/* See if interrupt arrived */
	if (channel->efx->last_irq_cpu >= 0) {
		netif_err(efx, drv, efx->net_dev,
			  "channel %d saw interrupt on CPU%d "
			  "during event queue test\n", channel->channel,
			  raw_smp_processor_id());
		tests->eventq_int[channel->channel] = 1;
	}

	/* Check to see if event was received even if interrupt wasn't */
	if (efx_nic_event_present(channel)) {
		netif_err(efx, drv, efx->net_dev,
			  "channel %d event was generated, but "
			  "failed to trigger an interrupt\n", channel->channel);
		tests->eventq_dma[channel->channel] = 1;
	}

	return -ETIMEDOUT;
eventq_ok:
	netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n",
		  channel->channel);
	tests->eventq_dma[channel->channel] = 1;
	tests->eventq_int[channel->channel] = 1;
	tests->eventq_poll[channel->channel] = 1;
	return 0;
}

static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
			unsigned flags)
{
	int rc;

	if (!efx->phy_op->run_tests)
		return 0;

	mutex_lock(&efx->mac_lock);
	rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

/**************************************************************************
 *
 * Loopback testing
 * NB Only one loopback test can be executing concurrently.
 *
 **************************************************************************/

/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 */
void efx_loopback_rx_packet(struct efx_nic *efx,
			    const char *buf_ptr, int pkt_len)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *received;
	struct efx_loopback_payload *payload;

	BUG_ON(!buf_ptr);

	/* If we are just flushing, then drop the packet */
	if ((state == NULL) || state->flush)
		return;

	payload = &state->payload;

	received = (struct efx_loopback_payload *) buf_ptr;
	received->ip.saddr = payload->ip.saddr;
	if (state->offload_csum)
		received->ip.check = payload->ip.check;

	/* Check that header exists */
	if (pkt_len < sizeof(received->header)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw runt RX packet (length %d) in %s loopback "
			  "test\n", pkt_len, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that the ethernet header exists */
	if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw non-loopback RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check packet length */
	if (pkt_len != sizeof(*payload)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw incorrect RX packet length %d (wanted %d) in "
			  "%s loopback test\n", pkt_len, (int)sizeof(*payload),
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that IP header matches */
	if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted IP header in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that msg and padding matches */
	if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that iteration matches */
	if (received->iteration != payload->iteration) {
		netif_err(efx, drv, efx->net_dev,
			  "saw RX packet from iteration %d (wanted %d) in "
			  "%s loopback test\n", ntohs(received->iteration),
			  ntohs(payload->iteration), LOOPBACK_MODE(efx));
		goto err;
	}

	/* Increase correct RX count */
	netif_vdbg(efx, drv, efx->net_dev,
		   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));

	atomic_inc(&state->rx_good);
	return;

err:
#ifdef EFX_ENABLE_DEBUG
	if (atomic_read(&state->rx_bad) == 0) {
		netif_err(efx, drv, efx->net_dev, "received packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       buf_ptr, pkt_len, 0);
		netif_err(efx, drv, efx->net_dev, "expected packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       &state->payload, sizeof(state->payload), 0);
	}
#endif
	atomic_inc(&state->rx_bad);
}

/* Initialise an efx_selftest_state for a new iteration */
static void efx_iterate_state(struct efx_nic *efx)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct net_device *net_dev = efx->net_dev;
	struct efx_loopback_payload *payload = &state->payload;

	/* Initialise the layerII header */
	memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN);
	memcpy(&payload->header.h_source, &payload_source, ETH_ALEN);
	payload->header.h_proto = htons(ETH_P_IP);

	/* saddr set later and used as incrementing count */
	payload->ip.daddr = htonl(INADDR_LOOPBACK);
	payload->ip.ihl = 5;
	payload->ip.check = htons(0xdead);
	payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
	payload->ip.version = IPVERSION;
	payload->ip.protocol = IPPROTO_UDP;

	/* Initialise udp header */
	payload->udp.source = 0;
	payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
				 sizeof(struct iphdr));
	payload->udp.check = 0;	/* checksum ignored */

	/* Fill out payload */
	payload->iteration = htons(ntohs(payload->iteration) + 1);
	memcpy(&payload->msg, payload_msg, sizeof(payload_msg));

	/* Fill out remaining state members */
	atomic_set(&state->rx_good, 0);
	atomic_set(&state->rx_bad, 0);
	smp_wmb();
}

static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *payload;
	struct sk_buff *skb;
	int i;
	netdev_tx_t rc;

	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the rss vectors */
		payload = ((struct efx_loopback_payload *)
			   skb_put(skb, sizeof(state->payload)));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		if (efx_dev_registered(efx))
			netif_tx_lock_bh(efx->net_dev);
		rc = efx_enqueue_skb(tx_queue, skb);
		if (efx_dev_registered(efx))
			netif_tx_unlock_bh(efx->net_dev);

		if (rc != NETDEV_TX_OK) {
			netif_err(efx, drv, efx->net_dev,
				  "TX queue %d could not transmit packet %d of "
				  "%d in %s loopback test\n", tx_queue->queue,
				  i + 1, state->packet_count,
				  LOOPBACK_MODE(efx));

			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	return 0;
}

static int efx_poll_loopback(struct efx_nic *efx)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_channel *channel;

	/* NAPI polling is not enabled, so process channels
	 * synchronously */
	efx_for_each_channel(channel, efx) {
		if (channel->work_pending)
			efx_process_channel_now(channel);
	}
	return atomic_read(&state->rx_good) == state->packet_count;
}

static int efx_end_loopback(struct efx_tx_queue *tx_queue,
			    struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct sk_buff *skb;
	int tx_done = 0, rx_good, rx_bad;
	int i, rc = 0;

	if (efx_dev_registered(efx))
		netif_tx_lock_bh(efx->net_dev);

	/* Count the number of tx completions, and decrement the refcnt. Any
	 * skbs not already completed will be free'd when the queue is flushed */
	for (i = 0; i < state->packet_count; i++) {
		skb = state->skbs[i];
		if (skb && !skb_shared(skb))
			++tx_done;
		dev_kfree_skb_any(skb);
	}

	if (efx_dev_registered(efx))
		netif_tx_unlock_bh(efx->net_dev);

	/* Check TX completion and received packet counts */
	rx_good = atomic_read(&state->rx_good);
	rx_bad = atomic_read(&state->rx_bad);
	if (tx_done != state->packet_count) {
		/* Don't free the skbs; they will be picked up on TX
		 * overflow or channel teardown.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "TX completion events in %s loopback test\n",
			  tx_queue->queue, tx_done, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Allow to fall through so we see the RX errors as well */
	}

	/* We may always be up to a flush away from our desired packet total */
	if (rx_good != state->packet_count) {
		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "received packets in %s loopback test\n",
			  tx_queue->queue, rx_good, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Fall through */
	}

	/* Update loopback test structure */
	lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
	lb_tests->tx_done[tx_queue->queue] += tx_done;
	lb_tests->rx_good += rx_good;
	lb_tests->rx_bad += rx_bad;

	return rc;
}

static int
efx_test_loopback(struct efx_tx_queue *tx_queue,
		  struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	int i, begin_rc, end_rc;

	for (i = 0; i < 3; i++) {
		/* Determine how many packets to send */
		state->packet_count = efx->txq_entries / 3;
		state->packet_count = min(1 << (i << 2), state->packet_count);
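		/* i.e. bursts of 1, 16 and 256 packets, capped at a third of the TX ring */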
		state->skbs = kzalloc(sizeof(state->skbs[0]) *
				      state->packet_count, GFP_KERNEL);
		if (!state->skbs)
			return -ENOMEM;
		state->flush = false;

		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d testing %s loopback with %d packets\n",
			  tx_queue->queue, LOOPBACK_MODE(efx),
			  state->packet_count);

		efx_iterate_state(efx);
		begin_rc = efx_begin_loopback(tx_queue);

		/* This will normally complete very quickly, but be
		 * prepared to wait up to 100 ms. */
		msleep(1);
		if (!efx_poll_loopback(efx)) {
			msleep(100);
			efx_poll_loopback(efx);
		}

		end_rc = efx_end_loopback(tx_queue, lb_tests);
		kfree(state->skbs);

		if (begin_rc || end_rc) {
			/* Wait a while to ensure there are no packets
			 * floating around after a failure. */
			schedule_timeout_uninterruptible(HZ / 10);
			return begin_rc ? begin_rc : end_rc;
		}
	}

	netif_dbg(efx, drv, efx->net_dev,
		  "TX queue %d passed %s loopback test with a burst length "
		  "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
		  state->packet_count);

	return 0;
}

/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
 * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
 * to delay and retry. Therefore, it's safer to just poll directly. Wait
 * for link up and any faults to dissipate. */
static int efx_wait_for_link(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	int count, link_up_count = 0;
	bool link_up;

	for (count = 0; count < 40; count++) {
		schedule_timeout_uninterruptible(HZ / 10);

		if (efx->type->monitor != NULL) {
			mutex_lock(&efx->mac_lock);
			efx->type->monitor(efx);
			mutex_unlock(&efx->mac_lock);
		} else {
			struct efx_channel *channel = efx_get_channel(efx, 0);
			if (channel->work_pending)
				efx_process_channel_now(channel);
		}

		mutex_lock(&efx->mac_lock);
		link_up = link_state->up;
		if (link_up)
			link_up = !efx->mac_op->check_fault(efx);
		mutex_unlock(&efx->mac_lock);

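		/* Require the link to be up on two consecutive polls before reporting success */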
		if (link_up) {
			if (++link_up_count == 2)
				return 0;
		} else {
			link_up_count = 0;
		}
	}

	return -ETIMEDOUT;
}

static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
			      unsigned int loopback_modes)
{
	enum efx_loopback_mode mode;
	struct efx_loopback_state *state;
	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	/* Set the port loopback_selftest member. From this point on
	 * all received packets will be dropped. Mark the state as
	 * "flushing" so all inflight packets are dropped */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	BUG_ON(efx->loopback_selftest);
	state->flush = true;
	efx->loopback_selftest = state;

	/* Test all supported loopback modes */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(loopback_modes & (1 << mode)))
			continue;

		/* Move the port into the specified loopback mode. */
		state->flush = true;
		mutex_lock(&efx->mac_lock);
		efx->loopback_mode = mode;
		rc = __efx_reconfigure_port(efx);
		mutex_unlock(&efx->mac_lock);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "unable to move into %s loopback\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		rc = efx_wait_for_link(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "loopback %s never came up\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		/* Test all enabled types of TX queue */
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			state->offload_csum = (tx_queue->queue &
					       EFX_TXQ_TYPE_OFFLOAD);
			rc = efx_test_loopback(tx_queue,
					       &tests->loopback[mode]);
			if (rc)
				goto out;
		}
	}

out:
	/* Remove the flush. The caller will remove the loopback setting */
	state->flush = true;
	efx->loopback_selftest = NULL;
	wmb();
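	/* Make sure the cleared selftest pointer is visible before the state is freed */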
	kfree(state);

	return rc;
}

/**************************************************************************
 *
 * Entry point
 *
 *************************************************************************/

int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
		 unsigned flags)
{
	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
	int phy_mode = efx->phy_mode;
	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
	struct efx_channel *channel;
	int rc_test = 0, rc_reset = 0, rc;

	/* Online (i.e. non-disruptive) testing
	 * This checks interrupt generation, event delivery and PHY presence. */

	rc = efx_test_phy_alive(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_nvram(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_interrupts(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	efx_for_each_channel(channel, efx) {
		rc = efx_test_eventq_irq(channel, tests);
		if (rc && !rc_test)
			rc_test = rc;
	}

	if (rc_test)
		return rc_test;

	if (!(flags & ETH_TEST_FL_OFFLINE))
		return efx_test_phy(efx, tests, flags);

	/* Offline (i.e. disruptive) testing
	 * This checks MAC and PHY loopback on the specified port. */

	/* Detach the device so the kernel doesn't transmit during the
	 * loopback test and the watchdog timeout doesn't fire.
	 */
	netif_device_detach(efx->net_dev);

	mutex_lock(&efx->mac_lock);
	if (efx->loopback_modes) {
		/* We need the 312 clock from the PHY to test the XMAC
		 * registers, so move into XGMII loopback if available */
		if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
			efx->loopback_mode = LOOPBACK_XGMII;
		else
			efx->loopback_mode = __ffs(efx->loopback_modes);
	}

	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	/* free up all consumers of SRAM (including all the queues) */
	efx_reset_down(efx, reset_method);

	rc = efx_test_chip(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	/* reset the chip to recover from the register test */
	rc_reset = efx->type->reset(efx, reset_method);

	/* Ensure that the phy is powered and out of loopback
	 * for the bist and loopback tests */
	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
	efx->loopback_mode = LOOPBACK_NONE;

	rc = efx_reset_up(efx, reset_method, rc_reset == 0);
	if (rc && !rc_reset)
		rc_reset = rc;

	if (rc_reset) {
		netif_err(efx, drv, efx->net_dev,
			  "Unable to recover from chip test\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
		return rc_reset;
	}

	rc = efx_test_phy(efx, tests, flags);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
	if (rc && !rc_test)
		rc_test = rc;

	/* restore the PHY to the previous state */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode = phy_mode;
	efx->loopback_mode = loopback_mode;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	netif_device_attach(efx->net_dev);

	return rc_test;
}